Merge pull request #189 from nalind/storage

Storage in Image Management
This commit is contained in:
Antonio Murdaca 2017-01-18 23:02:08 +01:00 committed by GitHub
commit b9dc097c40
136 changed files with 3897 additions and 792 deletions

3
.gitignore vendored
View file

@ -7,3 +7,6 @@ pause/pause.o
ocid.conf ocid.conf
*.orig *.orig
*.rej *.rej
test/bin2img/bin2img
test/copyimg/copyimg
test/testdata/redis-image

View file

@ -9,6 +9,10 @@ sudo: required
services: services:
- docker - docker
before_install:
- sudo apt-get -qq update
- sudo apt-get -qq install btrfs-tools libdevmapper-dev libgpgme11-dev
install: install:
- make install.tools - make install.tools

View file

@ -23,6 +23,7 @@ RUN apt-get update && apt-get install -y \
btrfs-tools \ btrfs-tools \
libdevmapper1.02.1 \ libdevmapper1.02.1 \
libdevmapper-dev \ libdevmapper-dev \
libgpgme11-dev \
--no-install-recommends \ --no-install-recommends \
&& apt-get clean && apt-get clean
@ -52,6 +53,10 @@ RUN set -x \
&& cp runc /usr/local/bin/runc \ && cp runc /usr/local/bin/runc \
&& rm -rf "$GOPATH" && rm -rf "$GOPATH"
# Make sure we have some policy for pulling images
RUN mkdir -p /etc/containers
COPY test/policy.json /etc/containers/policy.json
WORKDIR /go/src/github.com/kubernetes-incubator/cri-o WORKDIR /go/src/github.com/kubernetes-incubator/cri-o
ADD . /go/src/github.com/kubernetes-incubator/cri-o ADD . /go/src/github.com/kubernetes-incubator/cri-o

View file

@ -44,6 +44,12 @@ conmon:
pause: pause:
make -C $@ make -C $@
bin2img:
make -C test/$@
copyimg:
make -C test/$@
ocid: ocid:
ifndef GOPATH ifndef GOPATH
$(error GOPATH is not set) $(error GOPATH is not set)
@ -69,10 +75,13 @@ ocid.conf: ocid
clean: clean:
rm -f docs/*.1 docs/*.5 docs/*.8 rm -f docs/*.1 docs/*.5 docs/*.8
rm -fr test/testdata/redis-image
find . -name \*~ -delete find . -name \*~ -delete
find . -name \#\* -delete find . -name \#\* -delete
make -C conmon clean make -C conmon clean
make -C pause clean make -C pause clean
make -C test/bin2img clean
make -C test/copyimg clean
ocidimage: ocidimage:
docker build -t ${OCID_IMAGE} . docker build -t ${OCID_IMAGE} .
@ -86,7 +95,7 @@ integration: ocidimage
localintegration: binaries localintegration: binaries
./test/test_runner.sh ${TESTFLAGS} ./test/test_runner.sh ${TESTFLAGS}
binaries: ocid ocic kpod conmon pause binaries: ocid ocic kpod conmon pause bin2img copyimg
MANPAGES_MD := $(wildcard docs/*.md) MANPAGES_MD := $(wildcard docs/*.md)
MANPAGES := $(MANPAGES_MD:%.md=%) MANPAGES := $(MANPAGES_MD:%.md=%)
@ -180,9 +189,11 @@ install.tools: .install.gitvalidation .install.gometalinter .install.md2man
go get -u github.com/cpuguy83/go-md2man go get -u github.com/cpuguy83/go-md2man
.PHONY: \ .PHONY: \
bin2img \
binaries \ binaries \
clean \ clean \
conmon \ conmon \
copyimg \
default \ default \
docs \ docs \
gofmt \ gofmt \

View file

@ -42,9 +42,11 @@ It is currently in active development in the Kubernetes community through the [d
### Build ### Build
`glib2-devel` and `glibc-static` packages on Fedora or ` libglib2.0-dev` on Ubuntu or equivalent is required. `btrfs-progs-devel`, `device-mapper-devel`, `glib2-devel`, `glibc-devel`, `gpgme-devel`, `libassuan-devel`, `libgpg-error-devel`, and `pkg-config` packages on CentOS/Fedora or `btrfs-tools`, `libassuan-dev`, `libc6-dev`, `libdevmapper-dev`, `libglib2.0-dev`, `libgpg-error-dev`, `libgpgme11-dev`, and `pkg-config` on Ubuntu or equivalent is required.
In order to enable seccomp support you will need to install `libseccomp` on your platform. In order to enable seccomp support you will need to install development files for `libseccomp` on your platform.
> e.g. `libseccomp-devel` for CentOS/Fedora, or `libseccomp-dev` for Ubuntu > e.g. `libseccomp-devel` for CentOS/Fedora, or `libseccomp-dev` for Ubuntu
In order to enable apparmor support you will need to install development files for `libapparmor` on your platform.
> e.g. `libapparmor-dev` for Ubuntu
```bash ```bash
$ GOPATH=/path/to/gopath $ GOPATH=/path/to/gopath

View file

@ -12,17 +12,21 @@ var commentedConfigTemplate = template.Must(template.New("config").Parse(`
# The "ocid" table contains all of the server options. # The "ocid" table contains all of the server options.
[ocid] [ocid]
# root is a path to the "root directory". OCID stores all of its state # root is a path to the "root directory". OCID stores all of its data,
# data, including container images, in this directory. # including container images, in this directory.
root = "{{ .Root }}" root = "{{ .Root }}"
# sandbox_dir is the directory where ocid will store all of its sandbox # run is a path to the "run directory". OCID stores all of its state
# state and other information. # in this directory.
sandbox_dir = "{{ .SandboxDir }}" runroot = "{{ .RunRoot }}"
# container_dir is the directory where ocid will store all of its # storage_driver select which storage driver is used to manage storage
# container state and other information. # of images and containers.
container_dir = "{{ .ContainerDir }}" storage_driver = "{{ .Storage }}"
# storage_option is used to pass an option to the storage driver.
storage_option = [
{{ range $opt := .StorageOptions }}{{ printf "\t%q,\n" $opt }}{{ end }}]
# The "ocid.api" table contains settings for the kubelet/gRPC # The "ocid.api" table contains settings for the kubelet/gRPC
# interface (which is also used by ocic). # interface (which is also used by ocic).
@ -67,9 +71,23 @@ cgroup_manager = "{{ .CgroupManager }}"
# management of OCI images. # management of OCI images.
[ocid.image] [ocid.image]
# pause is the path to the statically linked pause container binary, used # default_transport is the prefix we try prepending to an image name if the
# as the entrypoint for infra containers. # image name as we receive it can't be parsed as a valid source reference
pause = "{{ .Pause }}" default_transport = "{{ .DefaultTransport }}"
# pause_image is the image which we use to instantiate infra containers.
pause_image = "{{ .PauseImage }}"
# pause_command is the command to run in a pause_image to have a container just
# sit there. If the image contains the necessary information, this value need
# not be specified.
pause_command = "{{ .PauseCommand }}"
# signature_policy is the name of the file which decides what sort of policy we
# use when deciding whether or not to trust an image that we've pulled.
# Outside of testing situations, it is strongly advised that this be left
# unspecified so that the default system-wide policy will be used.
signature_policy = "{{ .SignaturePolicyPath }}"
# The "ocid.network" table contains settings pertaining to the # The "ocid.network" table contains settings pertaining to the
# management of CNI plugins. # management of CNI plugins.

View file

@ -8,7 +8,7 @@ import (
) )
func sdNotify() { func sdNotify() {
if _, err := systemdDaemon.SdNotify("READY=1"); err != nil { if _, err := systemdDaemon.SdNotify(true, "READY=1"); err != nil {
logrus.Warnf("Failed to sd_notify systemd: %v", err) logrus.Warnf("Failed to sd_notify systemd: %v", err)
} }
} }

View file

@ -4,7 +4,10 @@ import (
"fmt" "fmt"
"net" "net"
"os" "os"
"os/signal"
"sort" "sort"
"strings"
"syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
@ -36,17 +39,29 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error {
if ctx.GlobalIsSet("conmon") { if ctx.GlobalIsSet("conmon") {
config.Conmon = ctx.GlobalString("conmon") config.Conmon = ctx.GlobalString("conmon")
} }
if ctx.GlobalIsSet("containerdir") { if ctx.GlobalIsSet("pause-command") {
config.ContainerDir = ctx.GlobalString("containerdir") config.PauseCommand = ctx.GlobalString("pause-command")
} }
if ctx.GlobalIsSet("pause") { if ctx.GlobalIsSet("pause-image") {
config.Pause = ctx.GlobalString("pause") config.PauseImage = ctx.GlobalString("pause-image")
}
if ctx.GlobalIsSet("signature-policy") {
config.SignaturePolicyPath = ctx.GlobalString("signature-policy")
} }
if ctx.GlobalIsSet("root") { if ctx.GlobalIsSet("root") {
config.Root = ctx.GlobalString("root") config.Root = ctx.GlobalString("root")
} }
if ctx.GlobalIsSet("sandboxdir") { if ctx.GlobalIsSet("runroot") {
config.SandboxDir = ctx.GlobalString("sandboxdir") config.RunRoot = ctx.GlobalString("runroot")
}
if ctx.GlobalIsSet("storage-driver") {
config.Storage = ctx.GlobalString("storage-driver")
}
if ctx.GlobalIsSet("storage-option") {
config.StorageOptions = ctx.GlobalStringSlice("storage-option")
}
if ctx.GlobalIsSet("default-transport") {
config.DefaultTransport = ctx.GlobalString("default-transport")
} }
if ctx.GlobalIsSet("listen") { if ctx.GlobalIsSet("listen") {
config.Listen = ctx.GlobalString("listen") config.Listen = ctx.GlobalString("listen")
@ -75,6 +90,26 @@ func mergeConfig(config *server.Config, ctx *cli.Context) error {
return nil return nil
} }
func catchShutdown(gserver *grpc.Server, sserver *server.Server, signalled *bool) {
sig := make(chan os.Signal, 10)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
go func() {
for s := range sig {
switch s {
case syscall.SIGINT:
logrus.Debugf("Caught SIGINT")
case syscall.SIGTERM:
logrus.Debugf("Caught SIGTERM")
default:
continue
}
*signalled = true
gserver.GracefulStop()
return
}
}()
}
func main() { func main() {
if reexec.Init() { if reexec.Init() {
return return
@ -97,10 +132,6 @@ func main() {
Name: "conmon", Name: "conmon",
Usage: "path to the conmon executable", Usage: "path to the conmon executable",
}, },
cli.StringFlag{
Name: "containerdir",
Usage: "ocid container dir",
},
cli.BoolFlag{ cli.BoolFlag{
Name: "debug", Name: "debug",
Usage: "enable debug output for logging", Usage: "enable debug output for logging",
@ -120,20 +151,40 @@ func main() {
Usage: "set the format used by logs ('text' (default), or 'json')", Usage: "set the format used by logs ('text' (default), or 'json')",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "pause", Name: "pause-command",
Usage: "path to the pause executable", Usage: "name of the pause command in the pause image",
},
cli.StringFlag{
Name: "pause-image",
Usage: "name of the pause image",
},
cli.StringFlag{
Name: "signature-policy",
Usage: "path to signature policy file",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "root", Name: "root",
Usage: "ocid root dir", Usage: "ocid root dir",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "runtime", Name: "runroot",
Usage: "OCI runtime path", Usage: "ocid state dir",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "sandboxdir", Name: "storage-driver",
Usage: "ocid pod sandbox dir", Usage: "storage driver",
},
cli.StringSliceFlag{
Name: "storage-option",
Usage: "storage driver option",
},
cli.StringFlag{
Name: "default-transport",
Usage: "default transport",
},
cli.StringFlag{
Name: "runtime",
Usage: "OCI runtime path",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "seccomp-profile", Name: "seccomp-profile",
@ -236,13 +287,24 @@ func main() {
logrus.Fatal(err) logrus.Fatal(err)
} }
graceful := false
catchShutdown(s, service, &graceful)
runtime.RegisterRuntimeServiceServer(s, service) runtime.RegisterRuntimeServiceServer(s, service)
runtime.RegisterImageServiceServer(s, service) runtime.RegisterImageServiceServer(s, service)
// after the daemon is done setting up we can notify systemd api // after the daemon is done setting up we can notify systemd api
notifySystem() notifySystem()
if err := s.Serve(lis); err != nil { err = s.Serve(lis)
if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
err = nil
}
if err2 := service.Shutdown(); err2 != nil {
logrus.Infof("error shutting down layer storage: %v", err2)
}
if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
return nil return nil

View file

@ -8,16 +8,20 @@ ocid - Enable OCI Kubernetes Container Runtime daemon
**ocid** **ocid**
[**--config**=[*value*]] [**--config**=[*value*]]
[**--conmon**=[*value*]] [**--conmon**=[*value*]]
[**--containerdir**=[*value*]]
[**--debug**] [**--debug**]
[**--default-transport**=[*value*]]
[**--help**|**-h**] [**--help**|**-h**]
[**--listen**=[*value*]] [**--listen**=[*value*]]
[**--log**=[*value*]] [**--log**=[*value*]]
[**--log-format value**] [**--log-format value**]
[**--pause**=[*value*]] [**--pause-command**=[*value*]]
[**--pause-image**=[*value*]]
[**--root**=[*value*]] [**--root**=[*value*]]
[**--runroot**=[*value*]]
[**--runtime**=[*value*]] [**--runtime**=[*value*]]
[**--sandboxdir**=[*value*]] [**--signature-policy**=[*value*]]
[**--storage-driver**=[*value*]]
[**--storage-option**=[*value*]]
[**--selinux**] [**--selinux**]
[**--seccomp-profile**=[*value*]] [**--seccomp-profile**=[*value*]]
[**--apparmor-profile**=[*value*]] [**--apparmor-profile**=[*value*]]
@ -43,18 +47,21 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
# GLOBAL OPTIONS # GLOBAL OPTIONS
**--apparmor_profile**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default")
**--config**="" **--config**=""
path to configuration file path to configuration file
**--conmon**="" **--conmon**=""
path to the conmon executable (default: "/usr/libexec/ocid/conmon") path to the conmon executable (default: "/usr/libexec/ocid/conmon")
**--containerdir**=""
OCID container dir (default: "/var/lib/ocid/containers")
**--debug** **--debug**
Enable debug output for logging Enable debug output for logging
**--default-transport**
A prefix to prepend to image names that can't be pulled as-is.
**--help, -h** **--help, -h**
Print usage statement Print usage statement
@ -67,32 +74,41 @@ ocid is meant to provide an integration path between OCI conformant runtimes and
**--log-format**="" **--log-format**=""
Set the format used by logs ('text' (default), or 'json') (default: "text") Set the format used by logs ('text' (default), or 'json') (default: "text")
**--pause**="" **--pause-command**=""
Path to the pause executable (default: "/usr/libexec/ocid/pause") Path to the pause executable in the pause image (default: "/pause")
**--pause-image**=""
Image which contains the pause executable (default: "kubernetes/pause")
**--root**="" **--root**=""
OCID root dir (default: "/var/lib/ocid") OCID root dir (default: "/var/lib/containers")
**--runroot**=""
OCID state dir (default: "/var/run/containers")
**--runtime**="" **--runtime**=""
OCI runtime path (default: "/usr/bin/runc") OCI runtime path (default: "/usr/bin/runc")
**--sandboxdir**=""
OCID pod sandbox dir (default: "/var/lib/ocid/sandboxes")
**--selinux**=*true*|*false* **--selinux**=*true*|*false*
Enable selinux support (default: false) Enable selinux support (default: false)
**--seccomp_profile**="" **--seccomp-profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json") Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
**--apparmor_profile**="" **--signature-policy**=""
Name of the apparmor profile to be used as the runtime's default (default: "ocid-default") Path to the signature policy json file (default: "", to use the system-wide default)
**--storage-driver**
OCI storage driver (default: "devicemapper")
**--storage-option**
OCI storage driver option (no default)
**--cni-config-dir**="" **--cni-config-dir**=""
CNI configuration files directory (defautl: "/etc/cni/net.d/") CNI configuration files directory (default: "/etc/cni/net.d/")
**--cni-plugin-dir**="" **--cni-plugin-dir**=""
CNI plugin binaries directory (defautl: "/opt/cni/bin/") CNI plugin binaries directory (default: "/opt/cni/bin/")
**--version, -v** **--version, -v**
Print the version Print the version

View file

@ -29,15 +29,17 @@ No bare options are used. The format of TOML can be simplified to:
The `ocid` table supports the following options: The `ocid` table supports the following options:
**container_dir**=""
OCID container dir (default: "/var/lib/ocid/containers")
**root**="" **root**=""
OCID root dir (default: "/var/lib/ocid") OCID root dir (default: "/var/lib/containers")
**sandbox_dir**="" **runroot**=""
OCID pod sandbox dir (default: "/var/lib/ocid/sandboxes") OCID state dir (default: "/var/run/containers")
**storage_driver**=""
OCID storage driver (default is "devicemapper")
**storage_option**=[]
OCID storage driver option list (no default)
## OCID.API TABLE ## OCID.API TABLE
@ -58,6 +60,9 @@ The `ocid` table supports the following options:
**selinux**=*true*|*false* **selinux**=*true*|*false*
Enable selinux support (default: false) Enable selinux support (default: false)
**signature_policy**=""
Path to the signature policy json file (default: "", to use the system-wide default)
**seccomp_profile**="" **seccomp_profile**=""
Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json") Path to the seccomp json profile to be used as the runtime's default (default: "/etc/ocid/seccomp.json")
@ -66,8 +71,14 @@ The `ocid` table supports the following options:
## OCID.IMAGE TABLE ## OCID.IMAGE TABLE
**pause**="" **default_transport**
Path to the pause executable (default: "/usr/libexec/ocid/pause") A prefix to prepend to image names that can't be pulled as-is (default: "docker://")
**pause_command**=""
Path to the pause executable in the pause image (default: "/pause")
**pause_image**=""
Image which contains the pause executable (default: "kubernetes/pause")
## OCID.NETWORK TABLE ## OCID.NETWORK TABLE

View file

@ -60,7 +60,7 @@ clean() {
local packages=($(GOPATH=$original_GOPATH go list -e ./... | grep -v "^${PROJECT}/vendor")) local packages=($(GOPATH=$original_GOPATH go list -e ./... | grep -v "^${PROJECT}/vendor"))
local platforms=( linux/amd64 linux/386 ) local platforms=( linux/amd64 linux/386 )
local buildTags=( seccomp ) local buildTagSets=( seccomp )
echo echo
@ -68,10 +68,12 @@ clean() {
local IFS=$'\n' local IFS=$'\n'
local imports=( $( local imports=( $(
for platform in "${platforms[@]}"; do for platform in "${platforms[@]}"; do
export GOOS="${platform%/*}"; for buildTags in "" "${buildTagSets[@]}"; do
export GOARCH="${platform##*/}"; export GOOS="${platform%/*}";
go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}" export GOARCH="${platform##*/}";
go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}" go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}"
go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}"
done
done | grep -vE "^${PROJECT}" | sort -u done | grep -vE "^${PROJECT}" | sort -u
) ) ) )
# .TestImports does not include indirect dependencies, so do one more iteration. # .TestImports does not include indirect dependencies, so do one more iteration.

View file

@ -89,5 +89,13 @@ clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
clone git github.com/gorilla/context v1.1 clone git github.com/gorilla/context v1.1
clone git golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 https://github.com/golang/sys.git clone git golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 https://github.com/golang/sys.git
clone git github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 clone git github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
clone git github.com/mistifyio/go-zfs master
clone git github.com/pborman/uuid master
clone git github.com/mtrmac/gpgme master
clone git gopkg.in/cheggaaa/pb.v1 master
clone git github.com/mattn/go-runewidth master
clone git github.com/docker/engine-api v0.4.0
clone git github.com/pkg/errors master
clone git github.com/opencontainers/go-digest master
clean clean

View file

@ -34,11 +34,10 @@ const (
) )
// New creates a new Runtime with options provided // New creates a new Runtime with options provided
func New(runtimePath string, containerDir string, conmonPath string, conmonEnv []string, cgroupManager string) (*Runtime, error) { func New(runtimePath string, conmonPath string, conmonEnv []string, cgroupManager string) (*Runtime, error) {
r := &Runtime{ r := &Runtime{
name: filepath.Base(runtimePath), name: filepath.Base(runtimePath),
path: runtimePath, path: runtimePath,
containerDir: containerDir,
conmonPath: conmonPath, conmonPath: conmonPath,
conmonEnv: conmonEnv, conmonEnv: conmonEnv,
cgroupManager: cgroupManager, cgroupManager: cgroupManager,
@ -50,7 +49,6 @@ func New(runtimePath string, containerDir string, conmonPath string, conmonEnv [
type Runtime struct { type Runtime struct {
name string name string
path string path string
containerDir string
conmonPath string conmonPath string
conmonEnv []string conmonEnv []string
cgroupManager string cgroupManager string
@ -76,11 +74,6 @@ func (r *Runtime) Path() string {
return r.path return r.path
} }
// ContainerDir returns the path to the base directory for storing container configurations
func (r *Runtime) ContainerDir() string {
return r.containerDir
}
// Version returns the version of the OCI Runtime // Version returns the version of the OCI Runtime
func (r *Runtime) Version() (string, error) { func (r *Runtime) Version() (string, error) {
runtimeVersion, err := getOCIVersion(r.path, "-v") runtimeVersion, err := getOCIVersion(r.path, "-v")

View file

@ -3,6 +3,7 @@ package storage
import ( import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"time" "time"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
@ -38,7 +39,8 @@ var (
) )
type runtimeService struct { type runtimeService struct {
image ImageServer image ImageServer
pauseImage string
} }
// ContainerInfo wraps a subset of information about a container: its ID and // ContainerInfo wraps a subset of information about a container: its ID and
@ -176,12 +178,7 @@ func (r *runtimeService) createContainerOrPodSandbox(systemContext *types.System
} }
img, err = istorage.Transport.GetStoreImage(r.image.GetStore(), ref) img, err = istorage.Transport.GetStoreImage(r.image.GetStore(), ref)
} }
if err != nil && err != storage.ErrImageUnknown { if img == nil && err == storage.ErrImageUnknown && imageName == r.pauseImage {
return ContainerInfo{}, err
}
// Pull the image down if we don't already have it.
if err == storage.ErrImageUnknown {
image := imageID image := imageID
if imageName != "" { if imageName != "" {
image = imageName image = imageName
@ -200,6 +197,15 @@ func (r *runtimeService) createContainerOrPodSandbox(systemContext *types.System
} }
logrus.Debugf("successfully pulled image %q", image) logrus.Debugf("successfully pulled image %q", image)
} }
if img == nil && err == storage.ErrImageUnknown {
if imageID == "" {
return ContainerInfo{}, fmt.Errorf("image %q not present in image store", imageName)
}
if imageName == "" {
return ContainerInfo{}, fmt.Errorf("image with ID %q not present in image store", imageID)
}
return ContainerInfo{}, fmt.Errorf("image %q with ID %q not present in image store", imageName, imageID)
}
// Pull out a copy of the image's configuration. // Pull out a copy of the image's configuration.
image, err := ref.NewImage(systemContext) image, err := ref.NewImage(systemContext)
@ -449,8 +455,9 @@ func (r *runtimeService) GetRunDir(id string) (string, error) {
// GetRuntimeService returns a RuntimeServer that uses the passed-in image // GetRuntimeService returns a RuntimeServer that uses the passed-in image
// service to pull and manage images, and its store to manage containers based // service to pull and manage images, and its store to manage containers based
// on those images. // on those images.
func GetRuntimeService(image ImageServer) RuntimeServer { func GetRuntimeService(image ImageServer, pauseImage string) RuntimeServer {
return &runtimeService{ return &runtimeService{
image: image, image: image,
pauseImage: pauseImage,
} }
} }

View file

@ -3,7 +3,6 @@ package server
import ( import (
"bytes" "bytes"
"io/ioutil" "io/ioutil"
"path/filepath"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/opencontainers/runc/libcontainer/selinux" "github.com/opencontainers/runc/libcontainer/selinux"
@ -11,16 +10,16 @@ import (
// Default paths if none are specified // Default paths if none are specified
const ( const (
ocidRoot = "/var/lib/ocid" ocidRoot = "/var/lib/ocid"
conmonPath = "/usr/libexec/ocid/conmon" ocidRunRoot = "/var/run/containers"
pausePath = "/usr/libexec/ocid/pause" conmonPath = "/usr/libexec/ocid/conmon"
seccompProfilePath = "/etc/ocid/seccomp.json" pauseImage = "kubernetes/pause"
cniConfigDir = "/etc/cni/net.d/" pauseCommand = "/pause"
cniBinDir = "/opt/cni/bin/" defaultTransport = "docker://"
) seccompProfilePath = "/etc/ocid/seccomp.json"
const (
apparmorProfileName = "ocid-default" apparmorProfileName = "ocid-default"
cniConfigDir = "/etc/cni/net.d/"
cniBinDir = "/opt/cni/bin/"
cgroupManager = "cgroupfs" cgroupManager = "cgroupfs"
) )
@ -40,17 +39,20 @@ type Config struct {
// RootConfig represents the root of the "ocid" TOML config table. // RootConfig represents the root of the "ocid" TOML config table.
type RootConfig struct { type RootConfig struct {
// Root is a path to the "root directory" where all information not // Root is a path to the "root directory" where data not
// explicitly handled by other options will be stored. // explicitly handled by other options will be stored.
Root string `toml:"root"` Root string `toml:"root"`
// SandboxDir is the directory where ocid will store all of its sandbox // RunRoot is a path to the "run directory" where state information not
// state and other information. // explicitly handled by other options will be stored.
SandboxDir string `toml:"sandbox_dir"` RunRoot string `toml:"runroot"`
// ContainerDir is the directory where ocid will store all of its container // Storage is the name of the storage driver which handles actually
// state and other information. // storing the contents of containers.
ContainerDir string `toml:"container_dir"` Storage string `toml:"storage_driver"`
// StorageOption is a list of storage driver specific options.
StorageOptions []string `toml:"storage_option"`
// LogDir is the default log directory were all logs will go unless kubelet // LogDir is the default log directory were all logs will go unless kubelet
// tells us to put them somewhere else. // tells us to put them somewhere else.
@ -98,17 +100,21 @@ type RuntimeConfig struct {
// ImageConfig represents the "ocid.image" TOML config table. // ImageConfig represents the "ocid.image" TOML config table.
type ImageConfig struct { type ImageConfig struct {
// Pause is the path to the statically linked pause container binary, used // DefaultTransport is a value we prefix to image names that fail to
// as the entrypoint for infra containers. // validate source references.
// DefaultTransport string `toml:"default_transport"`
// TODO(cyphar): This should be replaced with a path to an OCI image // PauseImage is the name of an image which we use to instantiate infra
// bundle, once the OCI image/storage code has been implemented. // containers.
Pause string `toml:"pause"` PauseImage string `toml:"pause_image"`
// PauseCommand is the path of the binary we run in an infra
// ImageStore is the directory where the ocid image store will be stored. // container that's been instantiated using PauseImage.
// TODO: This is currently not really used because we don't have PauseCommand string `toml:"pause_command"`
// containers/storage integrated. // SignaturePolicyPath is the name of the file which decides what sort
ImageDir string `toml:"image_dir"` // of policy we use when deciding whether or not to trust an image that
// we've pulled. Outside of testing situations, it is strongly advised
// that this be left unspecified so that the default system-wide policy
// will be used.
SignaturePolicyPath string `toml:"signature_policy"`
} }
// NetworkConfig represents the "ocid.network" TOML config table // NetworkConfig represents the "ocid.network" TOML config table
@ -191,10 +197,9 @@ func (c *Config) ToFile(path string) error {
func DefaultConfig() *Config { func DefaultConfig() *Config {
return &Config{ return &Config{
RootConfig: RootConfig{ RootConfig: RootConfig{
Root: ocidRoot, Root: ocidRoot,
SandboxDir: filepath.Join(ocidRoot, "sandboxes"), RunRoot: ocidRunRoot,
ContainerDir: filepath.Join(ocidRoot, "containers"), LogDir: "/var/log/ocid/pods",
LogDir: "/var/log/ocid/pods",
}, },
APIConfig: APIConfig{ APIConfig: APIConfig{
Listen: "/var/run/ocid.sock", Listen: "/var/run/ocid.sock",
@ -211,8 +216,10 @@ func DefaultConfig() *Config {
CgroupManager: cgroupManager, CgroupManager: cgroupManager,
}, },
ImageConfig: ImageConfig{ ImageConfig: ImageConfig{
Pause: pausePath, DefaultTransport: defaultTransport,
ImageDir: filepath.Join(ocidRoot, "store"), PauseImage: pauseImage,
PauseCommand: pauseCommand,
SignaturePolicyPath: "",
}, },
NetworkConfig: NetworkConfig{ NetworkConfig: NetworkConfig{
NetworkDir: cniConfigDir, NetworkDir: cniConfigDir,

View file

@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"os"
"path/filepath" "path/filepath"
"strings" "strings"
"syscall" "syscall"
@ -14,7 +13,6 @@ import (
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/server/apparmor" "github.com/kubernetes-incubator/cri-o/server/apparmor"
"github.com/kubernetes-incubator/cri-o/server/seccomp" "github.com/kubernetes-incubator/cri-o/server/seccomp"
"github.com/kubernetes-incubator/cri-o/utils"
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -30,6 +28,7 @@ const (
// CreateContainer creates a new container in specified PodSandbox // CreateContainer creates a new container in specified PodSandbox
func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) { func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (res *pb.CreateContainerResponse, err error) {
logrus.Debugf("CreateContainerRequest %+v", req) logrus.Debugf("CreateContainerRequest %+v", req)
s.Update()
sbID := req.GetPodSandboxId() sbID := req.GetPodSandboxId()
if sbID == "" { if sbID == "" {
return nil, fmt.Errorf("PodSandboxId should not be empty") return nil, fmt.Errorf("PodSandboxId should not be empty")
@ -62,30 +61,24 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
return nil, err return nil, err
} }
// containerDir is the dir for the container bundle.
containerDir := filepath.Join(s.runtime.ContainerDir(), containerID)
defer func() { defer func() {
if err != nil { if err != nil {
s.releaseContainerName(containerName) s.releaseContainerName(containerName)
err1 := os.RemoveAll(containerDir)
if err1 != nil {
logrus.Warnf("Failed to cleanup container directory: %v", err1)
}
} }
}() }()
if _, err = os.Stat(containerDir); err == nil { container, err := s.createSandboxContainer(ctx, containerID, containerName, sb, req.GetSandboxConfig(), containerConfig)
return nil, fmt.Errorf("container (%s) already exists", containerDir)
}
if err = os.MkdirAll(containerDir, 0755); err != nil {
return nil, err
}
container, err := s.createSandboxContainer(containerID, containerName, sb, containerDir, containerConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() {
if err != nil {
err2 := s.storage.DeleteContainer(containerID)
if err2 != nil {
logrus.Warnf("Failed to cleanup container directory: %v", err2)
}
}
}()
if err = s.runtime.CreateContainer(container); err != nil { if err = s.runtime.CreateContainer(container); err != nil {
return nil, err return nil, err
@ -110,23 +103,21 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq
return resp, nil return resp, nil
} }
func (s *Server) createSandboxContainer(containerID string, containerName string, sb *sandbox, containerDir string, containerConfig *pb.ContainerConfig) (*oci.Container, error) { func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerName string, sb *sandbox, SandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (*oci.Container, error) {
if sb == nil { if sb == nil {
return nil, errors.New("createSandboxContainer needs a sandbox") return nil, errors.New("createSandboxContainer needs a sandbox")
} }
// TODO: factor generating/updating the spec into something other projects can vendor
// creates a spec Generator with the default spec. // creates a spec Generator with the default spec.
specgen := generate.New() specgen := generate.New()
// by default, the root path is an empty string.
// here set it to be "rootfs".
specgen.SetRootPath("rootfs")
processArgs := []string{} processArgs := []string{}
commands := containerConfig.GetCommand() commands := containerConfig.GetCommand()
args := containerConfig.GetArgs() args := containerConfig.GetArgs()
if commands == nil && args == nil { if commands == nil && args == nil {
// TODO: override with image's config in #189 processArgs = nil
processArgs = []string{"/bin/sh"}
} }
if commands != nil { if commands != nil {
processArgs = append(processArgs, commands...) processArgs = append(processArgs, commands...)
@ -135,8 +126,6 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
processArgs = append(processArgs, args...) processArgs = append(processArgs, args...)
} }
specgen.SetProcessArgs(processArgs)
cwd := containerConfig.GetWorkingDir() cwd := containerConfig.GetWorkingDir()
if cwd == "" { if cwd == "" {
cwd = "/" cwd = "/"
@ -151,8 +140,7 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
if key == "" { if key == "" {
continue continue
} }
env := fmt.Sprintf("%s=%s", key, value) specgen.AddProcessEnv(key, value)
specgen.AddProcessEnv(env)
} }
} }
@ -358,17 +346,46 @@ func (s *Server) createSandboxContainer(containerID string, containerName string
return nil, err return nil, err
} }
if err = specgen.SaveToFile(filepath.Join(containerDir, "config.json"), generate.ExportOptions{}); err != nil { metaname := metadata.GetName()
attempt := metadata.GetAttempt()
containerInfo, err := s.storage.CreateContainer(s.imageContext,
sb.name, sb.id,
image, "",
containerName, containerID,
metaname,
attempt,
sb.mountLabel,
nil)
if err != nil {
return nil, err return nil, err
} }
// TODO: copy the rootfs into the bundle. mountPoint, err := s.storage.StartContainer(containerID)
// Currently, utils.CreateFakeRootfs is used to populate the rootfs. if err != nil {
if err = utils.CreateFakeRootfs(containerDir, image); err != nil { return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err)
}
if processArgs == nil {
if containerInfo.Config != nil && len(containerInfo.Config.Config.Cmd) > 0 {
processArgs = containerInfo.Config.Config.Cmd
} else {
processArgs = []string{"/bin/sh"}
}
}
specgen.SetProcessArgs(processArgs)
// by default, the root path is an empty string. set it now.
specgen.SetRootPath(mountPoint)
saveOptions := generate.ExportOptions{}
if err = specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil {
return nil, err
}
if err = specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil {
return nil, err return nil, err
} }
container, err := oci.NewContainer(containerID, containerName, containerDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty()) container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, sb.netNs(), labels, annotations, imageSpec, metadata, sb.id, containerConfig.GetTty())
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -29,6 +29,7 @@ func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
// ListContainers lists all containers by filters. // ListContainers lists all containers by filters.
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) { func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
logrus.Debugf("ListContainersRequest %+v", req) logrus.Debugf("ListContainersRequest %+v", req)
s.Update()
var ctrs []*pb.Container var ctrs []*pb.Container
filter := req.Filter filter := req.Filter
ctrList := s.state.containers.List() ctrList := s.state.containers.List()

View file

@ -2,8 +2,6 @@ package server
import ( import (
"fmt" "fmt"
"os"
"path/filepath"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
@ -15,6 +13,7 @@ import (
// should be force removed. // should be force removed.
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) { func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
logrus.Debugf("RemoveContainerRequest %+v", req) logrus.Debugf("RemoveContainerRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -35,9 +34,12 @@ func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerReq
return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to delete container %s: %v", c.ID(), err)
} }
containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID()) if err := s.storage.StopContainer(c.ID()); err != nil {
if err := os.RemoveAll(containerDir); err != nil { return nil, fmt.Errorf("failed to unmount container %s: %v", c.ID(), err)
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.ID(), err) }
if err := s.storage.DeleteContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to delete storage for container %s: %v", c.ID(), err)
} }
s.releaseContainerName(c.Name()) s.releaseContainerName(c.Name())

View file

@ -11,12 +11,13 @@ import (
// StartContainer starts the container. // StartContainer starts the container.
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) { func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
logrus.Debugf("StartContainerRequest %+v", req) logrus.Debugf("StartContainerRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := s.runtime.StartContainer(c); err != nil { if err = s.runtime.StartContainer(c); err != nil {
return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err) return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
} }

View file

@ -10,6 +10,7 @@ import (
// ContainerStatus returns status of the container. // ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) { func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
logrus.Debugf("ContainerStatusRequest %+v", req) logrus.Debugf("ContainerStatusRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -12,6 +12,7 @@ import (
// StopContainer stops a running container with a grace period (i.e., timeout). // StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) { func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
logrus.Debugf("StopContainerRequest %+v", req) logrus.Debugf("StopContainerRequest %+v", req)
s.Update()
c, err := s.getContainerFromRequest(req) c, err := s.getContainerFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -8,9 +8,27 @@ import (
// ListImages lists existing images. // ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) { func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
logrus.Debugf("ListImages: %+v", req) logrus.Debugf("ListImagesRequest: %+v", req)
// TODO filter := ""
// containers/storage will take care of this by looking inside /var/lib/ocid/images reqFilter := req.GetFilter()
// and listing images. if reqFilter != nil {
return &pb.ListImagesResponse{}, nil filterImage := reqFilter.GetImage()
if filterImage != nil {
filter = filterImage.GetImage()
}
}
results, err := s.images.ListImages(filter)
if err != nil {
return nil, err
}
response := pb.ListImagesResponse{}
for _, result := range results {
response.Images = append(response.Images, &pb.Image{
Id: sPtr(result.ID),
RepoTags: result.Names,
Size_: result.Size,
})
}
logrus.Debugf("ListImagesResponse: %+v", response)
return &response, nil
} }

View file

@ -1,86 +1,30 @@
package server package server
import ( import (
"errors"
"io"
"os"
"path/filepath"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/directory" "github.com/containers/image/copy"
"github.com/containers/image/image"
"github.com/containers/image/transports"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// PullImage pulls a image with authentication config. // PullImage pulls a image with authentication config.
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) { func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
logrus.Debugf("PullImage: %+v", req) logrus.Debugf("PullImageRequest: %+v", req)
img := req.GetImage().GetImage() // TODO(runcom?): deal with AuthConfig in req.GetAuth()
if img == "" {
return nil, errors.New("got empty imagespec name")
}
// TODO(runcom): deal with AuthConfig in req.GetAuth()
// TODO(mrunalp,runcom): why do we need the SandboxConfig here?
// how do we pull in a specified sandbox?
tr, err := transports.ParseImageName(img)
if err != nil {
return nil, err
}
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
src, err := tr.NewImageSource(nil, nil)
if err != nil {
return nil, err
}
i, err := image.FromSource(src)
if err != nil {
return nil, err
}
blobs := i.LayerInfos()
config := i.ConfigInfo()
if config.Digest != "" {
blobs = append(blobs, config)
}
if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil {
return nil, err
}
dir, err := directory.NewReference(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()))
if err != nil {
return nil, err
}
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
dest, err := dir.NewImageDestination(nil)
if err != nil {
return nil, err
}
// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
for _, b := range blobs {
// TODO(runcom,nalin): we need do-then-commit to later purge on error
var r io.ReadCloser
r, _, err = src.GetBlob(b)
if err != nil {
return nil, err
}
if _, err = dest.PutBlob(r, b); err != nil {
r.Close()
return nil, err
}
r.Close()
}
// save manifest
m, _, err := i.Manifest()
if err != nil {
return nil, err
}
if err := dest.PutManifest(m); err != nil {
return nil, err
}
// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://) // TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
image := ""
return &pb.PullImageResponse{}, nil img := req.GetImage()
if img != nil {
image = img.GetImage()
}
options := &copy.Options{}
_, err := s.images.PullImage(s.imageContext, image, options)
if err != nil {
return nil, err
}
resp := &pb.PullImageResponse{
ImageRef: &image,
}
logrus.Debugf("PullImageResponse: %+v", resp)
return resp, nil
} }

View file

@ -1,6 +1,8 @@
package server package server
import ( import (
"fmt"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@ -8,6 +10,20 @@ import (
// RemoveImage removes the image. // RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) { func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
logrus.Debugf("RemoveImage: %+v", req) logrus.Debugf("RemoveImageRequest: %+v", req)
return &pb.RemoveImageResponse{}, nil image := ""
img := req.GetImage()
if img != nil {
image = img.GetImage()
}
if image == "" {
return nil, fmt.Errorf("no image specified")
}
err := s.images.RemoveImage(s.imageContext, image)
if err != nil {
return nil, err
}
resp := &pb.RemoveImageResponse{}
logrus.Debugf("RemoveImageResponse: %+v", resp)
return resp, nil
} }

View file

@ -1,16 +1,39 @@
package server package server
import ( import (
"fmt"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage/storage"
"golang.org/x/net/context" "golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// ImageStatus returns the status of the image. // ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) { func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
logrus.Debugf("ImageStatus: %+v", req) logrus.Debugf("ImageStatusRequest: %+v", req)
// TODO image := ""
// containers/storage will take care of this by looking inside /var/lib/ocid/images img := req.GetImage()
// and getting the image status if img != nil {
return &pb.ImageStatusResponse{}, nil image = img.GetImage()
}
if image == "" {
return nil, fmt.Errorf("no image specified")
}
status, err := s.images.ImageStatus(s.imageContext, image)
if err != nil {
if err == storage.ErrImageUnknown {
return &pb.ImageStatusResponse{}, nil
}
return nil, err
}
resp := &pb.ImageStatusResponse{
Image: &pb.Image{
Id: &status.ID,
RepoTags: status.Names,
Size_: status.Size,
},
}
logrus.Debugf("ImageStatusResponse: %+v", resp)
return resp, nil
} }

View file

@ -145,6 +145,7 @@ const (
podDefaultNamespace = "default" podDefaultNamespace = "default"
defaultShmSize = 64 * 1024 * 1024 defaultShmSize = 64 * 1024 * 1024
nsRunDir = "/var/run/netns" nsRunDir = "/var/run/netns"
podInfraCommand = "/pause"
) )
var ( var (
@ -277,7 +278,7 @@ func (s *Server) getPodSandboxFromRequest(req podSandboxRequest) (*sandbox, erro
sb := s.getSandbox(sandboxID) sb := s.getSandbox(sandboxID)
if sb == nil { if sb == nil {
return nil, fmt.Errorf("specified sandbox not found: %s", sandboxID) return nil, fmt.Errorf("specified pod sandbox not found: %s", sandboxID)
} }
return sb, nil return sb, nil
} }

View file

@ -29,6 +29,7 @@ func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
// ListPodSandbox returns a list of SandBoxes. // ListPodSandbox returns a list of SandBoxes.
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) { func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
logrus.Debugf("ListPodSandboxRequest %+v", req) logrus.Debugf("ListPodSandboxRequest %+v", req)
s.Update()
var pods []*pb.PodSandbox var pods []*pb.PodSandbox
var podList []*sandbox var podList []*sandbox
for _, sb := range s.state.sandboxes { for _, sb := range s.state.sandboxes {

View file

@ -2,8 +2,6 @@ package server
import ( import (
"fmt" "fmt"
"os"
"path/filepath"
"syscall" "syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
@ -17,6 +15,7 @@ import (
// sandbox, they should be force deleted. // sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) { func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
logrus.Debugf("RemovePodSandboxRequest %+v", req) logrus.Debugf("RemovePodSandboxRequest %+v", req)
s.Update()
sb, err := s.getPodSandboxFromRequest(req) sb, err := s.getPodSandboxFromRequest(req)
if err != nil { if err != nil {
if err == errSandboxIDEmpty { if err == errSandboxIDEmpty {
@ -46,16 +45,18 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
} }
if err := s.runtime.DeleteContainer(c); err != nil { if err := s.runtime.DeleteContainer(c); err != nil {
return nil, fmt.Errorf("failed to delete container %s in sandbox %s: %v", c.Name(), sb.id, err) return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
if c == podInfraContainer { if c == podInfraContainer {
continue continue
} }
containerDir := filepath.Join(s.runtime.ContainerDir(), c.ID()) if err := s.storage.StopContainer(c.ID()); err != nil {
if err := os.RemoveAll(containerDir); err != nil { return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
return nil, fmt.Errorf("failed to remove container %s directory: %v", c.Name(), err) }
if err := s.storage.DeleteContainer(c.ID()); err != nil {
return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
s.releaseContainerName(c.Name()) s.releaseContainerName(c.Name())
@ -81,10 +82,13 @@ func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxR
} }
// Remove the files related to the sandbox // Remove the files related to the sandbox
podSandboxDir := filepath.Join(s.config.SandboxDir, sb.id) if err := s.storage.StopContainer(sb.id); err != nil {
if err := os.RemoveAll(podSandboxDir); err != nil { return nil, fmt.Errorf("failed to delete sandbox container in pod sandbox %s: %v", sb.id, err)
return nil, fmt.Errorf("failed to remove sandbox %s directory: %v", sb.id, err)
} }
if err := s.storage.RemovePodSandbox(sb.id); err != nil {
return nil, fmt.Errorf("failed to remove pod sandbox %s: %v", sb.id, err)
}
s.releaseContainerName(podInfraContainer.Name()) s.releaseContainerName(podInfraContainer.Name())
s.removeContainer(podInfraContainer) s.removeContainer(podInfraContainer)
sb.infraContainer = nil sb.infraContainer = nil

View file

@ -9,8 +9,8 @@ import (
"syscall" "syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/storage/storage"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/utils"
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -54,6 +54,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
if err != nil { if err != nil {
return nil, err return nil, err
} }
_, containerName, err := s.generateContainerIDandName(name, "infra", attempt)
if err != nil {
return nil, err
}
defer func() { defer func() {
if err != nil { if err != nil {
@ -67,39 +71,51 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
defer func() { defer func() {
if err != nil { if err != nil {
if err = s.podIDIndex.Delete(id); err != nil { if err2 := s.podIDIndex.Delete(id); err2 != nil {
logrus.Warnf("couldn't delete pod id %s from idIndex", id) logrus.Warnf("couldn't delete pod id %s from idIndex", id)
} }
} }
}() }()
podSandboxDir := filepath.Join(s.config.SandboxDir, id) podContainer, err := s.storage.CreatePodSandbox(s.imageContext,
if _, err = os.Stat(podSandboxDir); err == nil { name, id,
return nil, fmt.Errorf("pod sandbox (%s) already exists", podSandboxDir) s.config.PauseImage, "",
containerName,
req.GetConfig().GetMetadata().GetName(),
req.GetConfig().GetMetadata().GetUid(),
namespace,
attempt,
nil)
if err == storage.ErrDuplicateName {
return nil, fmt.Errorf("pod sandbox with name %q already exists", name)
}
if err != nil {
return nil, fmt.Errorf("error creating pod sandbox with name %q: %v", name, err)
} }
defer func() { defer func() {
if err != nil { if err != nil {
if err2 := os.RemoveAll(podSandboxDir); err2 != nil { if err2 := s.storage.RemovePodSandbox(id); err2 != nil {
logrus.Warnf("couldn't cleanup podSandboxDir %s: %v", podSandboxDir, err2) logrus.Warnf("couldn't cleanup pod sandbox %q: %v", id, err2)
} }
} }
}() }()
if err = os.MkdirAll(podSandboxDir, 0755); err != nil { // TODO: factor generating/updating the spec into something other projects can vendor
return nil, err
}
// creates a spec Generator with the default spec. // creates a spec Generator with the default spec.
g := generate.New() g := generate.New()
// TODO: Make the `graph/vfs` part of this configurable once the storage
// integration has been merged.
podInfraRootfs := filepath.Join(s.config.Root, "graph/vfs/pause")
// setup defaults for the pod sandbox // setup defaults for the pod sandbox
g.SetRootPath(filepath.Join(podInfraRootfs, "rootfs"))
g.SetRootReadonly(true) g.SetRootReadonly(true)
g.SetProcessArgs([]string{"/pause"}) if s.config.PauseCommand == "" {
if podContainer.Config != nil {
g.SetProcessArgs(podContainer.Config.Config.Cmd)
} else {
g.SetProcessArgs([]string{podInfraCommand})
}
} else {
g.SetProcessArgs([]string{s.config.PauseCommand})
}
// set hostname // set hostname
hostname := req.GetConfig().GetHostname() hostname := req.GetConfig().GetHostname()
@ -117,7 +133,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
dnsServers := req.GetConfig().GetDnsConfig().GetServers() dnsServers := req.GetConfig().GetDnsConfig().GetServers()
dnsSearches := req.GetConfig().GetDnsConfig().GetSearches() dnsSearches := req.GetConfig().GetDnsConfig().GetSearches()
dnsOptions := req.GetConfig().GetDnsConfig().GetOptions() dnsOptions := req.GetConfig().GetDnsConfig().GetOptions()
resolvPath := fmt.Sprintf("%s/resolv.conf", podSandboxDir) resolvPath := fmt.Sprintf("%s/resolv.conf", podContainer.RunDir)
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath) err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
if err != nil { if err != nil {
err1 := removeFile(resolvPath) err1 := removeFile(resolvPath)
@ -165,7 +181,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() { if req.GetConfig().GetLinux().GetSecurityContext().GetNamespaceOptions().GetHostIpc() {
shmPath = "/dev/shm" shmPath = "/dev/shm"
} else { } else {
shmPath, err = setupShm(podSandboxDir, mountLabel) shmPath, err = setupShm(podContainer.RunDir, mountLabel)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -178,7 +194,7 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
}() }()
} }
containerID, containerName, err := s.generateContainerIDandName(name, "infra", 0) err = s.setPodSandboxMountLabel(id, mountLabel)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -189,14 +205,14 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
} }
}() }()
if err = s.ctrIDIndex.Add(containerID); err != nil { if err = s.ctrIDIndex.Add(id); err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
if err = s.ctrIDIndex.Delete(containerID); err != nil { if err2 := s.ctrIDIndex.Delete(id); err2 != nil {
logrus.Warnf("couldn't delete ctr id %s from idIndex", containerID) logrus.Warnf("couldn't delete ctr id %s from idIndex", id)
} }
} }
}() }()
@ -207,8 +223,9 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
g.AddAnnotation("ocid/log_path", logDir) g.AddAnnotation("ocid/log_path", logDir)
g.AddAnnotation("ocid/name", name) g.AddAnnotation("ocid/name", name)
g.AddAnnotation("ocid/container_type", containerTypeSandbox) g.AddAnnotation("ocid/container_type", containerTypeSandbox)
g.AddAnnotation("ocid/sandbox_id", id)
g.AddAnnotation("ocid/container_name", containerName) g.AddAnnotation("ocid/container_name", containerName)
g.AddAnnotation("ocid/container_id", containerID) g.AddAnnotation("ocid/container_id", id)
g.AddAnnotation("ocid/shm_path", shmPath) g.AddAnnotation("ocid/shm_path", shmPath)
sb := &sandbox{ sb := &sandbox{
@ -246,11 +263,11 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
cgroupParent := req.GetConfig().GetLinux().GetCgroupParent() cgroupParent := req.GetConfig().GetLinux().GetCgroupParent()
if cgroupParent != "" { if cgroupParent != "" {
if s.config.CgroupManager == "systemd" { if s.config.CgroupManager == "systemd" {
cgPath := sb.cgroupParent + ":" + "ocid" + ":" + containerID cgPath := sb.cgroupParent + ":" + "ocid" + ":" + id
g.SetLinuxCgroupsPath(cgPath) g.SetLinuxCgroupsPath(cgPath)
} else { } else {
g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + containerID) g.SetLinuxCgroupsPath(sb.cgroupParent + "/" + id)
} }
sb.cgroupParent = cgroupParent sb.cgroupParent = cgroupParent
@ -308,23 +325,21 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
} }
} }
err = g.SaveToFile(filepath.Join(podSandboxDir, "config.json"), generate.ExportOptions{}) saveOptions := generate.ExportOptions{}
mountPoint, err := s.storage.StartContainer(id)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("failed to mount container %s in pod sandbox %s(%s): %v", containerName, sb.name, id, err)
}
g.SetRootPath(mountPoint)
err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
if err != nil {
return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.name, id, err)
}
if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.name, id, err)
} }
if _, err = os.Stat(podInfraRootfs); err != nil { container, err := oci.NewContainer(id, containerName, podContainer.RunDir, logDir, sb.netNs(), labels, annotations, nil, nil, id, false)
if os.IsNotExist(err) {
// TODO: Replace by rootfs creation API when it is ready
if err = utils.CreateInfraRootfs(podInfraRootfs, s.config.Pause); err != nil {
return nil, err
}
} else {
return nil, err
}
}
container, err := oci.NewContainer(containerID, containerName, podSandboxDir, podSandboxDir, sb.netNs(), labels, annotations, nil, nil, id, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -348,6 +363,19 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
return resp, nil return resp, nil
} }
func (s *Server) setPodSandboxMountLabel(id, mountLabel string) error {
storageMetadata, err := s.storage.GetContainerMetadata(id)
if err != nil {
return err
}
storageMetadata.SetMountLabel(mountLabel)
err = s.storage.SetContainerMetadata(id, storageMetadata)
if err != nil {
return err
}
return nil
}
func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) { func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mountLabel string, err error) {
processLabel = "" processLabel = ""
if selinuxOptions != nil { if selinuxOptions != nil {
@ -375,8 +403,8 @@ func getSELinuxLabels(selinuxOptions *pb.SELinuxOption) (processLabel string, mo
return label.InitLabels(label.DupSecOpt(processLabel)) return label.InitLabels(label.DupSecOpt(processLabel))
} }
func setupShm(podSandboxDir, mountLabel string) (shmPath string, err error) { func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, err error) {
shmPath = filepath.Join(podSandboxDir, "shm") shmPath = filepath.Join(podSandboxRunDir, "shm")
if err = os.Mkdir(shmPath, 0700); err != nil { if err = os.Mkdir(shmPath, 0700); err != nil {
return "", err return "", err
} }

View file

@ -10,6 +10,7 @@ import (
// PodSandboxStatus returns the Status of the PodSandbox. // PodSandboxStatus returns the Status of the PodSandbox.
func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) { func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusRequest) (*pb.PodSandboxStatusResponse, error) {
logrus.Debugf("PodSandboxStatusRequest %+v", req) logrus.Debugf("PodSandboxStatusRequest %+v", req)
s.Update()
sb, err := s.getPodSandboxFromRequest(req) sb, err := s.getPodSandboxFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -14,6 +14,7 @@ import (
// sandbox, they should be force terminated. // sandbox, they should be force terminated.
func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) { func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxRequest) (*pb.StopPodSandboxResponse, error) {
logrus.Debugf("StopPodSandboxRequest %+v", req) logrus.Debugf("StopPodSandboxRequest %+v", req)
s.Update()
sb, err := s.getPodSandboxFromRequest(req) sb, err := s.getPodSandboxFromRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -50,7 +51,7 @@ func (s *Server) StopPodSandbox(ctx context.Context, req *pb.StopPodSandboxReque
cStatus := s.runtime.ContainerStatus(c) cStatus := s.runtime.ContainerStatus(c)
if cStatus.Status != oci.ContainerStateStopped { if cStatus.Status != oci.ContainerStateStopped {
if err := s.runtime.StopContainer(c); err != nil { if err := s.runtime.StopContainer(c); err != nil {
return nil, fmt.Errorf("failed to stop container %s in sandbox %s: %v", c.Name(), sb.id, err) return nil, fmt.Errorf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.id, err)
} }
} }
} }

View file

@ -5,14 +5,16 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath"
"sync" "sync"
"syscall" "syscall"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/types"
sstorage "github.com/containers/storage/storage"
"github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/pkg/truncindex"
"github.com/kubernetes-incubator/cri-o/oci" "github.com/kubernetes-incubator/cri-o/oci"
"github.com/kubernetes-incubator/cri-o/pkg/storage"
"github.com/kubernetes-incubator/cri-o/server/apparmor" "github.com/kubernetes-incubator/cri-o/server/apparmor"
"github.com/kubernetes-incubator/cri-o/server/seccomp" "github.com/kubernetes-incubator/cri-o/server/seccomp"
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
@ -29,6 +31,9 @@ const (
type Server struct { type Server struct {
config Config config Config
runtime *oci.Runtime runtime *oci.Runtime
store sstorage.Store
images storage.ImageServer
storage storage.RuntimeServer
stateLock sync.Mutex stateLock sync.Mutex
state *serverState state *serverState
netPlugin ocicni.CNIPlugin netPlugin ocicni.CNIPlugin
@ -36,6 +41,7 @@ type Server struct {
podIDIndex *truncindex.TruncIndex podIDIndex *truncindex.TruncIndex
ctrNameIndex *registrar.Registrar ctrNameIndex *registrar.Registrar
ctrIDIndex *truncindex.TruncIndex ctrIDIndex *truncindex.TruncIndex
imageContext *types.SystemContext
seccompEnabled bool seccompEnabled bool
seccompProfile seccomp.Seccomp seccompProfile seccomp.Seccomp
@ -45,7 +51,7 @@ type Server struct {
} }
func (s *Server) loadContainer(id string) error { func (s *Server) loadContainer(id string) error {
config, err := ioutil.ReadFile(filepath.Join(s.runtime.ContainerDir(), id, "config.json")) config, err := s.store.GetFromContainerDirectory(id, "config.json")
if err != nil { if err != nil {
return err return err
} }
@ -62,21 +68,30 @@ func (s *Server) loadContainer(id string) error {
if err != nil { if err != nil {
return err return err
} }
defer func() {
if err != nil {
s.releaseContainerName(name)
}
}()
var metadata pb.ContainerMetadata var metadata pb.ContainerMetadata
if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil { if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
return err return err
} }
sb := s.getSandbox(m.Annotations["ocid/sandbox_id"]) sb := s.getSandbox(m.Annotations["ocid/sandbox_id"])
if sb == nil { if sb == nil {
logrus.Warnf("could not get sandbox with id %s, skipping", m.Annotations["ocid/sandbox_id"]) return fmt.Errorf("could not get sandbox with id %s, skipping", m.Annotations["ocid/sandbox_id"])
return nil
} }
var tty bool var tty bool
if v := m.Annotations["ocid/tty"]; v == "true" { if v := m.Annotations["ocid/tty"]; v == "true" {
tty = true tty = true
} }
containerPath := filepath.Join(s.runtime.ContainerDir(), id) containerPath, err := s.store.GetContainerRunDirectory(id)
if err != nil {
return err
}
var img *pb.ImageSpec var img *pb.ImageSpec
image, ok := m.Annotations["ocid/image"] image, ok := m.Annotations["ocid/image"]
@ -95,10 +110,10 @@ func (s *Server) loadContainer(id string) error {
if err != nil { if err != nil {
return err return err
} }
s.addContainer(ctr)
if err = s.runtime.UpdateStatus(ctr); err != nil { if err = s.runtime.UpdateStatus(ctr); err != nil {
logrus.Warnf("error updating status for container %s: %v", ctr.ID(), err) return fmt.Errorf("error updating status for container %s: %v", ctr.ID(), err)
} }
s.addContainer(ctr)
if err = s.ctrIDIndex.Add(id); err != nil { if err = s.ctrIDIndex.Add(id); err != nil {
return err return err
} }
@ -122,7 +137,7 @@ func configNetNsPath(spec rspec.Spec) (string, error) {
} }
func (s *Server) loadSandbox(id string) error { func (s *Server) loadSandbox(id string) error {
config, err := ioutil.ReadFile(filepath.Join(s.config.SandboxDir, id, "config.json")) config, err := s.store.GetFromContainerDirectory(id, "config.json")
if err != nil { if err != nil {
return err return err
} }
@ -139,6 +154,11 @@ func (s *Server) loadSandbox(id string) error {
if err != nil { if err != nil {
return err return err
} }
defer func() {
if err != nil {
s.releasePodName(name)
}
}()
var metadata pb.PodSandboxMetadata var metadata pb.PodSandboxMetadata
if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil { if err = json.Unmarshal([]byte(m.Annotations["ocid/metadata"]), &metadata); err != nil {
return err return err
@ -184,9 +204,14 @@ func (s *Server) loadSandbox(id string) error {
s.addSandbox(sb) s.addSandbox(sb)
sandboxPath := filepath.Join(s.config.SandboxDir, id) defer func() {
if err != nil {
s.removeSandbox(sb.id)
}
}()
if err = label.ReserveLabel(processLabel); err != nil { sandboxPath, err := s.store.GetContainerRunDirectory(id)
if err != nil {
return err return err
} }
@ -194,14 +219,22 @@ func (s *Server) loadSandbox(id string) error {
if err != nil { if err != nil {
return err return err
} }
defer func() {
if err != nil {
s.releaseContainerName(cname)
}
}()
scontainer, err := oci.NewContainer(m.Annotations["ocid/container_id"], cname, sandboxPath, sandboxPath, sb.netNs(), labels, annotations, nil, nil, id, false) scontainer, err := oci.NewContainer(m.Annotations["ocid/container_id"], cname, sandboxPath, sandboxPath, sb.netNs(), labels, annotations, nil, nil, id, false)
if err != nil { if err != nil {
return err return err
} }
sb.infraContainer = scontainer
if err = s.runtime.UpdateStatus(scontainer); err != nil { if err = s.runtime.UpdateStatus(scontainer); err != nil {
logrus.Warnf("error updating status for container %s: %v", scontainer.ID(), err) return fmt.Errorf("error updating status for pod sandbox infra container %s: %v", scontainer.ID(), err)
} }
if err = label.ReserveLabel(processLabel); err != nil {
return err
}
sb.infraContainer = scontainer
if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil { if err = s.ctrIDIndex.Add(scontainer.ID()); err != nil {
return err return err
} }
@ -212,31 +245,146 @@ func (s *Server) loadSandbox(id string) error {
} }
func (s *Server) restore() { func (s *Server) restore() {
sandboxDir, err := ioutil.ReadDir(s.config.SandboxDir) containers, err := s.store.Containers()
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
logrus.Warnf("could not read sandbox directory %s: %v", sandboxDir, err) logrus.Warnf("could not read containers and sandboxes: %v", err)
} }
for _, v := range sandboxDir { pods := map[string]*storage.RuntimeContainerMetadata{}
if !v.IsDir() { podContainers := map[string]*storage.RuntimeContainerMetadata{}
for _, container := range containers {
metadata, err2 := s.storage.GetContainerMetadata(container.ID)
if err2 != nil {
logrus.Warnf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
continue continue
} }
if err = s.loadSandbox(v.Name()); err != nil { if metadata.Pod {
logrus.Warnf("could not restore sandbox %s: %v", v.Name(), err) pods[container.ID] = &metadata
} else {
podContainers[container.ID] = &metadata
} }
} }
containerDir, err := ioutil.ReadDir(s.runtime.ContainerDir()) for containerID, metadata := range pods {
if err != nil && !os.IsNotExist(err) { if err = s.loadSandbox(containerID); err != nil {
logrus.Warnf("could not read container directory %s: %v", s.runtime.ContainerDir(), err) logrus.Warnf("could not restore sandbox %s container %s: %v", metadata.PodID, containerID, err)
}
for _, v := range containerDir {
if !v.IsDir() {
continue
} }
if err := s.loadContainer(v.Name()); err != nil { }
logrus.Warnf("could not restore container %s: %v", v.Name(), err) for containerID := range podContainers {
if err := s.loadContainer(containerID); err != nil {
logrus.Warnf("could not restore container %s: %v", containerID, err)
}
}
}
// Update makes changes to the server's state (lists of pods and containers) to
// reflect the list of pods and containers that are stored on disk, possibly
// having been modified by other parties
func (s *Server) Update() {
logrus.Debugf("updating sandbox and container information")
if err := s.update(); err != nil {
logrus.Errorf("error updating sandbox and container information: %v", err)
}
}
func (s *Server) update() error {
containers, err := s.store.Containers()
if err != nil && !os.IsNotExist(err) {
logrus.Warnf("could not read containers and sandboxes: %v", err)
return err
}
newPods := map[string]*storage.RuntimeContainerMetadata{}
oldPods := map[string]string{}
removedPods := map[string]string{}
newPodContainers := map[string]*storage.RuntimeContainerMetadata{}
oldPodContainers := map[string]string{}
removedPodContainers := map[string]string{}
for _, container := range containers {
if s.hasSandbox(container.ID) {
// FIXME: do we need to reload/update any info about the sandbox?
oldPods[container.ID] = container.ID
oldPodContainers[container.ID] = container.ID
continue
}
if s.getContainer(container.ID) != nil {
// FIXME: do we need to reload/update any info about the container?
oldPodContainers[container.ID] = container.ID
continue
}
// not previously known, so figure out what it is
metadata, err2 := s.storage.GetContainerMetadata(container.ID)
if err2 != nil {
logrus.Errorf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
continue
}
if metadata.Pod {
newPods[container.ID] = &metadata
} else {
newPodContainers[container.ID] = &metadata
} }
} }
s.ctrIDIndex.Iterate(func(id string) {
if _, ok := oldPodContainers[id]; !ok {
// this container's ID wasn't in the updated list -> removed
removedPodContainers[id] = id
}
})
for removedPodContainer := range removedPodContainers {
// forget this container
c := s.getContainer(removedPodContainer)
if c == nil {
logrus.Warnf("bad state when getting container removed %+v", removedPodContainer)
continue
}
s.releaseContainerName(c.Name())
s.removeContainer(c)
if err = s.ctrIDIndex.Delete(c.ID()); err != nil {
return err
}
logrus.Debugf("forgetting removed pod container %s", c.ID())
}
s.podIDIndex.Iterate(func(id string) {
if _, ok := oldPods[id]; !ok {
// this pod's ID wasn't in the updated list -> removed
removedPods[id] = id
}
})
for removedPod := range removedPods {
// forget this pod
sb := s.getSandbox(removedPod)
if sb == nil {
logrus.Warnf("bad state when getting pod to remove %+v", removedPod)
continue
}
podInfraContainer := sb.infraContainer
s.releaseContainerName(podInfraContainer.Name())
s.removeContainer(podInfraContainer)
if err = s.ctrIDIndex.Delete(podInfraContainer.ID()); err != nil {
return err
}
sb.infraContainer = nil
s.releasePodName(sb.name)
s.removeSandbox(sb.id)
if err = s.podIDIndex.Delete(sb.id); err != nil {
return err
}
logrus.Debugf("forgetting removed pod %s", sb.id)
}
for sandboxID := range newPods {
// load this pod
if err = s.loadSandbox(sandboxID); err != nil {
logrus.Warnf("could not load new pod sandbox %s: %v, ignoring", sandboxID, err)
} else {
logrus.Debugf("loaded new pod sandbox %s", sandboxID, err)
}
}
for containerID := range newPodContainers {
// load this container
if err = s.loadContainer(containerID); err != nil {
logrus.Warnf("could not load new sandbox container %s: %v, ignoring", containerID, err)
} else {
logrus.Debugf("loaded new pod container %s", containerID, err)
}
}
return nil
} }
func (s *Server) reservePodName(id, name string) (string, error) { func (s *Server) reservePodName(id, name string) (string, error) {
@ -294,17 +442,35 @@ func seccompEnabled() bool {
return enabled return enabled
} }
// Shutdown attempts to shut down the server's storage cleanly
func (s *Server) Shutdown() error {
_, err := s.store.Shutdown(false)
return err
}
// New creates a new Server with options provided // New creates a new Server with options provided
func New(config *Config) (*Server, error) { func New(config *Config) (*Server, error) {
if err := os.MkdirAll(config.ImageDir, 0755); err != nil { store, err := sstorage.GetStore(sstorage.StoreOptions{
RunRoot: config.RunRoot,
GraphRoot: config.Root,
GraphDriverName: config.Storage,
GraphDriverOptions: config.StorageOptions,
})
if err != nil {
return nil, err return nil, err
} }
if err := os.MkdirAll(config.SandboxDir, 0755); err != nil { imageService, err := storage.GetImageService(store, config.DefaultTransport)
if err != nil {
return nil, err return nil, err
} }
r, err := oci.New(config.Runtime, config.ContainerDir, config.Conmon, config.ConmonEnv, config.CgroupManager) storageRuntimeService := storage.GetRuntimeService(imageService, config.PauseImage)
if err != nil {
return nil, err
}
r, err := oci.New(config.Runtime, config.Conmon, config.ConmonEnv, config.CgroupManager)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -316,6 +482,9 @@ func New(config *Config) (*Server, error) {
} }
s := &Server{ s := &Server{
runtime: r, runtime: r,
store: store,
images: imageService,
storage: storageRuntimeService,
netPlugin: netPlugin, netPlugin: netPlugin,
config: *config, config: *config,
state: &serverState{ state: &serverState{
@ -346,6 +515,9 @@ func New(config *Config) (*Server, error) {
s.podNameIndex = registrar.NewRegistrar() s.podNameIndex = registrar.NewRegistrar()
s.ctrIDIndex = truncindex.NewTruncIndex([]string{}) s.ctrIDIndex = truncindex.NewTruncIndex([]string{})
s.ctrNameIndex = registrar.NewRegistrar() s.ctrNameIndex = registrar.NewRegistrar()
s.imageContext = &types.SystemContext{
SignaturePolicyPath: config.ImageConfig.SignaturePolicyPath,
}
s.restore() s.restore()

6
test/bin2img/Makefile Normal file
View file

@ -0,0 +1,6 @@
# Build the bin2img test helper from all Go sources in this directory.
bin2img: $(wildcard *.go)
	go build -o $@
# "clean" is not a real file target; always run it when requested.
.PHONY: clean
clean:
	rm -f bin2img

225
test/bin2img/bin2img.go Normal file
View file

@ -0,0 +1,225 @@
package main
import (
"archive/tar"
"bytes"
"encoding/json"
"io"
"os"
"runtime"
"github.com/Sirupsen/logrus"
"github.com/containers/image/storage"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/reexec"
sstorage "github.com/containers/storage/storage"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/urfave/cli"
)
// main is a barebones test helper that packs a single binary (the pause
// binary by default) into a one-layer OCI image and commits it to a
// containers/storage store under the requested image name.
func main() {
	// containers/storage re-execs the current binary for some graph
	// driver operations; return immediately when we are that child.
	if reexec.Init() {
		return
	}
	app := cli.NewApp()
	app.Name = "bin2img"
	app.Usage = "barebones image builder"
	app.Version = "0.0.1"
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug",
			Usage: "turn on debug logging",
		},
		cli.StringFlag{
			Name:  "root",
			Usage: "graph root directory",
		},
		cli.StringFlag{
			Name:  "runroot",
			Usage: "run root directory",
		},
		cli.StringFlag{
			Name:  "storage-driver",
			Usage: "storage driver",
		},
		cli.StringSliceFlag{
			Name:  "storage-option",
			Usage: "storage option",
		},
		cli.StringFlag{
			Name:  "image-name",
			Usage: "set image name",
			Value: "kubernetes/pause",
		},
		cli.StringFlag{
			Name:  "source-binary",
			Usage: "source binary",
			Value: "../../pause/pause",
		},
		cli.StringFlag{
			Name:  "image-binary",
			Usage: "image binary",
			Value: "/pause",
		},
	}
	app.Action = func(c *cli.Context) error {
		debug := c.GlobalBool("debug")
		rootDir := c.GlobalString("root")
		runrootDir := c.GlobalString("runroot")
		storageDriver := c.GlobalString("storage-driver")
		storageOptions := c.GlobalStringSlice("storage-option")
		imageName := c.GlobalString("image-name")
		sourceBinary := c.GlobalString("source-binary")
		imageBinary := c.GlobalString("image-binary")
		if debug {
			logrus.SetLevel(logrus.DebugLevel)
		} else {
			logrus.SetLevel(logrus.ErrorLevel)
		}
		// --root and --runroot must be overridden together.  Nothing is
		// open yet, so exiting directly here is safe.
		if rootDir == "" && runrootDir != "" {
			logrus.Errorf("must set --root and --runroot, or neither")
			os.Exit(1)
		}
		if rootDir != "" && runrootDir == "" {
			logrus.Errorf("must set --root and --runroot, or neither")
			os.Exit(1)
		}
		storeOptions := sstorage.DefaultStoreOptions
		if rootDir != "" && runrootDir != "" {
			storeOptions.GraphDriverName = storageDriver
			storeOptions.GraphDriverOptions = storageOptions
			storeOptions.GraphRoot = rootDir
			storeOptions.RunRoot = runrootDir
		}
		store, err := sstorage.GetStore(storeOptions)
		if err != nil {
			logrus.Errorf("error opening storage: %v", err)
			os.Exit(1)
		}
		// From here on, return errors instead of calling os.Exit:
		// os.Exit would skip this deferred Shutdown (and img.Close
		// below), leaving mounts and locks behind.  A non-nil return
		// still makes the process exit non-zero via app.Run.
		defer store.Shutdown(false)

		layerBuffer := &bytes.Buffer{}
		binary, err := os.Open(sourceBinary)
		if err != nil {
			logrus.Errorf("error opening image binary: %v", err)
			return err
		}
		binInfo, err := binary.Stat()
		if err != nil {
			logrus.Errorf("error statting image binary: %v", err)
			return err
		}
		// Build the single-file layer: a tar archive holding only the
		// binary, mode 0555, owned by root.
		archive := tar.NewWriter(layerBuffer)
		err = archive.WriteHeader(&tar.Header{
			Name:     imageBinary,
			Size:     binInfo.Size(),
			Mode:     0555,
			ModTime:  binInfo.ModTime(),
			Typeflag: tar.TypeReg,
			Uname:    "root",
			Gname:    "root",
		})
		if err != nil {
			logrus.Errorf("error writing archive header: %v", err)
			return err
		}
		_, err = io.Copy(archive, binary)
		if err != nil {
			logrus.Errorf("error archiving image binary: %v", err)
			return err
		}
		archive.Close()
		binary.Close()
		layerInfo := types.BlobInfo{
			Digest: digest.Canonical.FromBytes(layerBuffer.Bytes()),
			Size:   int64(layerBuffer.Len()),
		}
		ref, err := storage.Transport.ParseStoreReference(store, imageName)
		if err != nil {
			logrus.Errorf("error parsing image name: %v", err)
			return err
		}
		img, err := ref.NewImageDestination(nil)
		if err != nil {
			logrus.Errorf("error preparing to write image: %v", err)
			return err
		}
		defer img.Close()
		layer, err := img.PutBlob(layerBuffer, layerInfo)
		if err != nil {
			logrus.Errorf("error preparing to write image: %v", err)
			return err
		}
		// The layer blob is stored uncompressed, so its DiffID equals
		// the blob digest computed above.
		config := &v1.Image{
			Architecture: runtime.GOARCH,
			OS:           runtime.GOOS,
			Config: v1.ImageConfig{
				User:       "root",
				Entrypoint: []string{imageBinary},
			},
			RootFS: v1.RootFS{
				Type: "layers",
				DiffIDs: []string{
					layer.Digest.String(),
				},
			},
		}
		cbytes, err := json.Marshal(config)
		if err != nil {
			logrus.Errorf("error encoding configuration: %v", err)
			return err
		}
		configInfo := types.BlobInfo{
			Digest: digest.Canonical.FromBytes(cbytes),
			Size:   int64(len(cbytes)),
		}
		configInfo, err = img.PutBlob(bytes.NewBuffer(cbytes), configInfo)
		if err != nil {
			logrus.Errorf("error saving configuration: %v", err)
			return err
		}
		manifest := &v1.Manifest{
			Versioned: specs.Versioned{
				SchemaVersion: 2,
				MediaType:     v1.MediaTypeImageManifest,
			},
			Config: v1.Descriptor{
				MediaType: v1.MediaTypeImageConfig,
				Digest:    configInfo.Digest.String(),
				Size:      int64(len(cbytes)),
			},
			Layers: []v1.Descriptor{{
				MediaType: v1.MediaTypeImageLayer,
				Digest:    layer.Digest.String(),
				Size:      layer.Size,
			}},
		}
		mbytes, err := json.Marshal(manifest)
		if err != nil {
			logrus.Errorf("error encoding manifest: %v", err)
			return err
		}
		err = img.PutManifest(mbytes)
		if err != nil {
			logrus.Errorf("error saving manifest: %v", err)
			return err
		}
		err = img.Commit()
		if err != nil {
			logrus.Errorf("error committing image: %v", err)
			return err
		}
		return nil
	}

	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}

6
test/copyimg/Makefile Normal file
View file

@ -0,0 +1,6 @@
# Build the copyimg test helper from all Go sources in this directory.
copyimg: $(wildcard *.go)
	go build -o $@
# "clean" is not a real file target; always run it when requested.
.PHONY: clean
clean:
	rm -f copyimg

198
test/copyimg/copyimg.go Normal file
View file

@ -0,0 +1,198 @@
package main
import (
"os"
"github.com/Sirupsen/logrus"
"github.com/containers/image/copy"
"github.com/containers/image/signature"
"github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/reexec"
sstorage "github.com/containers/storage/storage"
"github.com/urfave/cli"
)
// main is a barebones test helper that copies images between
// containers/image transports.  When --image-name is given it reads
// from / writes to a containers/storage store and can attach an extra
// name to the stored image; otherwise it copies straight from the
// import source to the export target.
func main() {
	// containers/storage re-execs the current binary for some graph
	// driver operations; return immediately when we are that child.
	if reexec.Init() {
		return
	}
	app := cli.NewApp()
	app.Name = "copyimg"
	app.Usage = "barebones image copier"
	app.Version = "0.0.1"
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug",
			Usage: "turn on debug logging",
		},
		cli.StringFlag{
			Name:  "root",
			Usage: "graph root directory",
		},
		cli.StringFlag{
			Name:  "runroot",
			Usage: "run root directory",
		},
		cli.StringFlag{
			Name:  "storage-driver",
			Usage: "storage driver",
		},
		cli.StringSliceFlag{
			Name:  "storage-option",
			Usage: "storage option",
		},
		cli.StringFlag{
			Name:  "signature-policy",
			Usage: "signature policy",
		},
		cli.StringFlag{
			Name:  "image-name",
			Usage: "set image name",
		},
		cli.StringFlag{
			Name:  "add-name",
			Usage: "name to add to image",
		},
		cli.StringFlag{
			Name:  "import-from",
			Usage: "import source",
		},
		cli.StringFlag{
			Name:  "export-to",
			Usage: "export target",
		},
	}
	app.Action = func(c *cli.Context) error {
		var store sstorage.Store
		var ref, importRef, exportRef types.ImageReference
		var err error
		debug := c.GlobalBool("debug")
		rootDir := c.GlobalString("root")
		runrootDir := c.GlobalString("runroot")
		storageDriver := c.GlobalString("storage-driver")
		storageOptions := c.GlobalStringSlice("storage-option")
		signaturePolicy := c.GlobalString("signature-policy")
		imageName := c.GlobalString("image-name")
		addName := c.GlobalString("add-name")
		importFrom := c.GlobalString("import-from")
		exportTo := c.GlobalString("export-to")
		if debug {
			logrus.SetLevel(logrus.DebugLevel)
		} else {
			logrus.SetLevel(logrus.ErrorLevel)
		}
		if imageName != "" {
			// --root and --runroot must be overridden together.
			// Nothing is open yet, so exiting directly here is safe.
			if rootDir == "" && runrootDir != "" {
				logrus.Errorf("must set --root and --runroot, or neither")
				os.Exit(1)
			}
			if rootDir != "" && runrootDir == "" {
				logrus.Errorf("must set --root and --runroot, or neither")
				os.Exit(1)
			}
			storeOptions := sstorage.DefaultStoreOptions
			if rootDir != "" && runrootDir != "" {
				storeOptions.GraphDriverName = storageDriver
				storeOptions.GraphDriverOptions = storageOptions
				storeOptions.GraphRoot = rootDir
				storeOptions.RunRoot = runrootDir
			}
			store, err = sstorage.GetStore(storeOptions)
			if err != nil {
				logrus.Errorf("error opening storage: %v", err)
				os.Exit(1)
			}
			// From here on, return errors instead of calling os.Exit:
			// os.Exit would skip this deferred Shutdown (and the
			// deferred policyContext.Destroy below), leaving mounts
			// and locks behind.  A non-nil return still makes the
			// process exit non-zero via app.Run.
			defer store.Shutdown(false)
			storage.Transport.SetStore(store)
			ref, err = storage.Transport.ParseStoreReference(store, imageName)
			if err != nil {
				logrus.Errorf("error parsing image name: %v", err)
				return err
			}
		}

		systemContext := types.SystemContext{
			SignaturePolicyPath: signaturePolicy,
		}
		policy, err := signature.DefaultPolicy(&systemContext)
		if err != nil {
			logrus.Errorf("error loading signature policy: %v", err)
			return err
		}
		policyContext, err := signature.NewPolicyContext(policy)
		if err != nil {
			logrus.Errorf("error loading signature policy: %v", err)
			return err
		}
		defer policyContext.Destroy()
		options := &copy.Options{}

		if importFrom != "" {
			importRef, err = transports.ParseImageName(importFrom)
			if err != nil {
				logrus.Errorf("error parsing image name %v: %v", importFrom, err)
				return err
			}
		}
		if exportTo != "" {
			exportRef, err = transports.ParseImageName(exportTo)
			if err != nil {
				logrus.Errorf("error parsing image name %v: %v", exportTo, err)
				return err
			}
		}

		if imageName != "" {
			// copy.Image's reference arguments are (destination, source).
			if importFrom != "" {
				err = copy.Image(policyContext, ref, importRef, options)
				if err != nil {
					logrus.Errorf("error importing %s: %v", importFrom, err)
					return err
				}
			}
			if addName != "" {
				destImage, err := storage.Transport.GetStoreImage(store, ref)
				if err != nil {
					logrus.Errorf("error finding image: %v", err)
					return err
				}
				names := append(destImage.Names, imageName, addName)
				err = store.SetNames(destImage.ID, names)
				if err != nil {
					logrus.Errorf("error adding name to %s: %v", imageName, err)
					return err
				}
			}
			if exportTo != "" {
				err = copy.Image(policyContext, exportRef, ref, options)
				if err != nil {
					logrus.Errorf("error exporting %s: %v", exportTo, err)
					return err
				}
			}
		} else {
			// No store involved: copy straight from the import source
			// to the export target.
			if importFrom != "" && exportTo != "" {
				err = copy.Image(policyContext, exportRef, importRef, options)
				if err != nil {
					logrus.Errorf("error copying %s to %s: %v", importFrom, exportTo, err)
					return err
				}
			}
		}
		return nil
	}

	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}

View file

@ -36,6 +36,10 @@ APPARMOR_TEST_PROFILE_NAME=${APPARMOR_TEST_PROFILE_NAME:-apparmor-test-deny-writ
BOOT_CONFIG_FILE_PATH=${BOOT_CONFIG_FILE_PATH:-/boot/config-`uname -r`} BOOT_CONFIG_FILE_PATH=${BOOT_CONFIG_FILE_PATH:-/boot/config-`uname -r`}
# Path of apparmor parameters file. # Path of apparmor parameters file.
APPARMOR_PARAMETERS_FILE_PATH=${APPARMOR_PARAMETERS_FILE_PATH:-/sys/module/apparmor/parameters/enabled} APPARMOR_PARAMETERS_FILE_PATH=${APPARMOR_PARAMETERS_FILE_PATH:-/sys/module/apparmor/parameters/enabled}
# Path of the bin2img binary.
BIN2IMG_BINARY=${BIN2IMG_BINARY:-${OCID_ROOT}/cri-o/test/bin2img/bin2img}
# Path of the copyimg binary.
COPYIMG_BINARY=${COPYIMG_BINARY:-${OCID_ROOT}/cri-o/test/copyimg/copyimg}
TESTDIR=$(mktemp -d) TESTDIR=$(mktemp -d)
if [ -e /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then if [ -e /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
@ -56,10 +60,20 @@ mkdir -p $OCID_CNI_CONFIG
PATH=$PATH:$TESTDIR PATH=$PATH:$TESTDIR
# Make sure we have a copy of the redis:latest image.
if ! [ -d "$TESTDATA"/redis-image ]; then
mkdir -p "$TESTDATA"/redis-image
if ! "$COPYIMG_BINARY" --import-from=docker://redis --export-to=dir:"$TESTDATA"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
echo "Error pulling docker://redis"
rm -fr "$TESTDATA"/redis-image
exit 1
fi
fi
# Run ocid using the binary specified by $OCID_BINARY. # Run ocid using the binary specified by $OCID_BINARY.
# This must ONLY be run on engines created with `start_ocid`. # This must ONLY be run on engines created with `start_ocid`.
function ocid() { function ocid() {
"$OCID_BINARY" "$@" "$OCID_BINARY" --listen "$OCID_SOCKET" "$@"
} }
# Run ocic using the binary specified by $OCID_BINARY. # Run ocic using the binary specified by $OCID_BINARY.
@ -112,9 +126,19 @@ function start_ocid() {
apparmor="$APPARMOR_PROFILE" apparmor="$APPARMOR_PROFILE"
fi fi
"$OCID_BINARY" --conmon "$CONMON_BINARY" --pause "$PAUSE_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNC_BINARY" --root "$TESTDIR/ocid" --sandboxdir "$TESTDIR/sandboxes" --containerdir "$TESTDIR/ocid/containers" --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" config >$OCID_CONFIG # Don't forget: bin2img, copyimg, and ocid have their own default drivers, so if you override any, you probably need to override them all
if ! [ "$3" = "--no-pause-image" ] ; then
"$BIN2IMG_BINARY" --root "$TESTDIR/ocid" --runroot "$TESTDIR/ocid-run" --source-binary "$PAUSE_BINARY"
fi
"$COPYIMG_BINARY" --root "$TESTDIR/ocid" --runroot "$TESTDIR/ocid-run" --image-name=redis --import-from=dir:"$TESTDATA"/redis-image --add-name=docker://docker.io/library/redis:latest
"$OCID_BINARY" --conmon "$CONMON_BINARY" --listen "$OCID_SOCKET" --runtime "$RUNC_BINARY" --root "$TESTDIR/ocid" --runroot "$TESTDIR/ocid-run" --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$OCID_CNI_CONFIG" --signature-policy "$INTEGRATION_ROOT"/policy.json config >$OCID_CONFIG
"$OCID_BINARY" --debug --config "$OCID_CONFIG" & OCID_PID=$! "$OCID_BINARY" --debug --config "$OCID_CONFIG" & OCID_PID=$!
wait_until_reachable wait_until_reachable
run ocic image status --id=redis
if [ "$status" -ne 0 ] ; then
ocic image pull docker://redis:latest
fi
} }
function cleanup_ctrs() { function cleanup_ctrs() {
@ -130,6 +154,18 @@ function cleanup_ctrs() {
fi fi
} }
# Remove every image that the ocid under test currently knows about.
# Best-effort: nothing is removed if listing fails or comes back empty.
function cleanup_images() {
	run ocic image list --quiet
	if [ "$status" -eq 0 ]; then
		if [ "$output" != "" ]; then
			# one image ID per line of output
			printf '%s\n' "$output" | while IFS= read -r line
			do
				ocic image remove --id "$line"
			done
		fi
	fi
}
function cleanup_pods() { function cleanup_pods() {
run ocic pod list --quiet run ocic pod list --quiet
if [ "$status" -eq 0 ]; then if [ "$status" -eq 0 ]; then
@ -147,6 +183,7 @@ function cleanup_pods() {
function stop_ocid() { function stop_ocid() {
if [ "$OCID_PID" != "" ]; then if [ "$OCID_PID" != "" ]; then
kill "$OCID_PID" >/dev/null 2>&1 kill "$OCID_PID" >/dev/null 2>&1
wait "$OCID_PID"
rm -f "$OCID_CONFIG" rm -f "$OCID_CONFIG"
fi fi
} }

93
test/image.bats Normal file
View file

@ -0,0 +1,93 @@
#!/usr/bin/env bats
load helpers
IMAGE=kubernetes/pause
# Called by bats after every test in this file; removes whatever state
# the test left behind.
function teardown() {
	cleanup_test
}
@test "image pull" {
	# Start ocid without pre-importing the pause image, so the pull
	# below is the only way the image can appear in the store.
	start_ocid "" "" --no-pause-image
	run ocic image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_images
	stop_ocid
}
@test "image list with filter" {
	start_ocid "" "" --no-pause-image
	run ocic image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	# Listing with the image name as a filter should return the image
	# we just pulled; remove everything it reports.
	run ocic image list --quiet "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	printf '%s\n' "$output" | while IFS= read -r id; do
		run ocic image remove --id "$id"
		echo "$output"
		[ "$status" -eq 0 ]
	done
	run ocic image list --quiet
	echo "$output"
	[ "$status" -eq 0 ]
	# Everything was removed above, so the unfiltered list must now be
	# empty.  (Setting "status" inside a piped while loop only changes
	# a subshell copy and never fails the test, so assert on $output.)
	[ "$output" = "" ]
	cleanup_images
	stop_ocid
}
@test "image list/remove" {
	start_ocid "" "" --no-pause-image
	run ocic image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	# List everything in the store and remove each image by ID.
	run ocic image list --quiet
	echo "$output"
	[ "$status" -eq 0 ]
	printf '%s\n' "$output" | while IFS= read -r id; do
		run ocic image remove --id "$id"
		echo "$output"
		[ "$status" -eq 0 ]
	done
	run ocic image list --quiet
	echo "$output"
	[ "$status" -eq 0 ]
	# Every image was removed above, so the list must now be empty.
	# (Setting "status" inside a piped while loop only changes a
	# subshell copy and never fails the test, so assert on $output.)
	[ "$output" = "" ]
	cleanup_images
	stop_ocid
}
@test "image status/remove" {
	start_ocid "" "" --no-pause-image
	run ocic image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	# For each image in the store, check that status works before
	# removing it.
	run ocic image list --quiet
	echo "$output"
	[ "$status" -eq 0 ]
	printf '%s\n' "$output" | while IFS= read -r id; do
		run ocic image status --id "$id"
		echo "$output"
		[ "$status" -eq 0 ]
		run ocic image remove --id "$id"
		echo "$output"
		[ "$status" -eq 0 ]
	done
	run ocic image list --quiet
	echo "$output"
	[ "$status" -eq 0 ]
	# Every image was removed above, so the list must now be empty.
	# (Setting "status" inside a piped while loop only changes a
	# subshell copy and never fails the test, so assert on $output.)
	[ "$output" = "" ]
	cleanup_images
	stop_ocid
}

7
test/policy.json Normal file
View file

@ -0,0 +1,7 @@
{
"default": [
{
"type": "insecureAcceptAnything"
}
]
}

View file

@ -4,13 +4,9 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"os"
"os/exec" "os/exec"
"path/filepath"
"strings" "strings"
"syscall" "syscall"
"github.com/Sirupsen/logrus"
) )
// ExecCmd executes a command with args and returns its output as a string along // ExecCmd executes a command with args and returns its output as a string along
@ -54,74 +50,7 @@ func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
return return
} }
// CreateFakeRootfs creates a fake rootfs for test. // StatusToExitCode converts wait status code to an exit code
func CreateFakeRootfs(dir string, image string) error { func StatusToExitCode(status int) int {
if len(image) <= 9 || image[:9] != "docker://" { return ((status) & 0xff00) >> 8
return fmt.Errorf("CreateFakeRootfs only support docker images currently")
}
rootfs := filepath.Join(dir, "rootfs")
if err := os.MkdirAll(rootfs, 0755); err != nil {
return err
}
// docker export $(docker create image[9:]) | tar -C rootfs -xf -
return dockerExport(image[9:], rootfs)
}
// CreateInfraRootfs creates a rootfs similar to CreateFakeRootfs, but only
// copies a single binary from the host into the rootfs. This is all done
// without Docker, and is only used currently for the pause container which is
// required for all sandboxes.
func CreateInfraRootfs(dir string, src string) error {
rootfs := filepath.Join(dir, "rootfs")
if err := os.MkdirAll(rootfs, 0755); err != nil {
return err
}
dest := filepath.Join(rootfs, filepath.Base(src))
logrus.Debugf("copying infra rootfs binary: %v -> %v", src, dest)
in, err := os.OpenFile(src, os.O_RDONLY, 0755)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0755)
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
return err
}
return out.Sync()
}
func dockerExport(image string, rootfs string) error {
out, err := ExecCmd("docker", "create", image)
if err != nil {
return err
}
container := out[:strings.Index(out, "\n")]
cmd := fmt.Sprintf("docker export %s | tar -C %s -xf -", container, rootfs)
if _, err := ExecCmd("/bin/bash", "-c", cmd); err != nil {
err1 := dockerRemove(container)
if err1 == nil {
return err
}
return fmt.Errorf("%v; %v", err, err1)
}
return dockerRemove(container)
}
func dockerRemove(container string) error {
_, err := ExecCmd("docker", "rm", container)
return err
} }

View file

@ -9,3 +9,4 @@ install:
script: script:
- export PATH="$PATH:$HOME/gopath/bin" - export PATH="$PATH:$HOME/gopath/bin"
- make test - make test

View file

@ -1,2 +1,3 @@
Compatible with TOML version Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) [v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)

View file

@ -11,3 +11,4 @@
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO. 0. You just DO WHAT THE FUCK YOU WANT TO.

View file

@ -16,3 +16,4 @@ tags:
push: push:
git push origin master git push origin master
git push github master git push github master

View file

@ -1,9 +1,9 @@
## TOML parser and encoder for Go with reflection ## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml` reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data `encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.) representations. (There is an example of this below.)
Spec: https://github.com/mojombo/toml Spec: https://github.com/mojombo/toml
@ -87,7 +87,7 @@ type TOML struct {
### Using the `encoding.TextUnmarshaler` interface ### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into Here's an example that automatically parses duration strings into
`time.Duration` values: `time.Duration` values:
```toml ```toml
@ -120,7 +120,7 @@ for _, s := range favorites.Song {
} }
``` ```
And you'll also need a `duration` type that satisfies the And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface: `encoding.TextUnmarshaler` interface:
```go ```go
@ -217,3 +217,4 @@ Note that a case insensitive match will be tried if an exact match can't be
found. found.
A working example of the above can be found in `_examples/example.{go,toml}`. A working example of the above can be found in `_examples/example.{go,toml}`.

View file

@ -11,3 +11,4 @@
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO. 0. You just DO WHAT THE FUCK YOU WANT TO.

View file

@ -11,3 +11,4 @@
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO. 0. You just DO WHAT THE FUCK YOU WANT TO.

View file

@ -11,3 +11,4 @@
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO. 0. You just DO WHAT THE FUCK YOU WANT TO.

View file

@ -43,6 +43,9 @@ type CNIConfig struct {
Path []string Path []string
} }
// CNIConfig implements the CNI interface
var _ CNI = &CNIConfig{}
// AddNetwork executes the plugin with the ADD command // AddNetwork executes the plugin with the ADD command
func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) (*types.Result, error) { func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) (*types.Result, error) {
pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path) pluginPath, err := invoke.FindInPath(net.Network.Type, c.Path)

View file

@ -55,7 +55,8 @@ func ConfFiles(dir string) ([]string, error) {
if f.IsDir() { if f.IsDir() {
continue continue
} }
if filepath.Ext(f.Name()) == ".conf" { fileExt := filepath.Ext(f.Name())
if fileExt == ".conf" || fileExt == ".json" {
confFiles = append(confFiles, filepath.Join(dir, f.Name())) confFiles = append(confFiles, filepath.Join(dir, f.Name()))
} }
} }

View file

@ -47,6 +47,9 @@ type Args struct {
Path string Path string
} }
// Args implements the CNIArgs interface
var _ CNIArgs = &Args{}
func (args *Args) AsEnv() []string { func (args *Args) AsEnv() []string {
env := os.Environ() env := os.Environ()
pluginArgsStr := args.PluginArgsStr pluginArgsStr := args.PluginArgsStr

View file

@ -62,6 +62,9 @@ type netNS struct {
closed bool closed bool
} }
// netNS implements the NetNS interface
var _ NetNS = &netNS{}
func getCurrentThreadNetNSPath() string { func getCurrentThreadNetNSPath() string {
// /proc/self/ns/net returns the namespace of the main thread, not // /proc/self/ns/net returns the namespace of the main thread, not
// of whatever thread this goroutine is running on. Make sure we // of whatever thread this goroutine is running on. Make sure we

View file

@ -36,6 +36,9 @@ type pluginInfo struct {
SupportedVersions_ []string `json:"supportedVersions,omitempty"` SupportedVersions_ []string `json:"supportedVersions,omitempty"`
} }
// pluginInfo implements the PluginInfo interface
var _ PluginInfo = &pluginInfo{}
func (p *pluginInfo) Encode(w io.Writer) error { func (p *pluginInfo) Encode(w io.Writer) error {
return json.NewEncoder(w).Encode(p) return json.NewEncoder(w).Encode(p)
} }

View file

@ -4,9 +4,10 @@ import (
"bytes" "bytes"
"compress/bzip2" "compress/bzip2"
"compress/gzip" "compress/gzip"
"errors"
"io" "io"
"github.com/pkg/errors"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
) )

View file

@ -3,7 +3,6 @@ package copy
import ( import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -17,7 +16,8 @@ import (
"github.com/containers/image/signature" "github.com/containers/image/signature"
"github.com/containers/image/transports" "github.com/containers/image/transports"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@ -51,15 +51,15 @@ type imageCopier struct {
// and set validationFailed to true if the source stream does not match expectedDigest. // and set validationFailed to true if the source stream does not match expectedDigest.
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
if err := expectedDigest.Validate(); err != nil { if err := expectedDigest.Validate(); err != nil {
return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest) return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
} }
digestAlgorithm := expectedDigest.Algorithm() digestAlgorithm := expectedDigest.Algorithm()
if !digestAlgorithm.Available() { if !digestAlgorithm.Available() {
return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
} }
return &digestingReader{ return &digestingReader{
source: source, source: source,
digester: digestAlgorithm.New(), digester: digestAlgorithm.Digester(),
expectedDigest: expectedDigest, expectedDigest: expectedDigest,
validationFailed: false, validationFailed: false,
}, nil }, nil
@ -72,14 +72,14 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// Coverage: This should not happen, the hash.Hash interface requires // Coverage: This should not happen, the hash.Hash interface requires
// d.digest.Write to never return an error, and the io.Writer interface // d.digest.Write to never return an error, and the io.Writer interface
// requires n2 == len(input) if no error is returned. // requires n2 == len(input) if no error is returned.
return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err) return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
} }
} }
if err == io.EOF { if err == io.EOF {
actualDigest := d.digester.Digest() actualDigest := d.digester.Digest()
if actualDigest != d.expectedDigest { if actualDigest != d.expectedDigest {
d.validationFailed = true d.validationFailed = true
return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
} }
} }
return n, err return n, err
@ -106,14 +106,14 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
dest, err := destRef.NewImageDestination(options.DestinationCtx) dest, err := destRef.NewImageDestination(options.DestinationCtx)
if err != nil { if err != nil {
return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err) return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
} }
defer dest.Close() defer dest.Close()
destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes() destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()
rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes) rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
if err != nil { if err != nil {
return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err) return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
} }
unparsedImage := image.UnparsedFromSource(rawSource) unparsedImage := image.UnparsedFromSource(rawSource)
defer func() { defer func() {
@ -124,17 +124,17 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
// Please keep this policy check BEFORE reading any other information about the image. // Please keep this policy check BEFORE reading any other information about the image.
if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return fmt.Errorf("Source image rejected: %v", err) return errors.Wrap(err, "Source image rejected")
} }
src, err := image.FromUnparsedImage(unparsedImage) src, err := image.FromUnparsedImage(unparsedImage)
if err != nil { if err != nil {
return fmt.Errorf("Error initializing image from source %s: %v", transports.ImageName(srcRef), err) return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
} }
unparsedImage = nil unparsedImage = nil
defer src.Close() defer src.Close()
if src.IsMultiImage() { if src.IsMultiImage() {
return fmt.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
} }
var sigs [][]byte var sigs [][]byte
@ -144,14 +144,14 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
writeReport("Getting image source signatures\n") writeReport("Getting image source signatures\n")
s, err := src.Signatures() s, err := src.Signatures()
if err != nil { if err != nil {
return fmt.Errorf("Error reading signatures: %v", err) return errors.Wrap(err, "Error reading signatures")
} }
sigs = s sigs = s
} }
if len(sigs) != 0 { if len(sigs) != 0 {
writeReport("Checking if image destination supports signatures\n") writeReport("Checking if image destination supports signatures\n")
if err := dest.SupportsSignatures(); err != nil { if err := dest.SupportsSignatures(); err != nil {
return fmt.Errorf("Can not copy signatures: %v", err) return errors.Wrap(err, "Can not copy signatures")
} }
} }
@ -182,17 +182,17 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
pendingImage := src pendingImage := src
if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{InformationOnly: manifestUpdates.InformationOnly}) { if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{InformationOnly: manifestUpdates.InformationOnly}) {
if !canModifyManifest { if !canModifyManifest {
return fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") return errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
} }
manifestUpdates.InformationOnly.Destination = dest manifestUpdates.InformationOnly.Destination = dest
pendingImage, err = src.UpdatedImage(manifestUpdates) pendingImage, err = src.UpdatedImage(manifestUpdates)
if err != nil { if err != nil {
return fmt.Errorf("Error creating an updated image manifest: %v", err) return errors.Wrap(err, "Error creating an updated image manifest")
} }
} }
manifest, _, err := pendingImage.Manifest() manifest, _, err := pendingImage.Manifest()
if err != nil { if err != nil {
return fmt.Errorf("Error reading manifest: %v", err) return errors.Wrap(err, "Error reading manifest")
} }
if err := ic.copyConfig(pendingImage); err != nil { if err := ic.copyConfig(pendingImage); err != nil {
@ -202,33 +202,33 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
if options != nil && options.SignBy != "" { if options != nil && options.SignBy != "" {
mech, err := signature.NewGPGSigningMechanism() mech, err := signature.NewGPGSigningMechanism()
if err != nil { if err != nil {
return fmt.Errorf("Error initializing GPG: %v", err) return errors.Wrap(err, "Error initializing GPG")
} }
dockerReference := dest.Reference().DockerReference() dockerReference := dest.Reference().DockerReference()
if dockerReference == nil { if dockerReference == nil {
return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) return errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
} }
writeReport("Signing manifest\n") writeReport("Signing manifest\n")
newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy) newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy)
if err != nil { if err != nil {
return fmt.Errorf("Error creating signature: %v", err) return errors.Wrap(err, "Error creating signature")
} }
sigs = append(sigs, newSig) sigs = append(sigs, newSig)
} }
writeReport("Writing manifest to image destination\n") writeReport("Writing manifest to image destination\n")
if err := dest.PutManifest(manifest); err != nil { if err := dest.PutManifest(manifest); err != nil {
return fmt.Errorf("Error writing manifest: %v", err) return errors.Wrap(err, "Error writing manifest")
} }
writeReport("Storing signatures\n") writeReport("Storing signatures\n")
if err := dest.PutSignatures(sigs); err != nil { if err := dest.PutSignatures(sigs); err != nil {
return fmt.Errorf("Error writing signatures: %v", err) return errors.Wrap(err, "Error writing signatures")
} }
if err := dest.Commit(); err != nil { if err := dest.Commit(); err != nil {
return fmt.Errorf("Error committing the finished image: %v", err) return errors.Wrap(err, "Error committing the finished image")
} }
return nil return nil
@ -293,14 +293,14 @@ func (ic *imageCopier) copyConfig(src types.Image) error {
fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
configBlob, err := src.ConfigBlob() configBlob, err := src.ConfigBlob()
if err != nil { if err != nil {
return fmt.Errorf("Error reading config blob %s: %v", srcInfo.Digest, err) return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
} }
destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
if err != nil { if err != nil {
return err return err
} }
if destInfo.Digest != srcInfo.Digest { if destInfo.Digest != srcInfo.Digest {
return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
} }
} }
return nil return nil
@ -319,7 +319,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
// Check if we already have a blob with this digest // Check if we already have a blob with this digest
haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
if err != nil && err != types.ErrBlobNotFound { if err != nil && err != types.ErrBlobNotFound {
return types.BlobInfo{}, "", fmt.Errorf("Error checking for blob %s at destination: %v", srcInfo.Digest, err) return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
} }
// If we already have a cached diffID for this blob, we don't need to compute it // If we already have a cached diffID for this blob, we don't need to compute it
diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
@ -327,13 +327,13 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
if haveBlob && !diffIDIsNeeded { if haveBlob && !diffIDIsNeeded {
// Check the blob sizes match, if we were given a size this time // Check the blob sizes match, if we were given a size this time
if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize { if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
return types.BlobInfo{}, "", fmt.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size) return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
} }
srcInfo.Size = extantBlobSize srcInfo.Size = extantBlobSize
// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
blobinfo, err := ic.dest.ReapplyBlob(srcInfo) blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
if err != nil { if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("Error reapplying blob %s at destination: %v", srcInfo.Digest, err) return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
} }
fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest) fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
@ -343,7 +343,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest) fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo) srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
if err != nil { if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err) return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
} }
defer srcStream.Close() defer srcStream.Close()
@ -356,7 +356,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
if diffIDIsNeeded { if diffIDIsNeeded {
diffIDResult = <-diffIDChan diffIDResult = <-diffIDChan
if diffIDResult.err != nil { if diffIDResult.err != nil {
return types.BlobInfo{}, "", fmt.Errorf("Error computing layer DiffID: %v", diffIDResult.err) return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
} }
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
@ -441,7 +441,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
// read stream to the end, and validation does not happen. // read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
if err != nil { if err != nil {
return types.BlobInfo{}, fmt.Errorf("Error preparing to verify blob %s: %v", srcInfo.Digest, err) return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
} }
var destStream io.Reader = digestingReader var destStream io.Reader = digestingReader
@ -449,7 +449,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by detectCompression. // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by detectCompression.
decompressor, destStream, err := detectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform decompressor, destStream, err := detectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
if err != nil { if err != nil {
return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err) return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
} }
isCompressed := decompressor != nil isCompressed := decompressor != nil
@ -492,7 +492,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
// === Finally, send the layer stream to dest. // === Finally, send the layer stream to dest.
uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo) uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
if err != nil { if err != nil {
return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err) return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
} }
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer
@ -503,15 +503,15 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(ioutil.Discard, originalLayerReader) _, err := io.Copy(ioutil.Discard, originalLayerReader)
if err != nil { if err != nil {
return types.BlobInfo{}, fmt.Errorf("Error reading input blob %s: %v", srcInfo.Digest, err) return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
} }
} }
if digestingReader.validationFailed { // Coverage: This should never happen. if digestingReader.validationFailed { // Coverage: This should never happen.
return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
} }
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
} }
return uploadedInfo, nil return uploadedInfo, nil
} }
@ -542,7 +542,7 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s
_, srcType, err := src.Manifest() _, srcType, err := src.Manifest()
if err != nil { // This should have been cached?! if err != nil { // This should have been cached?!
return fmt.Errorf("Error reading manifest: %v", err) return errors.Wrap(err, "Error reading manifest")
} }
if _, ok := supportedByDest[srcType]; ok { if _, ok := supportedByDest[srcType]; ok {
logrus.Debugf("Manifest MIME type %s is declared supported by the destination", srcType) logrus.Debugf("Manifest MIME type %s is declared supported by the destination", srcType)

View file

@ -1,13 +1,13 @@
package directory package directory
import ( import (
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
type dirImageDestination struct { type dirImageDestination struct {
@ -69,7 +69,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
} }
}() }()
digester := digest.Canonical.New() digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash()) tee := io.TeeReader(stream, digester.Hash())
size, err := io.Copy(blobFile, tee) size, err := io.Copy(blobFile, tee)
@ -78,7 +78,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
} }
computedDigest := digester.Digest() computedDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size { if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
} }
if err := blobFile.Sync(); err != nil { if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
@ -96,7 +96,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" { if info.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`) return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
} }
blobPath := d.ref.layerPath(info.Digest) blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath) finfo, err := os.Stat(blobPath)

View file

@ -1,14 +1,14 @@
package directory package directory
import ( import (
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
type dirImageSource struct { type dirImageSource struct {
@ -42,7 +42,7 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) {
} }
func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
return nil, "", fmt.Errorf(`Getting target manifest not supported by "dir:"`) return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
} }
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).

View file

@ -1,16 +1,17 @@
package directory package directory
import ( import (
"errors"
"fmt" "fmt"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/pkg/errors"
"github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/image" "github.com/containers/image/image"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
// Transport is an ImageTransport for directory paths. // Transport is an ImageTransport for directory paths.
@ -33,7 +34,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
// scope passed to this function will not be "", that value is always allowed. // scope passed to this function will not be "", that value is always allowed.
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
if !strings.HasPrefix(scope, "/") { if !strings.HasPrefix(scope, "/") {
return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
} }
// Refuse also "/", otherwise "/" and "" would have the same semantics, // Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry. // and "" could be unexpectedly shadowed by the "/" entry.
@ -42,7 +43,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
} }
cleaned := filepath.Clean(scope) cleaned := filepath.Clean(scope)
if cleaned != scope { if cleaned != scope {
return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
} }
return nil return nil
} }
@ -153,7 +154,7 @@ func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.Ima
// DeleteImage deletes the named image from the registry, if supported. // DeleteImage deletes the named image from the registry, if supported.
func (ref dirReference) DeleteImage(ctx *types.SystemContext) error { func (ref dirReference) DeleteImage(ctx *types.SystemContext) error {
return fmt.Errorf("Deleting images not implemented for dir: images") return errors.Errorf("Deleting images not implemented for dir: images")
} }
// manifestPath returns a path for the manifest within a directory using our conventions. // manifestPath returns a path for the manifest within a directory using our conventions.

View file

@ -1,9 +1,10 @@
package explicitfilepath package explicitfilepath
import ( import (
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"github.com/pkg/errors"
) )
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. // ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@ -25,14 +26,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) {
// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
// in the resulting path, and especially not at the end. // in the resulting path, and especially not at the end.
return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path) return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
} }
resolvedPath := filepath.Join(resolvedParent, file) resolvedPath := filepath.Join(resolvedParent, file)
// As a sanity check, ensure that there are no "." or ".." components. // As a sanity check, ensure that there are no "." or ".." components.
cleanedResolvedPath := filepath.Clean(resolvedPath) cleanedResolvedPath := filepath.Clean(resolvedPath)
if cleanedResolvedPath != resolvedPath { if cleanedResolvedPath != resolvedPath {
// Coverage: This should never happen. // Coverage: This should never happen.
return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
} }
return resolvedPath, nil return resolvedPath, nil
default: // err != nil, unrecognized default: // err != nil, unrecognized

View file

@ -4,7 +4,6 @@ import (
"archive/tar" "archive/tar"
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -15,8 +14,9 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/docker/engine-api/client" "github.com/docker/engine-api/client"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -36,16 +36,16 @@ type daemonImageDestination struct {
// newImageDestination returns a types.ImageDestination for the specified image reference. // newImageDestination returns a types.ImageDestination for the specified image reference.
func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
if ref.ref == nil { if ref.ref == nil {
return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
} }
namedTaggedRef, ok := ref.ref.(reference.NamedTagged) namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
if !ok { if !ok {
return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
} }
c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
if err != nil { if err != nil {
return nil, fmt.Errorf("Error initializing docker engine client: %v", err) return nil, errors.Wrap(err, "Error initializing docker engine client")
} }
reader, writer := io.Pipe() reader, writer := io.Pipe()
@ -84,7 +84,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
resp, err := c.ImageLoad(ctx, reader, true) resp, err := c.ImageLoad(ctx, reader, true)
if err != nil { if err != nil {
err = fmt.Errorf("Error saving image to docker engine: %v", err) err = errors.Wrap(err, "Error saving image to docker engine")
return return
} }
defer resp.Body.Close() defer resp.Body.Close()
@ -123,7 +123,7 @@ func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *daemonImageDestination) SupportsSignatures() error { func (d *daemonImageDestination) SupportsSignatures() error {
return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported") return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
} }
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@ -170,7 +170,7 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
logrus.Debugf("… streaming done") logrus.Debugf("… streaming done")
} }
digester := digest.Canonical.New() digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash()) tee := io.TeeReader(stream, digester.Hash())
if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil { if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
@ -181,7 +181,7 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" { if info.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`) return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
} }
if blob, ok := d.blobs[info.Digest]; ok { if blob, ok := d.blobs[info.Digest]; ok {
return true, blob.Size, nil return true, blob.Size, nil
@ -196,10 +196,10 @@ func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInf
func (d *daemonImageDestination) PutManifest(m []byte) error { func (d *daemonImageDestination) PutManifest(m []byte) error {
var man schema2Manifest var man schema2Manifest
if err := json.Unmarshal(m, &man); err != nil { if err := json.Unmarshal(m, &man); err != nil {
return fmt.Errorf("Error parsing manifest: %v", err) return errors.Wrap(err, "Error parsing manifest")
} }
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return fmt.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
} }
layerPaths := []string{} layerPaths := []string{}
@ -280,14 +280,14 @@ func (d *daemonImageDestination) sendFile(path string, expectedSize int64, strea
return err return err
} }
if size != expectedSize { if size != expectedSize {
return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
} }
return nil return nil
} }
func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error { func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error {
if len(signatures) != 0 { if len(signatures) != 0 {
return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported") return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
} }
return nil return nil
} }

View file

@ -4,7 +4,6 @@ import (
"archive/tar" "archive/tar"
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -12,8 +11,9 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/docker/engine-api/client" "github.com/docker/engine-api/client"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -49,13 +49,13 @@ type layerInfo struct {
func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) { func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
if err != nil { if err != nil {
return nil, fmt.Errorf("Error initializing docker engine client: %v", err) return nil, errors.Wrap(err, "Error initializing docker engine client")
} }
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference. // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image. // Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()}) inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()})
if err != nil { if err != nil {
return nil, fmt.Errorf("Error loading image from docker engine: %v", err) return nil, errors.Wrap(err, "Error loading image from docker engine")
} }
defer inputStream.Close() defer inputStream.Close()
@ -145,7 +145,7 @@ func (s *daemonImageSource) openTarComponent(componentPath string) (io.ReadClose
} }
if !header.FileInfo().Mode().IsRegular() { if !header.FileInfo().Mode().IsRegular() {
return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name) return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
} }
succeeded = true succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@ -174,7 +174,7 @@ func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Heade
func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) { func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) {
file, err := s.openTarComponent(path) file, err := s.openTarComponent(path)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error loading tar component %s: %v", path, err) return nil, errors.Wrapf(err, "Error loading tar component %s", path)
} }
defer file.Close() defer file.Close()
bytes, err := ioutil.ReadAll(file) bytes, err := ioutil.ReadAll(file)
@ -203,7 +203,7 @@ func (s *daemonImageSource) ensureCachedDataIsPresent() error {
} }
var parsedConfig dockerImage // Most fields ommitted, we only care about layer DiffIDs. var parsedConfig dockerImage // Most fields ommitted, we only care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return fmt.Errorf("Error decoding tar config %s: %v", tarManifest.Config, err) return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
} }
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@ -229,10 +229,10 @@ func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
} }
var items []manifestItem var items []manifestItem
if err := json.Unmarshal(bytes, &items); err != nil { if err := json.Unmarshal(bytes, &items); err != nil {
return nil, fmt.Errorf("Error decoding tar manifest.json: %v", err) return nil, errors.Wrap(err, "Error decoding tar manifest.json")
} }
if len(items) != 1 { if len(items) != 1 {
return nil, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items)) return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
} }
return &items[0], nil return &items[0], nil
} }
@ -240,7 +240,7 @@ func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) { func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
// Collect layer data available in manifest and config. // Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
} }
knownLayers := map[diffID]*layerInfo{} knownLayers := map[diffID]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
@ -253,7 +253,7 @@ func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedCo
} }
layerPath := tarManifest.Layers[i] layerPath := tarManifest.Layers[i]
if _, ok := unknownLayerSizes[layerPath]; ok { if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
} }
li := &layerInfo{ // A new element in each iteration li := &layerInfo{ // A new element in each iteration
path: layerPath, path: layerPath,
@ -284,7 +284,7 @@ func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedCo
} }
} }
if len(unknownLayerSizes) != 0 { if len(unknownLayerSizes) != 0 {
return nil, fmt.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
} }
return knownLayers, nil return knownLayers, nil
@ -310,7 +310,7 @@ func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
for _, diffID := range s.orderedDiffIDList { for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID] li, ok := s.knownLayers[diffID]
if !ok { if !ok {
return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID) return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
} }
m.Layers = append(m.Layers, distributionDescriptor{ m.Layers = append(m.Layers, distributionDescriptor{
Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
@ -331,7 +331,7 @@ func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
// out of a manifest list. // out of a manifest list.
func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType. // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
return nil, "", fmt.Errorf(`Manifest lists are not supported by "docker-daemon:"`) return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
} }
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
@ -352,7 +352,7 @@ func (s *daemonImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
return stream, li.size, nil return stream, li.size, nil
} }
return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest) return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
} }
// GetSignatures returns the image's signatures. It may use a remote (= slow) service. // GetSignatures returns the image's signatures. It may use a remote (= slow) service.

View file

@ -1,13 +1,12 @@
package daemon package daemon
import ( import (
"errors" "github.com/pkg/errors"
"fmt"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/image" "github.com/containers/image/image"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
// Transport is an ImageTransport for images managed by a local Docker daemon. // Transport is an ImageTransport for images managed by a local Docker daemon.
@ -52,11 +51,11 @@ func ParseReference(refString string) (types.ImageReference, error) {
// digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
// reference.ParseIDOrReference interprets such strings as digests. // reference.ParseIDOrReference interprets such strings as digests.
if dgst, err := digest.ParseDigest(refString); err == nil { if dgst, err := digest.Parse(refString); err == nil {
// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
// Other digest references are ambiguous, so refuse them. // Other digest references are ambiguous, so refuse them.
if dgst.Algorithm() != digest.Canonical { if dgst.Algorithm() != digest.Canonical {
return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
} }
return NewReference(dgst, nil) return NewReference(dgst, nil)
} }
@ -66,7 +65,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, err return nil, err
} }
if ref.Name() == digest.Canonical.String() { if ref.Name() == digest.Canonical.String() {
return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
} }
return NewReference("", ref) return NewReference("", ref)
} }
@ -78,14 +77,14 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
} }
if ref != nil { if ref != nil {
if reference.IsNameOnly(ref) { if reference.IsNameOnly(ref) {
return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String()) return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String())
} }
// A github.com/distribution/reference value can have a tag and a digest at the same time! // A github.com/distribution/reference value can have a tag and a digest at the same time!
// docker/reference does not handle that, so fail. // docker/reference does not handle that, so fail.
_, isTagged := ref.(reference.NamedTagged) _, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical) _, isDigested := ref.(reference.Canonical)
if isTagged && isDigested { if isTagged && isDigested {
return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported") return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
} }
} }
return daemonReference{ return daemonReference{
@ -175,5 +174,5 @@ func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error {
// Should this just untag the image? Should this stop running containers? // Should this just untag the image? Should this stop running containers?
// The semantics is not quite as clear as for remote repositories. // The semantics is not quite as clear as for remote repositories.
// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
return fmt.Errorf("Deleting images not implemented for docker-daemon: images") return errors.Errorf("Deleting images not implemented for docker-daemon: images")
} }

View file

@ -1,6 +1,6 @@
package daemon package daemon
import "github.com/docker/distribution/digest" import "github.com/opencontainers/go-digest"
// Various data structures. // Various data structures.

View file

@ -4,7 +4,6 @@ import (
"crypto/tls" "crypto/tls"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -20,6 +19,7 @@ import (
"github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/homedir"
"github.com/docker/go-connections/sockets" "github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig" "github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
) )
const ( const (
@ -101,7 +101,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
if strings.HasSuffix(f.Name(), ".crt") { if strings.HasSuffix(f.Name(), ".crt") {
systemPool, err := tlsconfig.SystemCertPool() systemPool, err := tlsconfig.SystemCertPool()
if err != nil { if err != nil {
return fmt.Errorf("unable to get system cert pool: %v", err) return errors.Wrap(err, "unable to get system cert pool")
} }
tlsc.RootCAs = systemPool tlsc.RootCAs = systemPool
logrus.Debugf("crt: %s", fullPath) logrus.Debugf("crt: %s", fullPath)
@ -116,7 +116,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
keyName := certName[:len(certName)-5] + ".key" keyName := certName[:len(certName)-5] + ".key"
logrus.Debugf("cert: %s", fullPath) logrus.Debugf("cert: %s", fullPath)
if !hasFile(fs, keyName) { if !hasFile(fs, keyName) {
return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
} }
cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
if err != nil { if err != nil {
@ -129,7 +129,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
certName := keyName[:len(keyName)-4] + ".cert" certName := keyName[:len(keyName)-4] + ".cert"
logrus.Debugf("key: %s", fullPath) logrus.Debugf("key: %s", fullPath)
if !hasFile(fs, certName) { if !hasFile(fs, certName) {
return fmt.Errorf("missing client certificate %s for key %s", certName, keyName) return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
} }
} }
} }
@ -240,7 +240,7 @@ func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[
func (c *dockerClient) setupRequestAuth(req *http.Request) error { func (c *dockerClient) setupRequestAuth(req *http.Request) error {
tokens := strings.SplitN(strings.TrimSpace(c.wwwAuthenticate), " ", 2) tokens := strings.SplitN(strings.TrimSpace(c.wwwAuthenticate), " ", 2)
if len(tokens) != 2 { if len(tokens) != 2 {
return fmt.Errorf("expected 2 tokens in WWW-Authenticate: %d, %s", len(tokens), c.wwwAuthenticate) return errors.Errorf("expected 2 tokens in WWW-Authenticate: %d, %s", len(tokens), c.wwwAuthenticate)
} }
switch tokens[0] { switch tokens[0] {
case "Basic": case "Basic":
@ -264,18 +264,48 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return err return err
} }
chs := parseAuthHeader(res.Header) chs := parseAuthHeader(res.Header)
// We could end up in this "if" statement if the /v2/ call (during ping)
// returned 401 with a valid WWW-Authenticate=Bearer header.
// That doesn't **always** mean, however, that the specific API request
// (different from /v2/) actually needs to be authorized.
// One example of this _weird_ scenario happens with GCR.io docker
// registries.
if res.StatusCode != http.StatusUnauthorized || chs == nil || len(chs) == 0 { if res.StatusCode != http.StatusUnauthorized || chs == nil || len(chs) == 0 {
// no need for bearer? wtf? // With gcr.io, the /v2/ call returns a 401 with a valid WWW-Authenticate=Bearer
return nil // header but the repository could be _public_ (no authorization is needed).
// Hence, the registry response contains no challenges and the status
// code is not 401.
// We just skip this case as it's not standard on docker/distribution
// registries (https://github.com/docker/distribution/blob/master/docs/spec/api.md#api-version-check)
if res.StatusCode != http.StatusUnauthorized {
return nil
}
// gcr.io private repositories pull instead requires us to send user:pass pair in
// order to retrieve a token and setup the correct Bearer token.
// try again one last time with Basic Auth
testReq2 := *req
// Do not use the body stream, or we couldn't reuse it for the "real" call later.
testReq2.Body = nil
testReq2.ContentLength = 0
testReq2.SetBasicAuth(c.username, c.password)
res, err := c.client.Do(&testReq2)
if err != nil {
return err
}
chs = parseAuthHeader(res.Header)
if res.StatusCode != http.StatusUnauthorized || chs == nil || len(chs) == 0 {
// no need for bearer? wtf?
return nil
}
} }
// Arbitrarily use the first challenge, there is no reason to expect more than one. // Arbitrarily use the first challenge, there is no reason to expect more than one.
challenge := chs[0] challenge := chs[0]
if challenge.Scheme != "bearer" { // Another artifact of trying to handle WWW-Authenticate before it actually happens. if challenge.Scheme != "bearer" { // Another artifact of trying to handle WWW-Authenticate before it actually happens.
return fmt.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme) return errors.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme)
} }
realm, ok := challenge.Parameters["realm"] realm, ok := challenge.Parameters["realm"]
if !ok { if !ok {
return fmt.Errorf("missing realm in bearer auth challenge") return errors.Errorf("missing realm in bearer auth challenge")
} }
service, _ := challenge.Parameters["service"] // Will be "" if not present service, _ := challenge.Parameters["service"] // Will be "" if not present
scope, _ := challenge.Parameters["scope"] // Will be "" if not present scope, _ := challenge.Parameters["scope"] // Will be "" if not present
@ -286,7 +316,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
return nil return nil
} }
return fmt.Errorf("no handler for %s authentication", tokens[0]) return errors.Errorf("no handler for %s authentication", tokens[0])
// support docker bearer with authconfig's Auth string? see docker2aci // support docker bearer with authconfig's Auth string? see docker2aci
} }
@ -317,11 +347,11 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
defer res.Body.Close() defer res.Body.Close()
switch res.StatusCode { switch res.StatusCode {
case http.StatusUnauthorized: case http.StatusUnauthorized:
return "", fmt.Errorf("unable to retrieve auth token: 401 unauthorized") return "", errors.Errorf("unable to retrieve auth token: 401 unauthorized")
case http.StatusOK: case http.StatusOK:
break break
default: default:
return "", fmt.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) return "", errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
} }
tokenBlob, err := ioutil.ReadAll(res.Body) tokenBlob, err := ioutil.ReadAll(res.Body)
if err != nil { if err != nil {
@ -365,7 +395,7 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return "", "", nil return "", "", nil
} }
return "", "", fmt.Errorf("%s - %v", oldDockerCfgPath, err) return "", "", errors.Wrap(err, oldDockerCfgPath)
} }
j, err := ioutil.ReadFile(oldDockerCfgPath) j, err := ioutil.ReadFile(oldDockerCfgPath)
@ -377,7 +407,7 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
} }
} else if err != nil { } else if err != nil {
return "", "", fmt.Errorf("%s - %v", dockerCfgPath, err) return "", "", errors.Wrap(err, dockerCfgPath)
} }
// I'm feeling lucky // I'm feeling lucky
@ -414,7 +444,7 @@ func (c *dockerClient) ping() (*pingResponse, error) {
defer resp.Body.Close() defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v2/", resp.StatusCode) logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v2/", resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return nil, fmt.Errorf("error pinging repository, response code %d", resp.StatusCode) return nil, errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
} }
pr := &pingResponse{} pr := &pingResponse{}
pr.WWWAuthenticate = resp.Header.Get("WWW-Authenticate") pr.WWWAuthenticate = resp.Header.Get("WWW-Authenticate")
@ -427,7 +457,7 @@ func (c *dockerClient) ping() (*pingResponse, error) {
pr, err = ping("http") pr, err = ping("http")
} }
if err != nil { if err != nil {
err = fmt.Errorf("pinging docker registry returned %+v", err) err = errors.Wrap(err, "pinging docker registry returned")
if c.ctx != nil && c.ctx.DockerDisableV1Ping { if c.ctx != nil && c.ctx.DockerDisableV1Ping {
return nil, err return nil, err
} }

View file

@ -7,6 +7,7 @@ import (
"github.com/containers/image/image" "github.com/containers/image/image"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
) )
// Image is a Docker-specific implementation of types.Image with a few extra methods // Image is a Docker-specific implementation of types.Image with a few extra methods
@ -46,7 +47,7 @@ func (i *Image) GetRepositoryTags() ([]string, error) {
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != http.StatusOK { if res.StatusCode != http.StatusOK {
// print url also // print url also
return nil, fmt.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode) return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
} }
type tagsRes struct { type tagsRes struct {
Tags []string Tags []string

View file

@ -13,7 +13,8 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
type dockerImageDestination struct { type dockerImageDestination struct {
@ -57,7 +58,7 @@ func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures() error { func (d *dockerImageDestination) SupportsSignatures() error {
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported") return errors.Errorf("Pushing signatures to a Docker Registry is not supported")
} }
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@ -101,11 +102,11 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
return types.BlobInfo{Digest: inputInfo.Digest, Size: getBlobSize(res)}, nil return types.BlobInfo{Digest: inputInfo.Digest, Size: getBlobSize(res)}, nil
case http.StatusUnauthorized: case http.StatusUnauthorized:
logrus.Debugf("... not authorized") logrus.Debugf("... not authorized")
return types.BlobInfo{}, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName()) return types.BlobInfo{}, errors.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
case http.StatusNotFound: case http.StatusNotFound:
// noop // noop
default: default:
return types.BlobInfo{}, fmt.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode)) return types.BlobInfo{}, errors.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode))
} }
logrus.Debugf("... failed, status %d", res.StatusCode) logrus.Debugf("... failed, status %d", res.StatusCode)
} }
@ -120,14 +121,14 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != http.StatusAccepted { if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res) logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode) return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
} }
uploadLocation, err := res.Location() uploadLocation, err := res.Location()
if err != nil { if err != nil {
return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error()) return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
} }
digester := digest.Canonical.New() digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{} sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true) res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
@ -140,7 +141,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
uploadLocation, err = res.Location() uploadLocation, err = res.Location()
if err != nil { if err != nil {
return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error()) return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
} }
// FIXME: DELETE uploadLocation on failure // FIXME: DELETE uploadLocation on failure
@ -156,7 +157,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode != http.StatusCreated { if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res) logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode) return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
} }
logrus.Debugf("Upload of layer %s complete", computedDigest) logrus.Debugf("Upload of layer %s complete", computedDigest)
@ -165,7 +166,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" { if info.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`) return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
} }
checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), info.Digest.String()) checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), info.Digest.String())
@ -181,7 +182,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
return true, getBlobSize(res), nil return true, getBlobSize(res), nil
case http.StatusUnauthorized: case http.StatusUnauthorized:
logrus.Debugf("... not authorized") logrus.Debugf("... not authorized")
return false, -1, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName()) return false, -1, errors.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
case http.StatusNotFound: case http.StatusNotFound:
logrus.Debugf("... not present") logrus.Debugf("... not present")
return false, -1, types.ErrBlobNotFound return false, -1, types.ErrBlobNotFound
@ -225,7 +226,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
logrus.Debugf("Error body %s", string(body)) logrus.Debugf("Error body %s", string(body))
} }
logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res) logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res)
return fmt.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode) return errors.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode)
} }
return nil return nil
} }
@ -239,18 +240,18 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
return nil return nil
} }
if d.c.signatureBase == nil { if d.c.signatureBase == nil {
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured") return errors.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
} }
if d.manifestDigest.String() == "" { if d.manifestDigest.String() == "" {
// This shouldnt happen, ImageDestination users are required to call PutManifest before PutSignatures // This shouldnt happen, ImageDestination users are required to call PutManifest before PutSignatures
return fmt.Errorf("Unknown manifest digest, can't add signatures") return errors.Errorf("Unknown manifest digest, can't add signatures")
} }
for i, signature := range signatures { for i, signature := range signatures {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil { if url == nil {
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
} }
err := d.putOneSignature(url, signature) err := d.putOneSignature(url, signature)
if err != nil { if err != nil {
@ -265,7 +266,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
for i := len(signatures); ; i++ { for i := len(signatures); ; i++ {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil { if url == nil {
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
} }
missing, err := d.c.deleteOneSignature(url) missing, err := d.c.deleteOneSignature(url)
if err != nil { if err != nil {
@ -295,9 +296,9 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
return nil return nil
case "http", "https": case "http", "https":
return fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default: default:
return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String()) return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
} }
} }
@ -314,9 +315,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
return false, err return false, err
case "http", "https": case "http", "https":
return false, fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default: default:
return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String()) return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
} }
} }

View file

@ -13,8 +13,9 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
type dockerImageSource struct { type dockerImageSource struct {
@ -139,7 +140,7 @@ func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64
resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false) resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false)
if err == nil { if err == nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode) err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
logrus.Debug(err) logrus.Debug(err)
continue continue
} }
@ -173,7 +174,7 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
} }
if res.StatusCode != http.StatusOK { if res.StatusCode != http.StatusOK {
// print url also // print url also
return nil, 0, fmt.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode) return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
} }
return res.Body, getBlobSize(res), nil return res.Body, getBlobSize(res), nil
} }
@ -195,7 +196,7 @@ func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
for i := 0; ; i++ { for i := 0; ; i++ {
url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
if url == nil { if url == nil {
return nil, fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
} }
signature, missing, err := s.getOneSignature(url) signature, missing, err := s.getOneSignature(url)
if err != nil { if err != nil {
@ -234,7 +235,7 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
if res.StatusCode == http.StatusNotFound { if res.StatusCode == http.StatusNotFound {
return nil, true, nil return nil, true, nil
} else if res.StatusCode != http.StatusOK { } else if res.StatusCode != http.StatusOK {
return nil, false, fmt.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode) return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
} }
sig, err := ioutil.ReadAll(res.Body) sig, err := ioutil.ReadAll(res.Body)
if err != nil { if err != nil {
@ -243,7 +244,7 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
return sig, false, nil return sig, false, nil
default: default:
return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.String()) return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
} }
} }
@ -276,9 +277,9 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
switch get.StatusCode { switch get.StatusCode {
case http.StatusOK: case http.StatusOK:
case http.StatusNotFound: case http.StatusNotFound:
return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
default: default:
return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
} }
digest := get.Header.Get("Docker-Content-Digest") digest := get.Header.Get("Docker-Content-Digest")
@ -297,7 +298,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
return err return err
} }
if delete.StatusCode != http.StatusAccepted { if delete.StatusCode != http.StatusAccepted {
return fmt.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status) return errors.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
} }
if c.signatureBase != nil { if c.signatureBase != nil {
@ -309,7 +310,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
for i := 0; ; i++ { for i := 0; ; i++ {
url := signatureStorageURL(c.signatureBase, manifestDigest, i) url := signatureStorageURL(c.signatureBase, manifestDigest, i)
if url == nil { if url == nil {
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
} }
missing, err := c.deleteOneSignature(url) missing, err := c.deleteOneSignature(url)
if err != nil { if err != nil {

View file

@ -7,6 +7,7 @@ import (
"github.com/containers/image/docker/policyconfiguration" "github.com/containers/image/docker/policyconfiguration"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
) )
// Transport is an ImageTransport for Docker registry-hosted images. // Transport is an ImageTransport for Docker registry-hosted images.
@ -42,7 +43,7 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) { func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") { if !strings.HasPrefix(refString, "//") {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
} }
ref, err := reference.ParseNamed(strings.TrimPrefix(refString, "//")) ref, err := reference.ParseNamed(strings.TrimPrefix(refString, "//"))
if err != nil { if err != nil {
@ -55,7 +56,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). // NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) { func NewReference(ref reference.Named) (types.ImageReference, error) {
if reference.IsNameOnly(ref) { if reference.IsNameOnly(ref) {
return nil, fmt.Errorf("Docker reference %s has neither a tag nor a digest", ref.String()) return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", ref.String())
} }
// A github.com/distribution/reference value can have a tag and a digest at the same time! // A github.com/distribution/reference value can have a tag and a digest at the same time!
// docker/reference does not handle that, so fail. // docker/reference does not handle that, so fail.
@ -64,7 +65,7 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
_, isTagged := ref.(reference.NamedTagged) _, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical) _, isDigested := ref.(reference.Canonical)
if isTagged && isDigested { if isTagged && isDigested {
return nil, fmt.Errorf("Docker references with both a tag and digest are currently not supported") return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
} }
return dockerReference{ return dockerReference{
ref: ref, ref: ref,
@ -151,5 +152,5 @@ func (ref dockerReference) tagOrDigest() (string, error) {
return ref.Tag(), nil return ref.Tag(), nil
} }
// This should not happen, NewReference above refuses reference.IsNameOnly values. // This should not happen, NewReference above refuses reference.IsNameOnly values.
return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.ref.String()) return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.ref.String())
} }

View file

@ -9,8 +9,9 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/docker/distribution/digest"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/types" "github.com/containers/image/types"
@ -60,12 +61,12 @@ func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReferenc
url, err := url.Parse(topLevel) url, err := url.Parse(topLevel)
if err != nil { if err != nil {
return nil, fmt.Errorf("Invalid signature storage URL %s: %v", topLevel, err) return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
} }
// FIXME? Restrict to explicitly supported schemes? // FIXME? Restrict to explicitly supported schemes?
repo := ref.ref.FullName() // Note that this is without a tag or digest. repo := ref.ref.FullName() // Note that this is without a tag or digest.
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
} }
url.Path = url.Path + "/" + repo url.Path = url.Path + "/" + repo
return url, nil return url, nil
@ -114,12 +115,12 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
var config registryConfiguration var config registryConfiguration
err = yaml.Unmarshal(configBytes, &config) err = yaml.Unmarshal(configBytes, &config)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error parsing %s: %v", configPath, err) return nil, errors.Wrapf(err, "Error parsing %s", configPath)
} }
if config.DefaultDocker != nil { if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil { if mergedConfig.DefaultDocker != nil {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
dockerDefaultMergedFrom, configPath) dockerDefaultMergedFrom, configPath)
} }
mergedConfig.DefaultDocker = config.DefaultDocker mergedConfig.DefaultDocker = config.DefaultDocker
@ -128,7 +129,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok { if _, ok := mergedConfig.Docker[nsName]; ok {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
nsName, nsMergedFrom[nsName], configPath) nsName, nsMergedFrom[nsName], configPath)
} }
mergedConfig.Docker[nsName] = nsConfig mergedConfig.Docker[nsName] = nsConfig

View file

@ -1,10 +1,10 @@
package policyconfiguration package policyconfiguration
import ( import (
"errors"
"fmt"
"strings" "strings"
"github.com/pkg/errors"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
) )
@ -17,9 +17,9 @@ func DockerReferenceIdentity(ref reference.Named) (string, error) {
digested, isDigested := ref.(reference.Canonical) digested, isDigested := ref.(reference.Canonical)
switch { switch {
case isTagged && isDigested: // This should not happen, docker/reference.ParseNamed drops the tag. case isTagged && isDigested: // This should not happen, docker/reference.ParseNamed drops the tag.
return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", ref.String()) return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", ref.String())
case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", ref.String()) return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", ref.String())
case isTagged: case isTagged:
res = res + ":" + tagged.Tag() res = res + ":" + tagged.Tag()
case isDigested: case isDigested:

View file

@ -1,13 +1,16 @@
package reference package reference
import ( import (
"errors"
"fmt"
"regexp" "regexp"
"strings" "strings"
"github.com/docker/distribution/digest" // "opencontainers/go-digest" requires us to load the algorithms that we
// want to use into the binary (it calls .Available).
_ "crypto/sha256"
distreference "github.com/docker/distribution/reference" distreference "github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
const ( const (
@ -55,14 +58,18 @@ type Canonical interface {
func ParseNamed(s string) (Named, error) { func ParseNamed(s string) (Named, error) {
named, err := distreference.ParseNamed(s) named, err := distreference.ParseNamed(s)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) return nil, errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", s)
} }
r, err := WithName(named.Name()) r, err := WithName(named.Name())
if err != nil { if err != nil {
return nil, err return nil, err
} }
if canonical, isCanonical := named.(distreference.Canonical); isCanonical { if canonical, isCanonical := named.(distreference.Canonical); isCanonical {
return WithDigest(r, canonical.Digest()) r, err := distreference.WithDigest(r, canonical.Digest())
if err != nil {
return nil, err
}
return &canonicalRef{namedRef{r}}, nil
} }
if tagged, isTagged := named.(distreference.NamedTagged); isTagged { if tagged, isTagged := named.(distreference.NamedTagged); isTagged {
return WithTag(r, tagged.Tag()) return WithTag(r, tagged.Tag())
@ -97,16 +104,6 @@ func WithTag(name Named, tag string) (NamedTagged, error) {
return &taggedRef{namedRef{r}}, nil return &taggedRef{namedRef{r}}, nil
} }
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
r, err := distreference.WithDigest(name, digest)
if err != nil {
return nil, err
}
return &canonicalRef{namedRef{r}}, nil
}
type namedRef struct { type namedRef struct {
distreference.Named distreference.Named
} }
@ -133,7 +130,7 @@ func (r *taggedRef) Tag() string {
return r.namedRef.Named.(distreference.NamedTagged).Tag() return r.namedRef.Named.(distreference.NamedTagged).Tag()
} }
func (r *canonicalRef) Digest() digest.Digest { func (r *canonicalRef) Digest() digest.Digest {
return r.namedRef.Named.(distreference.Canonical).Digest() return digest.Digest(r.namedRef.Named.(distreference.Canonical).Digest())
} }
// WithDefaultTag adds a default tag to a reference if it only has a repo name. // WithDefaultTag adds a default tag to a reference if it only has a repo name.
@ -161,7 +158,7 @@ func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) {
if err := validateID(idOrRef); err == nil { if err := validateID(idOrRef); err == nil {
idOrRef = "sha256:" + idOrRef idOrRef = "sha256:" + idOrRef
} }
if dgst, err := digest.ParseDigest(idOrRef); err == nil { if dgst, err := digest.Parse(idOrRef); err == nil {
return dgst, nil, nil return dgst, nil, nil
} }
ref, err := ParseNamed(idOrRef) ref, err := ParseNamed(idOrRef)
@ -207,14 +204,14 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
func validateID(id string) error { func validateID(id string) error {
if ok := validHex.MatchString(id); !ok { if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id) return errors.Errorf("image ID %q is invalid", id)
} }
return nil return nil
} }
func validateName(name string) error { func validateName(name string) error {
if err := validateID(name); err == nil { if err := validateID(name); err == nil {
return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) return errors.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
} }
return nil return nil
} }

View file

@ -2,13 +2,12 @@ package image
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt"
"runtime" "runtime"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
type platformSpec struct { type platformSpec struct {
@ -54,10 +53,10 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen
matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error computing manifest digest: %v", err) return nil, errors.Wrap(err, "Error computing manifest digest")
} }
if !matches { if !matches {
return nil, fmt.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
} }
return manifestInstanceFromBlob(src, manblob, mt) return manifestInstanceFromBlob(src, manblob, mt)

View file

@ -2,8 +2,6 @@ package image
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt"
"regexp" "regexp"
"strings" "strings"
"time" "time"
@ -11,7 +9,8 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
var ( var (
@ -54,7 +53,7 @@ func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
return nil, err return nil, err
} }
if mschema1.SchemaVersion != 1 { if mschema1.SchemaVersion != 1 {
return nil, fmt.Errorf("unsupported schema version %d", mschema1.SchemaVersion) return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
} }
if len(mschema1.FSLayers) != len(mschema1.History) { if len(mschema1.FSLayers) != len(mschema1.History) {
return nil, errors.New("length of history not equal to number of layers") return nil, errors.New("length of history not equal to number of layers")
@ -153,7 +152,7 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
if options.LayerInfos != nil { if options.LayerInfos != nil {
// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
if len(copy.FSLayers) != len(options.LayerInfos) { if len(copy.FSLayers) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos)) return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
} }
for i, info := range options.LayerInfos { for i, info := range options.LayerInfos {
// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
@ -171,7 +170,7 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
case manifest.DockerV2Schema2MediaType: case manifest.DockerV2Schema2MediaType:
return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
default: default:
return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
} }
return memoryImageFromManifest(&copy), nil return memoryImageFromManifest(&copy), nil
@ -211,7 +210,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
for _, img := range imgs { for _, img := range imgs {
// skip IDs that appear after each other, we handle those later // skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists { if _, exists := idmap[img.ID]; img.ID != lastID && exists {
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
} }
lastID = img.ID lastID = img.ID
idmap[lastID] = struct{}{} idmap[lastID] = struct{}{}
@ -222,7 +221,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
} else if imgs[i].Parent != imgs[i+1].ID { } else if imgs[i].Parent != imgs[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
} }
} }
return nil return nil
@ -230,7 +229,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
func validateV1ID(id string) error { func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok { if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id) return errors.Errorf("image ID %q is invalid", id)
} }
return nil return nil
} }
@ -239,16 +238,16 @@ func validateV1ID(id string) error {
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) { func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
if len(m.History) == 0 { if len(m.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
} }
if len(m.History) != len(m.FSLayers) { if len(m.History) != len(m.FSLayers) {
return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers)) return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
} }
if len(uploadedLayerInfos) != len(m.FSLayers) { if len(uploadedLayerInfos) != len(m.FSLayers) {
return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers)) return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
} }
if len(layerDiffIDs) != len(m.FSLayers) { if len(layerDiffIDs) != len(m.FSLayers) {
return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers)) return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
} }
rootFS := rootFS{ rootFS := rootFS{
@ -263,7 +262,7 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
var v1compat v1Compatibility var v1compat v1Compatibility
if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil { if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
return nil, fmt.Errorf("Error decoding history entry %d: %v", v1Index, err) return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
} }
history[v2Index] = imageHistory{ history[v2Index] = imageHistory{
Created: v1compat.Created, Created: v1compat.Created,

View file

@ -5,14 +5,14 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"strings" "strings"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
@ -82,7 +82,7 @@ func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
func (m *manifestSchema2) ConfigBlob() ([]byte, error) { func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
if m.configBlob == nil { if m.configBlob == nil {
if m.src == nil { if m.src == nil {
return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
} }
stream, _, err := m.src.GetBlob(types.BlobInfo{ stream, _, err := m.src.GetBlob(types.BlobInfo{
Digest: m.ConfigDescriptor.Digest, Digest: m.ConfigDescriptor.Digest,
@ -99,7 +99,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
} }
computedDigest := digest.FromBytes(blob) computedDigest := digest.FromBytes(blob)
if computedDigest != m.ConfigDescriptor.Digest { if computedDigest != m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
} }
m.configBlob = blob m.configBlob = blob
} }
@ -152,7 +152,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
copy := *m // NOTE: This is not a deep copy, it still shares slices etc. copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
if options.LayerInfos != nil { if options.LayerInfos != nil {
if len(copy.LayersDescriptors) != len(options.LayerInfos) { if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
} }
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
for i, info := range options.LayerInfos { for i, info := range options.LayerInfos {
@ -167,7 +167,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
return copy.convertToManifestSchema1(options.InformationOnly.Destination) return copy.convertToManifestSchema1(options.InformationOnly.Destination)
default: default:
return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
} }
return memoryImageFromManifest(&copy), nil return memoryImageFromManifest(&copy), nil
@ -193,7 +193,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
haveGzippedEmptyLayer := false haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 { if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
} }
for v2Index, historyEntry := range imageConfig.History { for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID parentV1ID = v1ID
@ -205,17 +205,17 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
logrus.Debugf("Uploading empty layer during conversion to schema 1") logrus.Debugf("Uploading empty layer during conversion to schema 1")
info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}) info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
if err != nil { if err != nil {
return nil, fmt.Errorf("Error uploading empty layer: %v", err) return nil, errors.Wrap(err, "Error uploading empty layer")
} }
if info.Digest != gzippedEmptyLayerDigest { if info.Digest != gzippedEmptyLayerDigest {
return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest) return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
} }
haveGzippedEmptyLayer = true haveGzippedEmptyLayer = true
} }
blobDigest = gzippedEmptyLayerDigest blobDigest = gzippedEmptyLayerDigest
} else { } else {
if nonemptyLayerIndex >= len(m.LayersDescriptors) { if nonemptyLayerIndex >= len(m.LayersDescriptors) {
return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
} }
blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++ nonemptyLayerIndex++
@ -239,7 +239,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage) v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil { if err != nil {
return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
} }
fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}

View file

@ -3,8 +3,8 @@ package image
import ( import (
"time" "time"
"github.com/docker/distribution/digest"
"github.com/docker/engine-api/types/strslice" "github.com/docker/engine-api/types/strslice"
"github.com/opencontainers/go-digest"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"

View file

@ -1,7 +1,7 @@
package image package image
import ( import (
"errors" "github.com/pkg/errors"
"github.com/containers/image/types" "github.com/containers/image/types"
) )
@ -37,11 +37,7 @@ func (i *memoryImage) Close() {
// Size returns the size of the image as stored, if known, or -1 if not. // Size returns the size of the image as stored, if known, or -1 if not.
func (i *memoryImage) Size() (int64, error) { func (i *memoryImage) Size() (int64, error) {
s, err := i.serialize() return -1, nil
if err != nil {
return -1, err
}
return int64(len(s)), nil
} }
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.

View file

@ -2,13 +2,13 @@ package image
import ( import (
"encoding/json" "encoding/json"
"fmt"
"io/ioutil" "io/ioutil"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
) )
type manifestOCI1 struct { type manifestOCI1 struct {
@ -59,7 +59,7 @@ func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
func (m *manifestOCI1) ConfigBlob() ([]byte, error) { func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
if m.configBlob == nil { if m.configBlob == nil {
if m.src == nil { if m.src == nil {
return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
} }
stream, _, err := m.src.GetBlob(types.BlobInfo{ stream, _, err := m.src.GetBlob(types.BlobInfo{
Digest: m.ConfigDescriptor.Digest, Digest: m.ConfigDescriptor.Digest,
@ -76,7 +76,7 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
} }
computedDigest := digest.FromBytes(blob) computedDigest := digest.FromBytes(blob)
if computedDigest != m.ConfigDescriptor.Digest { if computedDigest != m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
} }
m.configBlob = blob m.configBlob = blob
} }
@ -125,7 +125,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
copy := *m // NOTE: This is not a deep copy, it still shares slices etc. copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
if options.LayerInfos != nil { if options.LayerInfos != nil {
if len(copy.LayersDescriptors) != len(options.LayerInfos) { if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
} }
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
for i, info := range options.LayerInfos { for i, info := range options.LayerInfos {
@ -139,7 +139,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
case manifest.DockerV2Schema2MediaType: case manifest.DockerV2Schema2MediaType:
return copy.convertToManifestSchema2() return copy.convertToManifestSchema2()
default: default:
return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
} }
return memoryImageFromManifest(&copy), nil return memoryImageFromManifest(&copy), nil

View file

@ -1,11 +1,10 @@
package image package image
import ( import (
"fmt"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
) )
// UnparsedImage implements types.UnparsedImage . // UnparsedImage implements types.UnparsedImage .
@ -56,10 +55,10 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) {
digest := canonical.Digest() digest := canonical.Digest()
matches, err := manifest.MatchesDigest(m, digest) matches, err := manifest.MatchesDigest(m, digest)
if err != nil { if err != nil {
return nil, "", fmt.Errorf("Error computing manifest digest: %v", err) return nil, "", errors.Wrap(err, "Error computing manifest digest")
} }
if !matches { if !matches {
return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest) return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
} }
} }
} }

View file

@ -3,8 +3,8 @@ package manifest
import ( import (
"encoding/json" "encoding/json"
"github.com/docker/distribution/digest"
"github.com/docker/libtrust" "github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
) )

View file

@ -2,16 +2,16 @@ package layout
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"github.com/pkg/errors"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -44,7 +44,7 @@ func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *ociImageDestination) SupportsSignatures() error { func (d *ociImageDestination) SupportsSignatures() error {
return fmt.Errorf("Pushing signatures for OCI images is not supported") return errors.Errorf("Pushing signatures for OCI images is not supported")
} }
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@ -80,7 +80,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
} }
}() }()
digester := digest.Canonical.New() digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash()) tee := io.TeeReader(stream, digester.Hash())
size, err := io.Copy(blobFile, tee) size, err := io.Copy(blobFile, tee)
@ -89,7 +89,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
} }
computedDigest := digester.Digest() computedDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size { if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
} }
if err := blobFile.Sync(); err != nil { if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
@ -114,7 +114,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" { if info.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`) return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
} }
blobPath, err := d.ref.blobPath(info.Digest) blobPath, err := d.ref.blobPath(info.Digest)
if err != nil { if err != nil {
@ -169,7 +169,7 @@ func createManifest(m []byte) ([]byte, string, error) {
case imgspecv1.MediaTypeImageManifest: case imgspecv1.MediaTypeImageManifest:
return m, mt, nil return m, mt, nil
} }
return nil, "", fmt.Errorf("unrecognized manifest media type %q", mt) return nil, "", errors.Errorf("unrecognized manifest media type %q", mt)
} }
func (d *ociImageDestination) PutManifest(m []byte) error { func (d *ociImageDestination) PutManifest(m []byte) error {
@ -227,7 +227,7 @@ func ensureParentDirectoryExists(path string) error {
func (d *ociImageDestination) PutSignatures(signatures [][]byte) error { func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
if len(signatures) != 0 { if len(signatures) != 0 {
return fmt.Errorf("Pushing signatures for OCI images is not supported") return errors.Errorf("Pushing signatures for OCI images is not supported")
} }
return nil return nil
} }

View file

@ -8,7 +8,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
) )

View file

@ -1,7 +1,6 @@
package layout package layout
import ( import (
"errors"
"fmt" "fmt"
"path/filepath" "path/filepath"
"regexp" "regexp"
@ -11,7 +10,8 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/image" "github.com/containers/image/image"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
// Transport is an ImageTransport for OCI directories. // Transport is an ImageTransport for OCI directories.
@ -43,16 +43,16 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
dir = scope[:sep] dir = scope[:sep]
tag := scope[sep+1:] tag := scope[sep+1:]
if !refRegexp.MatchString(tag) { if !refRegexp.MatchString(tag) {
return fmt.Errorf("Invalid tag %s", tag) return errors.Errorf("Invalid tag %s", tag)
} }
} }
if strings.Contains(dir, ":") { if strings.Contains(dir, ":") {
return fmt.Errorf("Invalid OCI reference %s: path contains a colon", scope) return errors.Errorf("Invalid OCI reference %s: path contains a colon", scope)
} }
if !strings.HasPrefix(dir, "/") { if !strings.HasPrefix(dir, "/") {
return fmt.Errorf("Invalid scope %s: must be an absolute path", scope) return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
} }
// Refuse also "/", otherwise "/" and "" would have the same semantics, // Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry. // and "" could be unexpectedly shadowed by the "/" entry.
@ -62,7 +62,7 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
} }
cleaned := filepath.Clean(dir) cleaned := filepath.Clean(dir)
if cleaned != dir { if cleaned != dir {
return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
} }
return nil return nil
} }
@ -106,10 +106,10 @@ func NewReference(dir, tag string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity. // from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") { if strings.Contains(resolved, ":") {
return nil, fmt.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved) return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
} }
if !refRegexp.MatchString(tag) { if !refRegexp.MatchString(tag) {
return nil, fmt.Errorf("Invalid tag %s", tag) return nil, errors.Errorf("Invalid tag %s", tag)
} }
return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil
} }
@ -191,7 +191,7 @@ func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.Ima
// DeleteImage deletes the named image from the registry, if supported. // DeleteImage deletes the named image from the registry, if supported.
func (ref ociReference) DeleteImage(ctx *types.SystemContext) error { func (ref ociReference) DeleteImage(ctx *types.SystemContext) error {
return fmt.Errorf("Deleting images not implemented for oci: images") return errors.Errorf("Deleting images not implemented for oci: images")
} }
// ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions. // ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions.
@ -202,7 +202,7 @@ func (ref ociReference) ociLayoutPath() string {
// blobPath returns a path for a blob within a directory using OCI image-layout conventions. // blobPath returns a path for a blob within a directory using OCI image-layout conventions.
func (ref ociReference) blobPath(digest digest.Digest) (string, error) { func (ref ociReference) blobPath(digest digest.Digest) (string, error) {
if err := digest.Validate(); err != nil { if err := digest.Validate(); err != nil {
return "", fmt.Errorf("unexpected digest reference %s: %v", digest, err) return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
} }
return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil
} }

View file

@ -4,7 +4,6 @@ import (
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net" "net"
@ -18,6 +17,7 @@ import (
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"github.com/imdario/mergo" "github.com/imdario/mergo"
"github.com/pkg/errors"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/homedir" "k8s.io/kubernetes/pkg/util/homedir"
utilnet "k8s.io/kubernetes/pkg/util/net" utilnet "k8s.io/kubernetes/pkg/util/net"
@ -348,20 +348,20 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
if len(clusterInfo.Server) == 0 { if len(clusterInfo.Server) == 0 {
if len(clusterName) == 0 { if len(clusterName) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
} else { } else {
validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
} }
} }
// Make sure CA data and CA file aren't both specified // Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
} }
if len(clusterInfo.CertificateAuthority) != 0 { if len(clusterInfo.CertificateAuthority) != 0 {
clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
defer clientCertCA.Close() defer clientCertCA.Close()
if err != nil { if err != nil {
validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
} }
} }
@ -385,36 +385,36 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
// Make sure cert data and file aren't both specified // Make sure cert data and file aren't both specified
if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
} }
// Make sure key data and file aren't both specified // Make sure key data and file aren't both specified
if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
} }
// Make sure a key is specified // Make sure a key is specified
if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
} }
if len(authInfo.ClientCertificate) != 0 { if len(authInfo.ClientCertificate) != 0 {
clientCertFile, err := os.Open(authInfo.ClientCertificate) clientCertFile, err := os.Open(authInfo.ClientCertificate)
defer clientCertFile.Close() defer clientCertFile.Close()
if err != nil { if err != nil {
validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
} }
} }
if len(authInfo.ClientKey) != 0 { if len(authInfo.ClientKey) != 0 {
clientKeyFile, err := os.Open(authInfo.ClientKey) clientKeyFile, err := os.Open(authInfo.ClientKey)
defer clientKeyFile.Close() defer clientKeyFile.Close()
if err != nil { if err != nil {
validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
} }
} }
} }
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) { if (len(methods) > 1) && (!usingAuthPath) {
validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
} }
return validationErrors return validationErrors
@ -518,7 +518,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
continue continue
} }
if err != nil { if err != nil {
errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
continue continue
} }
@ -623,7 +623,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
} }
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil { if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
} }
if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
@ -636,7 +636,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
} }
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil { if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
} }
if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
@ -706,7 +706,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
// Kubernetes API. // Kubernetes API.
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
if host == "" { if host == "" {
return nil, fmt.Errorf("host must be a URL or a host:port pair") return nil, errors.Errorf("host must be a URL or a host:port pair")
} }
base := host base := host
hostURL, err := url.Parse(base) hostURL, err := url.Parse(base)
@ -723,7 +723,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
return nil, err return nil, err
} }
if hostURL.Path != "" && hostURL.Path != "/" { if hostURL.Path != "" && hostURL.Path != "/" {
return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base) return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
} }
} }
@ -793,7 +793,7 @@ func transportNew(config *restConfig) (http.RoundTripper, error) {
// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
if len(config.Username) != 0 && len(config.BearerToken) != 0 { if len(config.Username) != 0 && len(config.BearerToken) != 0 {
return nil, fmt.Errorf("username/password or bearer token may be set, but not both") return nil, errors.Errorf("username/password or bearer token may be set, but not both")
} }
return rt, nil return rt, nil
@ -832,7 +832,7 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) {
return nil, nil return nil, nil
} }
if c.HasCA() && c.Insecure { if c.HasCA() && c.Insecure {
return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
} }
if err := loadTLSFiles(c); err != nil { if err := loadTLSFiles(c); err != nil {
return nil, err return nil, err

View file

@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -17,7 +16,8 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/image/version" "github.com/containers/image/version"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
) )
// openshiftClient is configuration for dealing with a single image stream, for reading or writing. // openshiftClient is configuration for dealing with a single image stream, for reading or writing.
@ -124,7 +124,7 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
if statusValid { if statusValid {
return nil, errors.New(status.Message) return nil, errors.New(status.Message)
} }
return nil, fmt.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body)) return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
} }
return body, nil return body, nil
@ -151,7 +151,7 @@ func (c *openshiftClient) getImage(imageStreamImageName string) (*image, error)
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
parts := strings.SplitN(ref, "/", 2) parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 { if len(parts) != 2 {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref) return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
} }
return c.ref.dockerReference.Hostname() + "/" + parts[1], nil return c.ref.dockerReference.Hostname() + "/" + parts[1], nil
} }
@ -267,7 +267,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
} }
} }
if te == nil { if te == nil {
return fmt.Errorf("No matching tag found") return errors.Errorf("No matching tag found")
} }
logrus.Debugf("tag event %#v", te) logrus.Debugf("tag event %#v", te)
dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
@ -386,7 +386,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error { func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
if d.imageStreamImageName == "" { if d.imageStreamImageName == "" {
return fmt.Errorf("Internal error: Unknown manifest digest, can't add signatures") return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
} }
// Because image signatures are a shared resource in Atomic Registry, the default upload // Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures. // always adds signatures. Eventually we should also allow removing signatures.
@ -418,7 +418,7 @@ sigExists:
randBytes := make([]byte, 16) randBytes := make([]byte, 16)
n, err := rand.Read(randBytes) n, err := rand.Read(randBytes)
if err != nil || n != 16 { if err != nil || n != 16 {
return fmt.Errorf("Error generating random signature ID: %v, len %d", err, n) return errors.Wrapf(err, "Error generating random signature len %d", n)
} }
signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes) signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
if _, ok := existingSigNames[signatureName]; !ok { if _, ok := existingSigNames[signatureName]; !ok {

View file

@ -9,6 +9,7 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
genericImage "github.com/containers/image/image" genericImage "github.com/containers/image/image"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
) )
// Transport is an ImageTransport for OpenShift registry-hosted images. // Transport is an ImageTransport for OpenShift registry-hosted images.
@ -36,7 +37,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// scope passed to this function will not be "", that value is always allowed. // scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
if scopeRegexp.FindStringIndex(scope) == nil { if scopeRegexp.FindStringIndex(scope) == nil {
return fmt.Errorf("Invalid scope name %s", scope) return errors.Errorf("Invalid scope name %s", scope)
} }
return nil return nil
} }
@ -52,11 +53,11 @@ type openshiftReference struct {
func ParseReference(ref string) (types.ImageReference, error) { func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNamed(ref) r, err := reference.ParseNamed(ref)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse image reference %q, %v", ref, err) return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
} }
tagged, ok := r.(reference.NamedTagged) tagged, ok := r.(reference.NamedTagged)
if !ok { if !ok {
return nil, fmt.Errorf("invalid image reference %s, %#v", ref, r) return nil, errors.Errorf("invalid image reference %s, %#v", ref, r)
} }
return NewReference(tagged) return NewReference(tagged)
} }
@ -65,7 +66,7 @@ func ParseReference(ref string) (types.ImageReference, error) {
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(dockerRef.RemoteName(), "/", 3) r := strings.SplitN(dockerRef.RemoteName(), "/", 3)
if len(r) != 2 { if len(r) != 2 {
return nil, fmt.Errorf("invalid image reference %s", dockerRef.String()) return nil, errors.Errorf("invalid image reference %s", dockerRef.String())
} }
return openshiftReference{ return openshiftReference{
namespace: r[0], namespace: r[0],
@ -146,5 +147,5 @@ func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (typ
// DeleteImage deletes the named image from the registry, if supported. // DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error { func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error {
return fmt.Errorf("Deleting images not implemented for atomic: images") return errors.Errorf("Deleting images not implemented for atomic: images")
} }

View file

@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
// SignDockerManifest returns a signature for manifest as the specified dockerReference, // SignDockerManifest returns a signature for manifest as the specified dockerReference,

View file

@ -76,9 +76,6 @@ func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error)
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
key, err := m.ctx.GetKey(keyIdentity, true) key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil { if err != nil {
if e, ok := err.(gpgme.Error); ok && e.Code() == gpgme.ErrorEOF {
return nil, fmt.Errorf("key %q not found", keyIdentity)
}
return nil, err return nil, err
} }
inputData, err := gpgme.NewDataBytes(input) inputData, err := gpgme.NewDataBytes(input)

View file

@ -15,11 +15,12 @@ package signature
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"path/filepath" "path/filepath"
"github.com/pkg/errors"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/transports" "github.com/containers/image/transports"
"github.com/containers/image/types" "github.com/containers/image/types"
@ -405,7 +406,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
case !gotKeyPath && !gotKeyData: case !gotKeyPath && !gotKeyData:
return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified") return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
default: // Coverage: This should never happen default: // Coverage: This should never happen
return fmt.Errorf("Impossible keyPath/keyData presence combination!?") return errors.Errorf("Impossible keyPath/keyData presence combination!?")
} }
if err != nil { if err != nil {
return err return err

View file

@ -6,10 +6,9 @@
package signature package signature
import ( import (
"fmt"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
) )
// PolicyRequirementError is an explanatory text for rejecting a signature or an image. // PolicyRequirementError is an explanatory text for rejecting a signature or an image.
@ -95,7 +94,7 @@ const (
// changeContextState changes pc.state, or fails if the state is unexpected // changeContextState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error { func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected { if pc.state != expected {
return fmt.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state) return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
} }
pc.state = new pc.state = new
return nil return nil

View file

@ -3,15 +3,16 @@
package signature package signature
import ( import (
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"strings" "strings"
"github.com/pkg/errors"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@ -19,10 +20,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
case SBKeyTypeGPGKeys: case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already? // FIXME? Reject this at policy parsing time already?
return sarRejected, nil, fmt.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType)) return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
default: default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid() // This should never happen, newPRSignedBy ensures KeyType.IsValid()
return sarRejected, nil, fmt.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType)) return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
} }
if pr.KeyPath != "" && pr.KeyData != nil { if pr.KeyPath != "" && pr.KeyData != nil {
@ -116,7 +117,7 @@ func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, er
// Huh?! This should not happen at all; treat it as any other invalid value. // Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough fallthrough
default: default:
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
} }
rejections = append(rejections, reason) rejections = append(rejections, reason)
} }

View file

@ -4,12 +4,13 @@ package signature
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"time" "time"
"github.com/pkg/errors"
"github.com/containers/image/version" "github.com/containers/image/version"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
const ( const (

View file

@ -3,12 +3,12 @@ package storage
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"time" "time"
"github.com/pkg/errors"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/image" "github.com/containers/image/image"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
@ -16,7 +16,7 @@ import (
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/storage" "github.com/containers/storage/storage"
ddigest "github.com/docker/distribution/digest" ddigest "github.com/opencontainers/go-digest"
) )
var ( var (
@ -78,7 +78,7 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
} }
img, err := imageRef.transport.store.GetImage(id) img, err := imageRef.transport.store.GetImage(id)
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading image %q: %v", id, err) return nil, errors.Wrapf(err, "error reading image %q", id)
} }
image := &storageImageSource{ image := &storageImageSource{
imageRef: imageRef, imageRef: imageRef,
@ -90,7 +90,7 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
SignatureSizes: []int{}, SignatureSizes: []int{},
} }
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
return nil, fmt.Errorf("error decoding metadata for source image: %v", err) return nil, errors.Wrap(err, "error decoding metadata for source image")
} }
return image, nil return image, nil
} }
@ -150,10 +150,10 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
// Set up to read the whole blob (the initial snippet, plus the rest) // Set up to read the whole blob (the initial snippet, plus the rest)
// while digesting it with either the default, or the passed-in digest, // while digesting it with either the default, or the passed-in digest,
// if one was specified. // if one was specified.
hasher := ddigest.Canonical.New() hasher := ddigest.Canonical.Digester()
if digest.Validate() == nil { if digest.Validate() == nil {
if a := digest.Algorithm(); a.Available() { if a := digest.Algorithm(); a.Available() {
hasher = a.New() hasher = a.Digester()
} }
} }
hash := "" hash := ""
@ -278,7 +278,7 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
if blobinfo.Digest == "" { if blobinfo.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`) return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
} }
for _, blob := range s.BlobList { for _, blob := range s.BlobList {
if blob.Digest == blobinfo.Digest { if blob.Digest == blobinfo.Digest {
@ -468,7 +468,7 @@ func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64,
} }
if layer.Metadata != "" { if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
return nil, -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err) return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
} }
} }
if layerMeta.CompressedSize <= 0 { if layerMeta.CompressedSize <= 0 {
@ -504,7 +504,7 @@ func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
offset += length offset += length
} }
if offset != len(signature) { if offset != len(signature) {
return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
} }
return sigslice, nil return sigslice, nil
} }
@ -513,12 +513,12 @@ func (s *storageImageSource) getSize() (int64, error) {
var sum int64 var sum int64
names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
if err != nil { if err != nil {
return -1, fmt.Errorf("error reading image %q: %v", s.imageRef.id, err) return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id)
} }
for _, name := range names { for _, name := range names {
bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name) bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
if err != nil { if err != nil {
return -1, fmt.Errorf("error reading data blob size %q for %q: %v", name, s.imageRef.id, err) return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id)
} }
sum += bigSize sum += bigSize
} }
@ -536,11 +536,11 @@ func (s *storageImageSource) getSize() (int64, error) {
} }
if layer.Metadata != "" { if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
return -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err) return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
} }
} }
if layerMeta.Size < 0 { if layerMeta.Size < 0 {
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID) return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
} }
sum += layerMeta.Size sum += layerMeta.Size
} }

View file

@ -1,17 +1,18 @@
package storage package storage
import ( import (
"errors"
"path/filepath" "path/filepath"
"regexp" "regexp"
"strings" "strings"
"github.com/pkg/errors"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/storage/storage" "github.com/containers/storage/storage"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
ddigest "github.com/docker/distribution/digest" ddigest "github.com/opencontainers/go-digest"
) )
var ( var (
@ -94,7 +95,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
return nil, err return nil, err
} }
} }
sum, err = digest.ParseDigest("sha256:" + refInfo[1]) sum, err = digest.Parse("sha256:" + refInfo[1])
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -265,7 +266,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if err != nil { if err != nil {
return err return err
} }
_, err = ddigest.ParseDigest("sha256:" + scopeInfo[1]) _, err = ddigest.Parse("sha256:" + scopeInfo[1])
if err != nil { if err != nil {
return err return err
} }

View file

@ -11,6 +11,7 @@ import (
"github.com/containers/image/openshift" "github.com/containers/image/openshift"
"github.com/containers/image/storage" "github.com/containers/image/storage"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
) )
// KnownTransports is a registry of known ImageTransport instances. // KnownTransports is a registry of known ImageTransport instances.
@ -40,11 +41,11 @@ func init() {
func ParseImageName(imgName string) (types.ImageReference, error) { func ParseImageName(imgName string) (types.ImageReference, error) {
parts := strings.SplitN(imgName, ":", 2) parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 { if len(parts) != 2 {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
} }
transport, ok := KnownTransports[parts[0]] transport, ok := KnownTransports[parts[0]]
if !ok { if !ok {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
} }
return transport.ParseReference(parts[1]) return transport.ParseReference(parts[1])
} }

View file

@ -1,12 +1,13 @@
package types package types
import ( import (
"errors"
"io" "io"
"time" "time"
"github.com/pkg/errors"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
// ImageTransport is a top-level namespace for ways to to store/load an image. // ImageTransport is a top-level namespace for ways to to store/load an image.

View file

@ -11,9 +11,9 @@ import (
// Due to the way cgo works this has to be in a separate file, as devmapper.go has // Due to the way cgo works this has to be in a separate file, as devmapper.go has
// definitions in the cgo block, which is incompatible with using "//export" // definitions in the cgo block, which is incompatible with using "//export"
// DevmapperLogCallback exports the devmapper log callback for cgo. // StorageDevmapperLogCallback exports the devmapper log callback for cgo.
//export DevmapperLogCallback //export StorageDevmapperLogCallback
func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { func StorageDevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) {
msg := C.GoString(message) msg := C.GoString(message)
if level < 7 { if level < 7 {
if strings.Contains(msg, "busy") { if strings.Contains(msg, "busy") {

View file

@ -8,7 +8,7 @@ package devicemapper
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? #include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
// FIXME: Can't we find a way to do the logging in pure Go? // FIXME: Can't we find a way to do the logging in pure Go?
extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
{ {
@ -19,7 +19,7 @@ static void log_cb(int level, const char *file, int line, int dm_errno_or_class,
vsnprintf(buffer, 256, f, ap); vsnprintf(buffer, 256, f, ap);
va_end(ap); va_end(ap);
DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
} }
static void log_with_errno_init() static void log_with_errno_init()

View file

@ -1,4 +1,4 @@
// +build !autogen // +build !containersstorageautogen
// Package storageversion is auto-generated at build-time // Package storageversion is auto-generated at build-time
package storageversion package storageversion

View file

@ -1,3 +1,18 @@
// Copyright 2014 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code forked from Docker project // Code forked from Docker project
package daemon package daemon
@ -7,11 +22,14 @@ import (
) )
// SdNotify sends a message to the init daemon. It is common to ignore the error. // SdNotify sends a message to the init daemon. It is common to ignore the error.
// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
// will be unconditionally unset.
//
// It returns one of the following: // It returns one of the following:
// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) // (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data) // (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
// (true, nil) - notification supported, data has been sent // (true, nil) - notification supported, data has been sent
func SdNotify(state string) (sent bool, err error) { func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) {
socketAddr := &net.UnixAddr{ socketAddr := &net.UnixAddr{
Name: os.Getenv("NOTIFY_SOCKET"), Name: os.Getenv("NOTIFY_SOCKET"),
Net: "unixgram", Net: "unixgram",
@ -22,6 +40,13 @@ func SdNotify(state string) (sent bool, err error) {
return false, nil return false, nil
} }
if unsetEnvironment {
err = os.Unsetenv("NOTIFY_SOCKET")
}
if err != nil {
return false, err
}
conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
// Error connecting to NOTIFY_SOCKET // Error connecting to NOTIFY_SOCKET
if err != nil { if err != nil {

72
vendor/github.com/coreos/go-systemd/daemon/watchdog.go generated vendored Normal file
View file

@ -0,0 +1,72 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemon
import (
"fmt"
"os"
"strconv"
"time"
)
// SdWatchdogEnabled return watchdog information for a service.
// Process should send daemon.SdNotify("WATCHDOG=1") every time / 2.
// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC`
// and `WATCHDOG_PID` will be unconditionally unset.
//
// It returns one of the following:
// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
// (0, err) - an error happened (e.g. error converting time).
// (time, nil) - watchdog is enabled and we can send ping.
// time is delay before inactive service will be killed.
func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
wusec := os.Getenv("WATCHDOG_USEC")
wpid := os.Getenv("WATCHDOG_PID")
if unsetEnvironment {
wusecErr := os.Unsetenv("WATCHDOG_USEC")
wpidErr := os.Unsetenv("WATCHDOG_PID")
if wusecErr != nil {
return 0, wusecErr
}
if wpidErr != nil {
return 0, wpidErr
}
}
if wusec == "" {
return 0, nil
}
s, err := strconv.Atoi(wusec)
if err != nil {
return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
}
if s <= 0 {
return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
}
interval := time.Duration(s) * time.Microsecond
if wpid == "" {
return interval, nil
}
p, err := strconv.Atoi(wpid)
if err != nil {
return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
}
if os.Getpid() != p {
return 0, nil
}
return interval, nil
}

Some files were not shown because too many files have changed in this diff Show more