Antonio Murdaca 2016-09-19 14:46:28 +00:00 committed by GitHub
commit ac56cbc912
477 changed files with 56465 additions and 6650 deletions


@@ -8,6 +8,7 @@ import (
"os"
"time"
"github.com/Sirupsen/logrus"
pb "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"github.com/urfave/cli"
"golang.org/x/net/context"
@@ -233,7 +234,7 @@ func main() {
podSandboxCommand,
containerCommand,
runtimeVersionCommand,
pullImageCommand,
imagesCommand,
}
app.Flags = []cli.Flag{
@@ -250,6 +251,72 @@ func main() {
}
}
var imagesCommand = cli.Command{
Name: "image",
Subcommands: []cli.Command{
pullImageCommand,
removeImageCommand,
listImagesCommand,
},
}
var removeImageCommand = cli.Command{
Name: "delete",
Usage: "delete an image",
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection()
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewImageServiceClient(conn)
if err := RemoveImage(client, context.Args().Get(0)); err != nil {
return err
}
return nil
},
}
func RemoveImage(client pb.ImageServiceClient, image string) error {
_, err := client.RemoveImage(context.Background(), &pb.RemoveImageRequest{Image: &pb.ImageSpec{Image: &image}})
if err != nil {
return err
}
return nil
}
var listImagesCommand = cli.Command{
Name: "list",
Usage: "list images",
Action: func(context *cli.Context) error {
// Set up a connection to the server.
conn, err := getClientConnection()
if err != nil {
return fmt.Errorf("failed to connect: %v", err)
}
defer conn.Close()
client := pb.NewImageServiceClient(conn)
if err := ListImages(client); err != nil {
return err
}
return nil
},
}
func ListImages(client pb.ImageServiceClient) error {
resp, err := client.ListImages(context.Background(), &pb.ListImagesRequest{})
if err != nil {
return fmt.Errorf("listing images failed: %v", err)
}
for _, i := range resp.Images {
logrus.Println(i.String())
}
return nil
}
func PullImage(client pb.ImageServiceClient, image string) error {
_, err := client.PullImage(context.Background(), &pb.PullImageRequest{Image: &pb.ImageSpec{Image: &image}})
if err != nil {
@@ -258,9 +325,9 @@ func PullImage(client pb.ImageServiceClient, image string) error {
return nil
}
// try this with ./ocic pullimage docker://busybox
// try this with ./ocic image pull busybox
var pullImageCommand = cli.Command{
Name: "pullimage",
Name: "pull",
Usage: "pull an image",
Action: func(context *cli.Context) error {
// Set up a connection to the server.
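The client commands above all call getClientConnection(), which lies outside these hunks. A minimal sketch of what such a helper could look like, assuming the daemon serves gRPC over a Unix socket; the socket path and the helper body here are assumptions, not taken from this commit:

package main

import (
	"net"
	"time"

	"google.golang.org/grpc"
)

// getClientConnection dials the ocid daemon. Sketch only: the socket path
// below is hypothetical and the real helper may differ.
func getClientConnection() (*grpc.ClientConn, error) {
	return grpc.Dial("/var/run/ocid.sock", grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
}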


@@ -7,6 +7,8 @@ import (
"os"
"github.com/Sirupsen/logrus"
sreexec "github.com/containers/storage/pkg/reexec"
dreexec "github.com/docker/docker/pkg/reexec"
"github.com/kubernetes-incubator/ocid/server"
"github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"github.com/urfave/cli"
@@ -18,6 +20,13 @@ const (
)
func main() {
if sreexec.Init() {
return
}
if dreexec.Init() {
return
}
app := cli.NewApp()
app.Name = "ocic"
app.Usage = "client for ocid"
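Both reexec packages are initialized here because vendored containers/storage and docker code (for example pkg/chrootarchive) re-invoke the current binary under a registered name; Init() must run before anything else so that a re-exec'd child executes its registered function instead of falling through to main. A minimal sketch of the pattern, using the github.com/docker/docker/pkg/reexec API imported above (the "hello" entry point is a made-up example):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Register an entry point; when the binary is re-invoked with
	// os.Args[0] == "hello", Init runs it and returns true.
	reexec.Register("hello", func() {
		fmt.Println("hello from the re-exec'd child")
	})
}

func main() {
	if reexec.Init() {
		return // we are the re-exec'd child, not the normal CLI
	}
	out, err := reexec.Command("hello").Output() // re-invokes this binary
	if err == nil {
		fmt.Print(string(out))
	}
}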

glide.lock (generated)

@@ -1,5 +1,5 @@
hash: e1c100879f58ef4902c4e13d6710505f0ec62fbb5eaa237abeff8faf0ddf30f1
updated: 2016-09-17T15:49:39.168380014+02:00
hash: 06a42cd1074b800b7cff0bfa89b648dcff008fce0172bf1f5f42548ccb4baa26
updated: 2016-09-19T16:39:43.965362487+02:00
imports:
- name: github.com/containernetworking/cni
version: 9d5e6e60e79491207834ae8439e80c943db65a69
@@ -8,54 +8,89 @@ imports:
- pkg/invoke
- pkg/types
- name: github.com/containers/image
version: f6f11ab5cf8b1e70ef4aa3f8b6fdb4b671d16abd
version: ba9c203ae29e53c7beac408141598fbc63df1ead
repo: git@github.com:nalind/image
subpackages:
- directory
- image
- transports
- directory/explicitfilepath
- types
- manifest
- signatures
- copy
- storage
- docker
- signature
- types
- docker/policyconfiguration
- manifest
- version
- oci/layout
- openshift
- docker/policyconfiguration
- version
- directory/explicitfilepath
- name: github.com/containers/storage
version: 32264f11f3b997e8d2ba032a6a13ebdfcddc2897
subpackages:
- storage
- pkg/reexec
- drivers
- drivers/register
- pkg/archive
- pkg/idtools
- pkg/ioutils
- pkg/stringid
- storageversion
- pkg/chrootarchive
- pkg/mount
- pkg/plugins
- drivers/aufs
- drivers/btrfs
- drivers/devmapper
- drivers/overlay
- drivers/overlay2
- drivers/vfs
- drivers/windows
- drivers/zfs
- pkg/fileutils
- pkg/pools
- pkg/promise
- pkg/system
- pkg/longpath
- pkg/random
- pkg/plugins/transport
- pkg/directory
- pkg/parsers
- pkg/devicemapper
- pkg/loopback
- pkg/parsers/kernel
- name: github.com/docker/distribution
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
subpackages:
- digest
- reference
- context
- uuid
- name: github.com/docker/docker
version: b9f10c951893f9a00865890a5232e85d770c1087
version: 23cf638307f030cd8d48c9efc21feec18a6f88f8
subpackages:
- pkg/jsonlog
- pkg/jsonmessage
- pkg/longpath
- pkg/mount
- pkg/stdcopy
- pkg/symlink
- pkg/system
- pkg/term
- pkg/term/windows
- pkg/reexec
- pkg/homedir
- reference
- image
- image/v1
- pkg/homedir
- pkg/ioutils
- layer
- pkg/version
- pkg/longpath
- daemon/graphdriver
- pkg/archive
- pkg/idtools
- pkg/ioutils
- pkg/stringid
- pkg/chrootarchive
- pkg/mount
- pkg/plugins
- pkg/fileutils
- pkg/pools
- pkg/promise
- pkg/system
- pkg/random
- pkg/reexec
- pkg/plugins/transport
- name: github.com/docker/engine-api
version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd
@@ -120,14 +155,29 @@ imports:
subpackages:
- jsonpb
- proto
- name: github.com/gorilla/context
version: 215affda49addc4c8ef7e2534915df2c8c35c6cd
- name: github.com/gorilla/mux
version: 8096f47503459bcc74d1f4c487b7e6e42e5746b5
- name: github.com/imdario/mergo
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/kubernetes/kubernetes
version: ff3ca3d616518087dc20180f69bb4038379f1028
subpackages:
- pkg/kubelet/api/v1alpha1/runtime
- name: github.com/mattn/go-shellwords
version: 525bedee691b5a8df547cb5cf9f86b7fb1883e24
- name: github.com/Microsoft/go-winio
version: 8f9387ea7efabb228a981b9c381142be7667967f
subpackages:
- archive/tar
- backuptar
- name: github.com/Microsoft/hcsshim
version: d8e08e7d31d4f441646638b35f423a760d6dfbcd
- name: github.com/mistifyio/go-zfs
version: 1b4ae6fb4e77b095934d4430860ff202060169f8
- name: github.com/mtrmac/gpgme
version: 7486dcd030936581e863bb69602ef411920d7fc9
- name: github.com/opencontainers/image-spec
version: 287772a24ab02d5d2fe3fbbba4f13dff9b9ce003
subpackages:
@@ -160,6 +210,8 @@ imports:
version: 7dab1a245d3e13d0ad03b77409d0bf7e00a6ada4
subpackages:
- specs-go
- name: github.com/pborman/uuid
version: ca53cad383cad2479bbba7f7a1a05797ec1386e4
- name: github.com/rajatchopra/ocicni
version: daea0034a4bc2d193d9bf5031bace0403094011a
- name: github.com/Sirupsen/logrus
@@ -187,7 +239,7 @@ imports:
- internal/timeseries
- proxy
- name: golang.org/x/sys
version: 9c60d1c508f5134d1ca726b4641db998f2523357
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
subpackages:
- unix
- name: google.golang.org/grpc
@@ -204,7 +256,7 @@ imports:
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- name: k8s.io/kubernetes
version: 8fd414537b5143ab039cb910590237cabf4af783
version: 87b40782c9c1cc82ba42f75de4cf23126297e0a3
subpackages:
- pkg/util/errors
- pkg/util/homedir


@@ -3,10 +3,15 @@ import:
- package: github.com/Sirupsen/logrus
version: ~0.10.0
- package: github.com/containers/image
version: add-storage-transport
repo: git@github.com:nalind/image
subpackages:
- directory
- image
- transports
- signatures
- copy
- storage
- package: github.com/kubernetes/kubernetes
version: ~1.5.0-alpha.0
subpackages:
@@ -26,3 +31,11 @@ import:
- context
- package: google.golang.org/grpc
version: ~1.0.1-GA
- package: github.com/containers/storage
subpackages:
- storage
- pkg/reexec
- package: github.com/docker/docker
version: ~1.12.1
subpackages:
- pkg/reexec


@@ -2,11 +2,11 @@ package server
import (
"errors"
"os"
"path/filepath"
"github.com/containers/image/directory"
"github.com/containers/image/image"
ic "github.com/containers/image/copy"
"github.com/containers/image/docker"
"github.com/containers/image/signature"
"github.com/containers/image/storage"
"github.com/containers/image/transports"
pb "github.com/kubernetes/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"golang.org/x/net/context"
@@ -14,18 +14,34 @@ import (
// ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
// TODO
// containers/storage will take care of this by looking inside /var/lib/ocid/images
// and listing images.
return nil, nil
images, err := s.storage.Images()
if err != nil {
return nil, err
}
resp := pb.ListImagesResponse{}
for _, image := range images {
idCopy := image.ID
resp.Images = append(resp.Images, &pb.Image{
Id: &idCopy,
RepoTags: image.Names,
})
}
return &resp, nil
}
// ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
// TODO
// containers/storage will take care of this by looking inside /var/lib/ocid/images
// and getting the image status
return nil, nil
image, err := s.storage.GetImage(*(req.Image.Image))
if err != nil {
return nil, err
}
resp := pb.ImageStatusResponse{}
idCopy := image.ID
resp.Image = &pb.Image{
Id: &idCopy,
RepoTags: image.Names,
}
return &resp, nil
}
// PullImage pulls an image with authentication config.
@@ -36,64 +52,39 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P
}
// TODO(runcom): deal with AuthConfig in req.GetAuth()
// TODO(mrunalp,runcom): why do we need the SandboxConfig here?
// how do we pull in a specified sandbox?
tr, err := transports.ParseImageName(img)
if err != nil {
return nil, err
}
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
src, err := tr.NewImageSource(nil, nil)
if err != nil {
return nil, err
}
i := image.FromSource(src)
blobs, err := i.BlobDigests()
// FIXME: pretend that the image coming from k8s is in docker format
// because k8s hasn't yet figured out a way to support multiple image
// formats - https://github.com/kubernetes/features/issues/63
sr, err := transports.ParseImageName(docker.Transport.Name() + "://" + img)
if err != nil {
return nil, err
}
if err := os.Mkdir(filepath.Join(imageStore, tr.StringWithinTransport()), 0755); err != nil {
return nil, err
}
dir, err := directory.NewReference(filepath.Join(imageStore, tr.StringWithinTransport()))
dr, err := transports.ParseImageName(storage.Transport.Name() + ":" + sr.DockerReference().String())
if err != nil {
return nil, err
}
// TODO(runcom): figure out the ImageContext story in containers/image instead of passing ("", true)
dest, err := dir.NewImageDestination(nil)
if err != nil {
return nil, err
}
// save blobs (layer + config for docker v2s2, layers only for docker v2s1 [the config is in the manifest])
for _, b := range blobs {
// TODO(runcom,nalin): we need do-then-commit to later purge on error
r, _, err := src.GetBlob(b)
if err != nil {
return nil, err
}
if _, _, err := dest.PutBlob(r, b, -1); err != nil {
r.Close()
return nil, err
}
r.Close()
}
// save manifest
m, _, err := i.Manifest()
if err != nil {
return nil, err
}
if err := dest.PutManifest(m); err != nil {
return nil, err
}
// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
policy, err := signature.DefaultPolicy(s.imageContext)
if err != nil {
return nil, err
}
pc, err := signature.NewPolicyContext(policy)
if err != nil {
return nil, err
}
err = ic.Image(s.imageContext, pc, dr, sr, &ic.Options{})
if err != nil {
return nil, err
}
return &pb.PullImageResponse{}, nil
}
// RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
return nil, nil
_, err := s.storage.DeleteImage(*(req.Image.Image), true)
return &pb.RemoveImageResponse{}, err
}
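The rewritten PullImage above replaces the hand-rolled blob copy with containers/image's copy pipeline: pin the source to the docker transport (per the FIXME about kubernetes/features#63), derive a storage-transport destination from the source's Docker reference, and let the copy enforce the signature policy while writing into the store registered in server.New. A standalone sketch of that same flow, using only the 2016-era calls visible in this diff; the transport name "containers-storage" is an assumption about what storage.Transport.Name() returns:

package main

import (
	ic "github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
)

// pull mirrors the PullImage flow above; sketch only, not the server code.
func pull(sys *types.SystemContext, img string) error {
	src, err := transports.ParseImageName("docker://" + img)
	if err != nil {
		return err
	}
	dst, err := transports.ParseImageName("containers-storage:" + src.DockerReference().String())
	if err != nil {
		return err
	}
	policy, err := signature.DefaultPolicy(sys)
	if err != nil {
		return err
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	return ic.Image(sys, pc, dst, src, &ic.Options{})
}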


@@ -5,6 +5,9 @@ import (
"os"
"sync"
imagestorage "github.com/containers/image/storage"
imagetypes "github.com/containers/image/types"
"github.com/containers/storage/storage"
"github.com/kubernetes-incubator/ocid/oci"
"github.com/kubernetes-incubator/ocid/utils"
"github.com/rajatchopra/ocicni"
@@ -13,6 +16,8 @@ import (
const (
runtimeAPIVersion = "v1alpha1"
imageStore = "/var/lib/ocid/images"
storageRun = "/var/run/ocid/storage"
storageGraph = "/var/lib/ocid/storage"
)
// Server implements the RuntimeService and ImageService
@@ -22,6 +27,8 @@ type Server struct {
stateLock sync.Mutex
state *serverState
netPlugin ocicni.CNIPlugin
imageContext *imagetypes.SystemContext
storage storage.Store
}
// New creates a new Server with options provided
@@ -44,6 +51,11 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
}
sandboxes := make(map[string]*sandbox)
containers := make(map[string]*oci.Container)
store, err := storage.MakeStore(storageRun, storageGraph, "", []string{}, nil, nil)
if err != nil {
return nil, err
}
imagestorage.Transport.SetStore(store)
netPlugin, err := ocicni.InitCNI("")
if err != nil {
return nil, err
@@ -52,10 +64,17 @@ func New(runtimePath, sandboxDir, containerDir string) (*Server, error) {
runtime: r,
netPlugin: netPlugin,
sandboxDir: sandboxDir,
storage: store,
state: &serverState{
sandboxes: sandboxes,
containers: containers,
},
imageContext: &imagetypes.SystemContext{
RootForImplicitAbsolutePaths: "",
SignaturePolicyPath: "",
DockerCertPath: "",
DockerInsecureSkipTLSVerify: false,
},
}, nil
}


@@ -1,4 +1,4 @@
Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are


@@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
Xattrs map[string]string // extended attributes, carried in PAX "SCHILY.xattr." records
Winheaders map[string]string // Windows metadata, carried in PAX "MSWINDOWS." records (go-winio extension)
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxWindows = "MSWINDOWS."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
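// For example, a raw header byte 0xff contributes 255 to the unsigned sum
// but -1 (as int8) to the signed sum; verifyChecksum accepts either value,
// so archives written by signed-byte tars still validate.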
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
switch flag {
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
return true
default:
return false
}
}
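This vendored fork keeps the standard archive/tar surface for header construction, so the usual entry point still applies: stat a file, build a Header with FileInfoHeader, then fix up Name with the full archive path (FileInfoHeader only sees the base name). A small usage sketch; the file path is a made-up example and the import path is assumed from the glide.lock entry above:

package main

import (
	"fmt"
	"log"
	"os"

	tar "github.com/Microsoft/go-winio/archive/tar" // import path assumed for this vendored copy
)

func main() {
	fi, err := os.Stat("/etc/hostname") // hypothetical example path
	if err != nil {
		log.Fatal(err)
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	hdr.Name = "etc/hostname" // callers supply the full archive path
	fmt.Println(hdr.Name, hdr.Size, hdr.Mode)
}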


@@ -0,0 +1,996 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - pax extensions
import (
"bytes"
"errors"
"io"
"io/ioutil"
"math"
"os"
"strconv"
"strings"
"time"
)
var (
ErrHeader = errors.New("archive/tar: invalid tar header")
)
const maxNanoSecondIntSize = 9
// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first),
// and then it can be treated as an io.Reader to access the file's data.
type Reader struct {
r io.Reader
err error
pad int64 // amount of padding (ignored) after current file entry
curr numBytesReader // reader for current file entry
hdrBuff [blockSize]byte // buffer to use in readHeader
}
type parser struct {
err error // Last error seen
}
// A numBytesReader is an io.Reader with a numBytes method, returning the number
// of bytes remaining in the underlying encoded data.
type numBytesReader interface {
io.Reader
numBytes() int64
}
// A regFileReader is a numBytesReader for reading file data from a tar archive.
type regFileReader struct {
r io.Reader // underlying reader
nb int64 // number of unread bytes for current file entry
}
// A sparseFileReader is a numBytesReader for reading sparse file data from a
// tar archive.
type sparseFileReader struct {
rfr numBytesReader // Reads the sparse-encoded file data
sp []sparseEntry // The sparse map for the file
pos int64 // Keeps track of file position
total int64 // Total size of the file
}
// A sparseEntry holds a single entry in a sparse file's sparse map.
//
// Sparse files are represented using a series of sparseEntrys.
// Despite the name, a sparseEntry represents an actual data fragment that
// references data found in the underlying archive stream. All regions not
// covered by a sparseEntry are logically filled with zeros.
//
// For example, if the underlying raw file contains the 8-byte data:
// var compactData = "abcdefgh"
//
// And the sparse map has the following entries:
// var sp = []sparseEntry{
// {offset: 2, numBytes: 5} // Data fragment for [2..7]
// {offset: 18, numBytes: 3} // Data fragment for [18..21]
// }
//
// Then the content of the resulting sparse file with a "real" size of 25 is:
// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type sparseEntry struct {
offset int64 // Starting position of the fragment
numBytes int64 // Length of the fragment
}
// Keywords for GNU sparse files in a PAX extended header
const (
paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
paxGNUSparseOffset = "GNU.sparse.offset"
paxGNUSparseNumBytes = "GNU.sparse.numbytes"
paxGNUSparseMap = "GNU.sparse.map"
paxGNUSparseName = "GNU.sparse.name"
paxGNUSparseMajor = "GNU.sparse.major"
paxGNUSparseMinor = "GNU.sparse.minor"
paxGNUSparseSize = "GNU.sparse.size"
paxGNUSparseRealSize = "GNU.sparse.realsize"
)
// Keywords for old GNU sparse headers
const (
oldGNUSparseMainHeaderOffset = 386
oldGNUSparseMainHeaderIsExtendedOffset = 482
oldGNUSparseMainHeaderNumEntries = 4
oldGNUSparseExtendedHeaderIsExtendedOffset = 504
oldGNUSparseExtendedHeaderNumEntries = 21
oldGNUSparseOffsetSize = 12
oldGNUSparseNumBytesSize = 12
)
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
// Next advances to the next entry in the tar archive.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
if tr.err != nil {
return nil, tr.err
}
var hdr *Header
var extHdrs map[string]string
// Externally, Next iterates through the tar archive as if it is a series of
// files. Internally, the tar format often uses fake "files" to add meta
// data that describes the next file. These meta data "files" should not
// normally be visible to the outside. As such, this loop iterates through
// one or more "header files" until it finds a "normal file".
loop:
for {
tr.err = tr.skipUnread()
if tr.err != nil {
return nil, tr.err
}
hdr = tr.readHeader()
if tr.err != nil {
return nil, tr.err
}
// Check for PAX/GNU special headers and files.
switch hdr.Typeflag {
case TypeXHeader:
extHdrs, tr.err = parsePAX(tr)
if tr.err != nil {
return nil, tr.err
}
continue loop // This is a meta header affecting the next header
case TypeGNULongName, TypeGNULongLink:
var realname []byte
realname, tr.err = ioutil.ReadAll(tr)
if tr.err != nil {
return nil, tr.err
}
// Convert GNU extensions to use PAX headers.
if extHdrs == nil {
extHdrs = make(map[string]string)
}
var p parser
switch hdr.Typeflag {
case TypeGNULongName:
extHdrs[paxPath] = p.parseString(realname)
case TypeGNULongLink:
extHdrs[paxLinkpath] = p.parseString(realname)
}
if p.err != nil {
tr.err = p.err
return nil, tr.err
}
continue loop // This is a meta header affecting the next header
default:
mergePAX(hdr, extHdrs)
// Check for a PAX format sparse file
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
if err != nil {
tr.err = err
return nil, err
}
if sp != nil {
// Current file is a PAX format GNU sparse file.
// Set the current file reader to a sparse file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil, tr.err
}
}
break loop // This is a file, so stop
}
}
return hdr, nil
}
// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
// be treated as a regular file.
func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
var sparseFormat string
// Check for sparse format indicators
major, majorOk := headers[paxGNUSparseMajor]
minor, minorOk := headers[paxGNUSparseMinor]
sparseName, sparseNameOk := headers[paxGNUSparseName]
_, sparseMapOk := headers[paxGNUSparseMap]
sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
// Identify which, if any, sparse format applies from which PAX headers are set
if majorOk && minorOk {
sparseFormat = major + "." + minor
} else if sparseNameOk && sparseMapOk {
sparseFormat = "0.1"
} else if sparseSizeOk {
sparseFormat = "0.0"
} else {
// Not a PAX format GNU sparse file.
return nil, nil
}
// Check for unknown sparse format
if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
return nil, nil
}
// Update hdr from GNU sparse PAX headers
if sparseNameOk {
hdr.Name = sparseName
}
if sparseSizeOk {
realSize, err := strconv.ParseInt(sparseSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
} else if sparseRealSizeOk {
realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
}
// Set up the sparse map, according to the particular sparse format in use
var sp []sparseEntry
var err error
switch sparseFormat {
case "0.0", "0.1":
sp, err = readGNUSparseMap0x1(headers)
case "1.0":
sp, err = readGNUSparseMap1x0(tr.curr)
}
return sp, err
}
// mergePAX merges well known headers according to the PAX standard.
// A PAX record with the same meaning as a field in the header struct
// overwrites that field, since PAX values carry higher precision or
// longer values. This is especially useful for the name and linkname
// fields.
func mergePAX(hdr *Header, headers map[string]string) error {
for k, v := range headers {
switch k {
case paxPath:
hdr.Name = v
case paxLinkpath:
hdr.Linkname = v
case paxGname:
hdr.Gname = v
case paxUname:
hdr.Uname = v
case paxUid:
uid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Uid = int(uid)
case paxGid:
gid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Gid = int(gid)
case paxAtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.AccessTime = t
case paxMtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ModTime = t
case paxCtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ChangeTime = t
case paxSize:
size, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Size = int64(size)
default:
if strings.HasPrefix(k, paxXattr) {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
hdr.Xattrs[k[len(paxXattr):]] = v
} else if strings.HasPrefix(k, paxWindows) {
if hdr.Winheaders == nil {
hdr.Winheaders = make(map[string]string)
}
hdr.Winheaders[k[len(paxWindows):]] = v
}
}
}
return nil
}
// parsePAXTime takes a string of the form %d.%d as described in
// the PAX specification.
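// For example, "1350244992.023960108" yields 1350244992 seconds and
// 23960108 nanoseconds ("023960108" is already nine digits, so the
// padding step below leaves it unchanged).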
func parsePAXTime(t string) (time.Time, error) {
buf := []byte(t)
pos := bytes.IndexByte(buf, '.')
var seconds, nanoseconds int64
var err error
if pos == -1 {
seconds, err = strconv.ParseInt(t, 10, 0)
if err != nil {
return time.Time{}, err
}
} else {
seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
if err != nil {
return time.Time{}, err
}
nano_buf := string(buf[pos+1:])
// Pad as needed before converting to a decimal.
// For example .030 -> .030000000 -> 30000000 nanoseconds
if len(nano_buf) < maxNanoSecondIntSize {
// Right pad
nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
} else if len(nano_buf) > maxNanoSecondIntSize {
// Right truncate
nano_buf = nano_buf[:maxNanoSecondIntSize]
}
nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
if err != nil {
return time.Time{}, err
}
}
ts := time.Unix(seconds, nanoseconds)
return ts, nil
}
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
sbuf := string(buf)
// For GNU PAX sparse format 0.0 support.
// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
var sparseMap bytes.Buffer
headers := make(map[string]string)
// Each record is constructed as
// "%d %s=%s\n", length, keyword, value
for len(sbuf) > 0 {
key, value, residual, err := parsePAXRecord(sbuf)
if err != nil {
return nil, ErrHeader
}
sbuf = residual
keyStr := string(key)
if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
sparseMap.WriteString(value)
sparseMap.Write([]byte{','})
} else {
// Normal key. Set the value in the headers map.
headers[keyStr] = string(value)
}
}
if sparseMap.Len() != 0 {
// Add sparse info to headers, chopping off the extra comma
sparseMap.Truncate(sparseMap.Len() - 1)
headers[paxGNUSparseMap] = sparseMap.String()
}
return headers, nil
}
// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
//
// A PAX record is of the following form:
// "%d %s=%s\n" % (size, key, value)
func parsePAXRecord(s string) (k, v, r string, err error) {
// The size field ends at the first space.
sp := strings.IndexByte(s, ' ')
if sp == -1 {
return "", "", s, ErrHeader
}
// Parse the first token as a decimal integer.
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
if perr != nil || n < 5 || int64(len(s)) < n {
return "", "", s, ErrHeader
}
// Extract everything between the space and the final newline.
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
if nl != "\n" {
return "", "", s, ErrHeader
}
// The first equals separates the key from the value.
eq := strings.IndexByte(rec, '=')
if eq == -1 {
return "", "", s, ErrHeader
}
return rec[:eq], rec[eq+1:], rem, nil
}
// parseString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func (*parser) parseString(b []byte) string {
n := 0
for n < len(b) && b[n] != 0 {
n++
}
return string(b[0:n])
}
// parseNumeric parses the input as being encoded in either base-256 or octal.
// This function may return negative numbers.
// If parsing fails or an integer overflow occurs, err will be set.
func (p *parser) parseNumeric(b []byte) int64 {
// Check for base-256 (binary) format first.
// If the first bit is set, then all following bits constitute a two's
// complement encoded number in big-endian byte order.
if len(b) > 0 && b[0]&0x80 != 0 {
// Handling negative numbers relies on the following identity:
// -a-1 == ^a
//
// If the number is negative, we use an inversion mask to invert the
// data bytes and treat the value as an unsigned number.
var inv byte // 0x00 if positive or zero, 0xff if negative
if b[0]&0x40 != 0 {
inv = 0xff
}
var x uint64
for i, c := range b {
c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
if i == 0 {
c &= 0x7f // Ignore signal bit in first byte
}
if (x >> 56) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
x = x<<8 | uint64(c)
}
if (x >> 63) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
if inv == 0xff {
return ^int64(x)
}
return int64(x)
}
// Normal case is base-8 (octal) format.
return p.parseOctal(b)
}
func (p *parser) parseOctal(b []byte) int64 {
// Because unused fields are filled with NULs, we need
// to skip leading NULs. Fields may also be padded with
// spaces or NULs.
// So we remove leading and trailing NULs and spaces to
// be sure.
b = bytes.Trim(b, " \x00")
if len(b) == 0 {
return 0
}
x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
if perr != nil {
p.err = ErrHeader
}
return int64(x)
}
// skipUnread skips any unread bytes in the existing file entry, as well as any
// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
// encountered in the data portion; it is okay to hit io.EOF in the padding.
//
// Note that this function still works properly even when sparse files are being
// used since numBytes returns the bytes remaining in the underlying io.Reader.
func (tr *Reader) skipUnread() error {
dataSkip := tr.numBytes() // Number of data bytes to skip
totalSkip := dataSkip + tr.pad // Total number of bytes to skip
tr.curr, tr.pad = nil, 0
// If possible, Seek to the last byte before the end of the data section.
// Do this because Seek is often lazy about reporting errors; this will mask
// the fact that the tar stream may be truncated. We can rely on the
// io.CopyN done shortly afterwards to trigger any IO errors.
var seekSkipped int64 // Number of bytes skipped via Seek
if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
// io.Seeker, but calling Seek always returns an error and performs
// no action. Thus, we try an innocent seek to the current position
// to see if Seek is really supported.
pos1, err := sr.Seek(0, os.SEEK_CUR)
if err == nil {
// Seek seems supported, so perform the real Seek.
pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
if err != nil {
tr.err = err
return tr.err
}
seekSkipped = pos2 - pos1
}
}
var copySkipped int64 // Number of bytes skipped via CopyN
copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
tr.err = io.ErrUnexpectedEOF
}
return tr.err
}
func (tr *Reader) verifyChecksum(header []byte) bool {
if tr.err != nil {
return false
}
var p parser
given := p.parseOctal(header[148:156])
unsigned, signed := checksum(header)
return p.err == nil && (given == unsigned || given == signed)
}
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary.
//
// The err will be set to io.EOF only when one of the following occurs:
// * Exactly 0 bytes are read and EOF is hit.
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() *Header {
header := tr.hdrBuff[:]
copy(header, zeroBlock)
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil // io.EOF is okay here
}
// Two blocks of zero bytes marks the end of the archive.
if bytes.Equal(header, zeroBlock[0:blockSize]) {
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil // io.EOF is okay here
}
if bytes.Equal(header, zeroBlock[0:blockSize]) {
tr.err = io.EOF
} else {
tr.err = ErrHeader // zero block and then non-zero block
}
return nil
}
if !tr.verifyChecksum(header) {
tr.err = ErrHeader
return nil
}
// Unpack
var p parser
hdr := new(Header)
s := slicer(header)
hdr.Name = p.parseString(s.next(100))
hdr.Mode = p.parseNumeric(s.next(8))
hdr.Uid = int(p.parseNumeric(s.next(8)))
hdr.Gid = int(p.parseNumeric(s.next(8)))
hdr.Size = p.parseNumeric(s.next(12))
hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
s.next(8) // chksum
hdr.Typeflag = s.next(1)[0]
hdr.Linkname = p.parseString(s.next(100))
// The remainder of the header depends on the value of magic.
// The original (v7) version of tar had no explicit magic field,
// so its magic bytes, like the rest of the block, are NULs.
magic := string(s.next(8)) // contains version field as well.
var format string
switch {
case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
if string(header[508:512]) == "tar\x00" {
format = "star"
} else {
format = "posix"
}
case magic == "ustar \x00": // old GNU tar
format = "gnu"
}
switch format {
case "posix", "gnu", "star":
hdr.Uname = p.parseString(s.next(32))
hdr.Gname = p.parseString(s.next(32))
devmajor := s.next(8)
devminor := s.next(8)
if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
hdr.Devmajor = p.parseNumeric(devmajor)
hdr.Devminor = p.parseNumeric(devminor)
}
var prefix string
switch format {
case "posix", "gnu":
prefix = p.parseString(s.next(155))
case "star":
prefix = p.parseString(s.next(131))
hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
}
if len(prefix) > 0 {
hdr.Name = prefix + "/" + hdr.Name
}
}
if p.err != nil {
tr.err = p.err
return nil
}
nb := hdr.Size
if isHeaderOnlyType(hdr.Typeflag) {
nb = 0
}
if nb < 0 {
tr.err = ErrHeader
return nil
}
// Set the current file reader.
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
tr.curr = &regFileReader{r: tr.r, nb: nb}
// Check for old GNU sparse format entry.
if hdr.Typeflag == TypeGNUSparse {
// Get the real size of the file.
hdr.Size = p.parseNumeric(header[483:495])
if p.err != nil {
tr.err = p.err
return nil
}
// Read the sparse map.
sp := tr.readOldGNUSparseMap(header)
if tr.err != nil {
return nil
}
// Current file is a GNU sparse file. Update the current file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil
}
}
return hdr
}
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
var p parser
isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
spCap := oldGNUSparseMainHeaderNumEntries
if isExtended {
spCap += oldGNUSparseExtendedHeaderNumEntries
}
sp := make([]sparseEntry, 0, spCap)
s := slicer(header[oldGNUSparseMainHeaderOffset:])
// Read the four entries from the main tar header
for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
if p.err != nil {
tr.err = p.err
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
for isExtended {
// There are more entries. Read an extension header and parse its entries.
sparseHeader := make([]byte, blockSize)
if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
return nil
}
isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
s = slicer(sparseHeader)
for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
if p.err != nil {
tr.err = p.err
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
}
return sp
}
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, numBytes). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
var cntNewline int64
var buf bytes.Buffer
var blk = make([]byte, blockSize)
// feedTokens copies data in numBlock chunks from r into buf until there are
// at least cnt newlines in buf. It will not read more blocks than needed.
var feedTokens = func(cnt int64) error {
for cntNewline < cnt {
if _, err := io.ReadFull(r, blk); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
buf.Write(blk)
for _, c := range blk {
if c == '\n' {
cntNewline++
}
}
}
return nil
}
// nextToken gets the next token delimited by a newline. This assumes that
// at least one newline exists in the buffer.
var nextToken = func() string {
cntNewline--
tok, _ := buf.ReadString('\n')
return tok[:len(tok)-1] // Cut off newline
}
// Parse for the number of entries.
// Use integer overflow resistant math to check this.
if err := feedTokens(1); err != nil {
return nil, err
}
numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// Parse for all member entries.
// numEntries is trusted after this since a potential attacker must have
// committed resources proportional to what this library used.
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(nextToken(), 10, 64)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
// Get number of entries.
// Use integer overflow resistant math to check this.
numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// There should be two numbers in sparseMap for each entry.
sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
if int64(len(sparseMap)) != 2*numEntries {
return nil, ErrHeader
}
// Loop through the entries in the sparse map.
// numEntries is trusted now.
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
// numBytes returns the number of bytes left to read in the current file's entry
// in the tar archive, or 0 if there is no current file.
func (tr *Reader) numBytes() int64 {
if tr.curr == nil {
// No current file, so no bytes
return 0
}
return tr.curr.numBytes()
}
// Read reads from the current entry in the tar archive.
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
//
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (n int, err error) {
if tr.err != nil {
return 0, tr.err
}
if tr.curr == nil {
return 0, io.EOF
}
n, err = tr.curr.Read(b)
if err != nil && err != io.EOF {
tr.err = err
}
return
}
func (rfr *regFileReader) Read(b []byte) (n int, err error) {
if rfr.nb == 0 {
// file consumed
return 0, io.EOF
}
if int64(len(b)) > rfr.nb {
b = b[0:rfr.nb]
}
n, err = rfr.r.Read(b)
rfr.nb -= int64(n)
if err == io.EOF && rfr.nb > 0 {
err = io.ErrUnexpectedEOF
}
return
}
// numBytes returns the number of bytes left to read in the file's data in the tar archive.
func (rfr *regFileReader) numBytes() int64 {
return rfr.nb
}
// newSparseFileReader creates a new sparseFileReader, but validates all of the
// sparse entries before doing so.
func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
if total < 0 {
return nil, ErrHeader // Total size cannot be negative
}
// Validate all sparse entries. These are the same checks as performed by
// the BSD tar utility.
for i, s := range sp {
switch {
case s.offset < 0 || s.numBytes < 0:
return nil, ErrHeader // Negative values are never okay
case s.offset > math.MaxInt64-s.numBytes:
return nil, ErrHeader // Integer overflow with large length
case s.offset+s.numBytes > total:
return nil, ErrHeader // Region extends beyond the "real" size
case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
return nil, ErrHeader // Regions can't overlap and must be in order
}
}
return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
}
// readHole reads a sparse hole ending at endOffset.
func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
n64 := endOffset - sfr.pos
if n64 > int64(len(b)) {
n64 = int64(len(b))
}
n := int(n64)
for i := 0; i < n; i++ {
b[i] = 0
}
sfr.pos += n64
return n
}
// Read reads the sparse file data in expanded form.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
// Skip past all empty fragments.
for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
sfr.sp = sfr.sp[1:]
}
// If there are no more fragments, then it is possible that there
// is one last sparse hole.
if len(sfr.sp) == 0 {
// This behavior matches the BSD tar utility.
// However, GNU tar stops returning data even if sfr.total is unmet.
if sfr.pos < sfr.total {
return sfr.readHole(b, sfr.total), nil
}
return 0, io.EOF
}
// In front of a data fragment, so read a hole.
if sfr.pos < sfr.sp[0].offset {
return sfr.readHole(b, sfr.sp[0].offset), nil
}
// In a data fragment, so read from it.
// This math is overflow free since we verify that offset and numBytes can
// be safely added when creating the sparseFileReader.
endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
bytesLeft := endPos - sfr.pos // Bytes left in fragment
if int64(len(b)) > bytesLeft {
b = b[:bytesLeft]
}
n, err = sfr.rfr.Read(b)
sfr.pos += int64(n)
if err == io.EOF {
if sfr.pos < endPos {
err = io.ErrUnexpectedEOF // There was supposed to be more data
} else if sfr.pos < sfr.total {
err = nil // There is still an implicit sparse hole at the end
}
}
if sfr.pos == endPos {
sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
}
return n, err
}
// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
func (sfr *sparseFileReader) numBytes() int64 {
return sfr.rfr.numBytes()
}
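The reader side follows the familiar archive/tar loop: Next skips past meta "header files" to each real entry, and the Reader then serves that entry's data, with sparse entries expanded to their full size. A usage sketch, reading a hypothetical archive file; the import path is assumed as above:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	tar "github.com/Microsoft/go-winio/archive/tar" // import path assumed for this vendored copy
)

func main() {
	f, err := os.Open("example.tar") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatal(err)
		}
		body, err := ioutil.ReadAll(tr) // sparse files come back zero-filled
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d bytes\n", hdr.Name, len(body))
	}
}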


@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}


@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}


@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}


@@ -0,0 +1,419 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// Pad with leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
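// For example, with the 12-byte size field, base-256 encoding covers values
// up to 2^88-1, whereas octal (11 digits plus a terminating NUL) caps out at
// 8^11-1, roughly 8 GiB.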
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// writeHeader writes hdr and prepares to accept the file's contents.
// It calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// writePAXHeader also calls this method with allowPax set to false,
// which suppresses emitting a PAX header for the PAX header entry itself.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var f formatter
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}
// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
tw.usedBinary = true
f.formatNumeric(b, x)
}
// keep a reference to the filename so it can be overwritten later if we detect that we can use ustar longnames instead of pax
pathHeaderBytes := s.next(fileNameSize)
formatString(pathHeaderBytes, hdr.Name, paxPath)
// Handle out of range ModTime carefully.
var modTime int64
if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
modTime = hdr.ModTime.Unix()
}
f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatNumeric(s.next(12), modTime, paxNone) // 136:148 --- consider using pax for finer granularity
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
formatString(s.next(100), hdr.Linkname, paxLinkpath)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
// keep a reference to the prefix so it can be overwritten later if we detect that we can use ustar longnames instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar  \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '
// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
return tw.err
}
if allowPax {
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
for k, v := range hdr.Winheaders {
paxHeaders[paxWindows+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}
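As a worked example of these rules, a hedged sketch (the real function also rejects non-ASCII names and caps the search window at fileNamePrefixSize+1 bytes):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// 121 bytes: too long for the 100-byte ustar name field, but splittable
	// at the last '/' into pieces that fit the prefix and name fields.
	name := strings.Repeat("a", 80) + "/" + strings.Repeat("b", 40)
	i := strings.LastIndex(name, "/")
	prefix, suffix := name[:i], name[i+1:]
	fmt.Println(len(prefix), len(suffix)) // 80 40
}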
// writePAXHeader writes an extended pax header to the archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
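The subtlety is that the length prefix counts its own digits, so the size is computed first and re-checked after formatting; a worked sketch of the common case where no re-adjustment is needed:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	k, v := "path", "a/b"
	size := len(k) + len(v) + 3     // ' ' + '=' + '\n' gives 10
	size += len(strconv.Itoa(size)) // plus the two digits of "10" gives 12
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
	fmt.Printf("%q has length %d\n", record, len(record)) // "12 path=a/b\n" has length 12
}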
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}
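End to end, this writer is driven like the standard library's archive/tar. A minimal hedged sketch, assuming the package is imported at the vendored path used elsewhere in this commit:

package main

import (
	"bytes"
	"log"

	"github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: 5, Typeflag: tar.TypeReg}
	if err := tw.WriteHeader(hdr); err != nil { // may emit a pax header first
		log.Fatal(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil { // at most hdr.Size bytes
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil { // pads the entry and writes two zero blocks
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}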

362
vendor/github.com/Microsoft/go-winio/backuptar/tar.go generated vendored Normal file
View file

@@ -0,0 +1,362 @@
package backuptar
import (
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
const (
hdrFileAttributes = "fileattr"
hdrAccessTime = "accesstime"
hdrChangeTime = "changetime"
hdrCreateTime = "createtime"
hdrWriteTime = "writetime"
hdrSecurityDescriptor = "sd"
hdrMountPoint = "mountpoint"
)
func writeZeroes(w io.Writer, count int64) error {
buf := make([]byte, 8192)
c := len(buf)
for i := int64(0); i < count; i += int64(c) {
if int64(c) > count-i {
c = int(count - i)
}
_, err := w.Write(buf[:c])
if err != nil {
return err
}
}
return nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if bhdr.Id != winio.BackupSparseBlock {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
if bhdr.Size == 0 {
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
curOffset = bhdr.Offset + n
}
return nil
}
func win32TimeFromTar(key string, hdrs map[string]string, unixTime time.Time) syscall.Filetime {
if s, ok := hdrs[key]; ok {
n, err := strconv.ParseUint(s, 10, 64)
if err == nil {
return syscall.Filetime{uint32(n & 0xffffffff), uint32(n >> 32)}
}
}
return syscall.NsecToFiletime(unixTime.UnixNano())
}
func win32TimeToTar(ft syscall.Filetime) (string, time.Time) {
return fmt.Sprintf("%d", uint64(ft.LowDateTime)+(uint64(ft.HighDateTime)<<32)), time.Unix(0, ft.Nanoseconds())
}
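The string form is simply the two FILETIME DWORDs packed into one 64-bit decimal; a tiny sketch of the arithmetic in both directions:

package main

import "fmt"

func main() {
	low, high := uint32(42), uint32(1)
	packed := uint64(low) + uint64(high)<<32 // what win32TimeToTar stores
	fmt.Println(packed)                      // 4294967338
	// what win32TimeFromTar recovers:
	fmt.Println(uint32(packed&0xffffffff), uint32(packed>>32)) // 42 1
}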
// Writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.accesstime: The last access time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.createtime: The creation time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.changetime: The last change time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.writetime: The last write time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.sd: The Win32 security descriptor, in SDDL (string) format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := &tar.Header{
Name: name,
Size: size,
Typeflag: tar.TypeReg,
Winheaders: make(map[string]string),
}
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
hdr.Winheaders[hdrAccessTime], hdr.AccessTime = win32TimeToTar(fileInfo.LastAccessTime)
hdr.Winheaders[hdrChangeTime], hdr.ChangeTime = win32TimeToTar(fileInfo.ChangeTime)
hdr.Winheaders[hdrCreateTime], _ = win32TimeToTar(fileInfo.CreationTime)
hdr.Winheaders[hdrWriteTime], hdr.ModTime = win32TimeToTar(fileInfo.LastWriteTime)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
br := winio.NewBackupStreamReader(r)
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= c_ISREG
dataHdr = bhdr
case winio.BackupSecurity:
sd, err := ioutil.ReadAll(br)
if err != nil {
return err
}
sddl, err := winio.SecurityDescriptorToSddl(sd)
if err != nil {
return err
}
hdr.Winheaders[hdrSecurityDescriptor] = sddl
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
hdr.Typeflag = tar.TypeSymlink
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
}
if rp.IsMountPoint {
hdr.Winheaders[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
}
}
err := t.WriteHeader(hdr)
if err != nil {
return err
}
if dataHdr != nil {
// A data stream was found. Copy the data.
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
err = copySparse(t, br)
if err != nil {
return err
}
}
}
// Look for streams after the data stream. The only ones we handle are alternate data streams.
// Other streams may have metadata that could be serialized, but the tar header has already
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupAlternateData:
altName := bhdr.Name
if strings.HasSuffix(altName, ":$DATA") {
altName = altName[:len(altName)-len(":$DATA")]
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return errors.New("tar of sparse alternate data streams is unsupported")
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
}
}
return nil
}
// Retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: win32TimeFromTar(hdrAccessTime, hdr.Winheaders, hdr.AccessTime),
LastWriteTime: win32TimeFromTar(hdrWriteTime, hdr.Winheaders, hdr.ModTime),
ChangeTime: win32TimeFromTar(hdrChangeTime, hdr.Winheaders, hdr.ChangeTime),
CreationTime: win32TimeFromTar(hdrCreateTime, hdr.Winheaders, hdr.ModTime),
}
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uintptr(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
return
}
// Writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
sd, err := winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
rp := winio.ReparsePoint{
Target: hdr.Linkname,
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name)+1:] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
}
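Because this call consumes any alternate-data-stream entries that follow a file, callers advance the tar reader through the returned header rather than calling Next themselves. A hedged sketch of such a driver loop; openForRestore is a hypothetical helper, not part of this package:

package restoredemo

import (
	"io"

	"github.com/Microsoft/go-winio/archive/tar"
	"github.com/Microsoft/go-winio/backuptar"
)

func restoreAll(tr *tar.Reader, openForRestore func(string) (io.WriteCloser, error)) error {
	hdr, err := tr.Next()
	for err == nil {
		name, _, _, ferr := backuptar.FileInfoFromHeader(hdr)
		if ferr != nil {
			return ferr
		}
		w, ferr := openForRestore(name) // hypothetical: opens a backup-write handle
		if ferr != nil {
			return ferr
		}
		// Copies the data plus any ADS entries; returns the next file's header.
		hdr, err = backuptar.WriteBackupStreamFromTarFile(w, tr, hdr)
		w.Close()
	}
	if err == io.EOF {
		return nil
	}
	return err
}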

21
vendor/github.com/Microsoft/hcsshim/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

28
vendor/github.com/Microsoft/hcsshim/activatelayer.go generated vendored Normal file
View file

@@ -0,0 +1,28 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(info DriverInfo, id string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = activateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
return nil
}
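A hedged sketch of the activate/deactivate pairing the comment above describes (Windows-only, and the DriverInfo values are placeholders):

package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	info := hcsshim.DriverInfo{HomeDir: `C:\layers`, Flavour: 1} // placeholder values
	id := "example-layer"
	if err := hcsshim.ActivateLayer(info, id); err != nil {
		log.Fatal(err)
	}
	// Every successful ActivateLayer must eventually be paired with DeactivateLayer.
	defer hcsshim.DeactivateLayer(info, id)

	path, err := hcsshim.GetLayerMountPath(info, id)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("layer mounted at", path)
}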

172
vendor/github.com/Microsoft/hcsshim/baselayer.go generated vendored Normal file
View file

@@ -0,0 +1,172 @@
package hcsshim
import (
"errors"
"os"
"path/filepath"
"syscall"
"github.com/Microsoft/go-winio"
)
type baseLayerWriter struct {
root string
f *os.File
bw *winio.BackupFileWriter
err error
hasUtilityVM bool
dirInfo []dirInfo
}
type dirInfo struct {
path string
fileInfo winio.FileBasicInfo
}
func (w *baseLayerWriter) closeCurrentFile() error {
if w.f != nil {
err := w.bw.Close()
err2 := w.f.Close()
w.f = nil
w.bw = nil
if err != nil {
return err
}
if err2 != nil {
return err2
}
}
return nil
}
func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
defer func() {
if err != nil {
w.err = err
}
}()
err = w.closeCurrentFile()
if err != nil {
return err
}
if filepath.ToSlash(name) == `UtilityVM/Files` {
w.hasUtilityVM = true
}
path := filepath.Join(w.root, name)
path, err = makeLongAbsPath(path)
if err != nil {
return err
}
var f *os.File
defer func() {
if f != nil {
f.Close()
}
}()
createmode := uint32(syscall.CREATE_NEW)
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
err := os.Mkdir(path, 0)
if err != nil && !os.IsExist(err) {
return err
}
createmode = syscall.OPEN_EXISTING
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo})
}
}
mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode)
if err != nil {
return makeError(err, "Failed to OpenForBackup", path)
}
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", path)
}
w.f = f
w.bw = winio.NewBackupFileWriter(f, true)
f = nil
return nil
}
func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
defer func() {
if err != nil {
w.err = err
}
}()
err = w.closeCurrentFile()
if err != nil {
return err
}
linkpath, err := makeLongAbsPath(filepath.Join(w.root, name))
if err != nil {
return err
}
linktarget, err := makeLongAbsPath(filepath.Join(w.root, target))
if err != nil {
return err
}
return os.Link(linktarget, linkpath)
}
func (w *baseLayerWriter) Remove(name string) error {
return errors.New("base layer cannot have tombstones")
}
func (w *baseLayerWriter) Write(b []byte) (int, error) {
n, err := w.bw.Write(b)
if err != nil {
w.err = err
}
return n, err
}
func (w *baseLayerWriter) Close() error {
err := w.closeCurrentFile()
if err != nil {
return err
}
if w.err == nil {
// Restore the file times of all the directories, since they may have
// been modified by creating child directories.
for i := range w.dirInfo {
di := &w.dirInfo[len(w.dirInfo)-i-1]
f, err := winio.OpenForBackup(di.path, uint32(syscall.GENERIC_READ|syscall.GENERIC_WRITE), syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
if err != nil {
return makeError(err, "Failed to OpenForBackup", di.path)
}
err = winio.SetFileBasicInfo(f, &di.fileInfo)
f.Close()
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", di.path)
}
}
err = ProcessBaseLayer(w.root)
if err != nil {
return err
}
if w.hasUtilityVM {
err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM"))
if err != nil {
return err
}
}
}
return w.err
}

79
vendor/github.com/Microsoft/hcsshim/callback.go generated vendored Normal file
View file

@@ -0,0 +1,79 @@
package hcsshim
import (
"sync"
"syscall"
)
var (
nextCallback uintptr
callbackMap = map[uintptr]*notifcationWatcherContext{}
callbackMapLock = sync.RWMutex{}
notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
// Notifications for HCS_SYSTEM handles
hcsNotificationSystemExited hcsNotification = 0x00000001
hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
// Notifications for HCS_PROCESS handles
hcsNotificationProcessExited hcsNotification = 0x00010000
// Common notifications
hcsNotificationInvalid hcsNotification = 0x00000000
hcsNotificationServiceDisconnect hcsNotification = 0x01000000
)
type hcsNotification uint32
type notificationChannel chan error
type notifcationWatcherContext struct {
channels notificationChannels
handle hcsCallback
}
type notificationChannels map[hcsNotification]notificationChannel
func newChannels() notificationChannels {
channels := make(notificationChannels)
channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
return channels
}
func closeChannels(channels notificationChannels) {
close(channels[hcsNotificationSystemExited])
close(channels[hcsNotificationSystemCreateCompleted])
close(channels[hcsNotificationSystemStartCompleted])
close(channels[hcsNotificationSystemPauseCompleted])
close(channels[hcsNotificationSystemResumeCompleted])
close(channels[hcsNotificationProcessExited])
close(channels[hcsNotificationServiceDisconnect])
}
func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
var result error
if int32(notificationStatus) < 0 {
result = syscall.Errno(win32FromHresult(notificationStatus))
}
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return 0
}
context.channels[notificationType] <- result
return 0
}
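On the consuming side (defined elsewhere in the package), a waiter simply receives from the channel for the notification it cares about. A hedged sketch of that pattern, assuming a "time" import and the package-level names above:

// Sketch only: wait for the exit notification, with an optional timeout.
func waitExitedSketch(ctx *notifcationWatcherContext, timeout *time.Duration) error {
	ch := ctx.channels[hcsNotificationSystemExited]
	if timeout == nil {
		return <-ch // blocks until notificationWatcher delivers a result
	}
	select {
	case err := <-ch:
		return err
	case <-time.After(*timeout):
		return ErrTimeout
	}
}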

7
vendor/github.com/Microsoft/hcsshim/cgo.go generated vendored Normal file
View file

@@ -0,0 +1,7 @@
package hcsshim
import "C"
// This import is needed to make the library compile as CGO because HCSSHIM
// only works with CGO due to callbacks from HCS coming back from a C thread
// which is not supported without CGO. See https://github.com/golang/go/issues/10973

565
vendor/github.com/Microsoft/hcsshim/container.go generated vendored Normal file
View file

@@ -0,0 +1,565 @@
package hcsshim
import (
"encoding/json"
"runtime"
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
var (
defaultTimeout = time.Minute * 4
)
const (
pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
)
type container struct {
handle hcsSystem
id string
callbackNumber uintptr
}
type containerProperties struct {
ID string `json:"Id"`
Name string
SystemType string
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
IsDummy bool `json:",omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
Stopped bool `json:",omitempty"`
ExitType string `json:",omitempty"`
AreUpdatesPending bool `json:",omitempty"`
ObRoot string `json:",omitempty"`
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
}
// MemoryStats holds the memory statistics for a container
type MemoryStats struct {
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}
// ProcessorStats holds the processor statistics for a container
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:",omitempty"`
RuntimeUser100ns uint64 `json:",omitempty"`
RuntimeKernel100ns uint64 `json:",omitempty"`
}
// StorageStats holds the storage statistics for a container
type StorageStats struct {
ReadCountNormalized uint64 `json:",omitempty"`
ReadSizeBytes uint64 `json:",omitempty"`
WriteCountNormalized uint64 `json:",omitempty"`
WriteSizeBytes uint64 `json:",omitempty"`
}
// NetworkStats holds the network statistics for a container
type NetworkStats struct {
BytesReceived uint64 `json:",omitempty"`
BytesSent uint64 `json:",omitempty"`
PacketsReceived uint64 `json:",omitempty"`
PacketsSent uint64 `json:",omitempty"`
DroppedPacketsIncoming uint64 `json:",omitempty"`
DroppedPacketsOutgoing uint64 `json:",omitempty"`
EndpointId string `json:",omitempty"`
InstanceId string `json:",omitempty"`
}
// Statistics is the structure returned by a statistics call on a container
type Statistics struct {
Timestamp time.Time `json:",omitempty"`
ContainerStartTime time.Time `json:",omitempty"`
Uptime100ns uint64 `json:",omitempty"`
Memory MemoryStats `json:",omitempty"`
Processor ProcessorStats `json:",omitempty"`
Storage StorageStats `json:",omitempty"`
Network []NetworkStats `json:",omitempty"`
}
// ProcessList is the structure of an item returned by a ProcessList call on a container
type ProcessListItem struct {
CreateTimestamp time.Time `json:",omitempty"`
ImageName string `json:",omitempty"`
KernelTime100ns uint64 `json:",omitempty"`
MemoryCommitBytes uint64 `json:",omitempty"`
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
ProcessId uint32 `json:",omitempty"`
UserTime100ns uint64 `json:",omitempty"`
}
// CreateContainer creates a new container with the given configuration but does not start it.
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
operation := "CreateContainer"
title := "HCSShim::" + operation
container := &container{
id: id,
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, err
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", id, configuration)
var (
resultp *uint16
createError error
)
if hcsCallbacksSupported {
var identity syscall.Handle
createError = hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
if createError == nil || IsPending(createError) {
if err := container.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
}
} else {
createError = hcsCreateComputeSystemTP5(id, configuration, &container.handle, &resultp)
}
err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
if err != nil {
return nil, makeContainerError(container, operation, configuration, err)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
runtime.SetFinalizer(container, closeContainer)
return container, nil
}
// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
operation := "OpenContainer"
title := "HCSShim::" + operation
logrus.Debugf(title+" id=%s", id)
container := &container{
id: id,
}
var (
handle hcsSystem
resultp *uint16
)
err := hcsOpenComputeSystem(id, &handle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
container.handle = handle
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
runtime.SetFinalizer(container, closeContainer)
return container, nil
}
// Start synchronously starts the container.
func (container *container) Start() error {
operation := "Start"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsStartComputeSystemTP5(container.handle, nil, &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Shutdown requests a container shutdown. If IsPending() on the returned error
// is true, the container may not actually be shut down until Wait() succeeds.
func (container *container) Shutdown() error {
operation := "Shutdown"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsShutdownComputeSystemTP5(container.handle, nil, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Terminate requests that the container be terminated. If IsPending() on the
// returned error is true, it may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
operation := "Terminate"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsTerminateComputeSystemTP5(container.handle, nil, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Wait synchronously waits for the container to shutdown or terminate.
func (container *container) Wait() error {
operation := "Wait"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if hcsCallbacksSupported {
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeContainerError(container, operation, "", err)
}
} else {
_, err := container.waitTimeoutInternal(syscall.INFINITE)
if err != nil {
return makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
func (container *container) waitTimeoutInternal(timeout uint32) (bool, error) {
return waitTimeoutInternalHelper(container, timeout)
}
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (container *container) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if hcsCallbacksSupported {
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
} else {
finished, err := waitTimeoutHelper(container, timeout)
if !finished {
err = ErrTimeout
}
if err != nil {
return makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
func (container *container) hcsWait(timeout uint32) (bool, error) {
var (
resultp *uint16
exitEvent syscall.Handle
)
err := hcsCreateComputeSystemWait(container.handle, &exitEvent, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return false, err
}
defer syscall.CloseHandle(exitEvent)
return waitForSingleObject(exitEvent, timeout)
}
func (container *container) properties(query string) (*containerProperties, error) {
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &containerProperties{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
return properties, nil
}
// HasPendingUpdates returns true if the container has updates pending to install
func (container *container) HasPendingUpdates() (bool, error) {
operation := "HasPendingUpdates"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
properties, err := container.properties(pendingUpdatesQuery)
if err != nil {
return false, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.AreUpdatesPending, nil
}
// Statistics returns statistics for the container
func (container *container) Statistics() (Statistics, error) {
operation := "Statistics"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
properties, err := container.properties(statisticsQuery)
if err != nil {
return Statistics{}, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.Statistics, nil
}
// ProcessList returns an array of ProcessListItems for the container
func (container *container) ProcessList() ([]ProcessListItem, error) {
operation := "ProcessList"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
properties, err := container.properties(processListQuery)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.ProcessList, nil
}
// Pause pauses the execution of the container. This feature is not enabled in TP5.
func (container *container) Pause() error {
operation := "Pause"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsPauseComputeSystemTP5(container.handle, nil, &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Resume resumes the execution of the container. This feature is not enabled in TP5.
func (container *container) Resume() error {
operation := "Resume"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var (
resultp *uint16
)
err := hcsResumeComputeSystemTP5(container.handle, nil, &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
operation := "CreateProcess"
title := "HCSShim::Container::" + operation
var (
processInfo hcsProcessInformation
processHandle hcsProcess
resultp *uint16
)
// If we are not emulating a console, ignore any console size passed to us
if !c.EmulateConsole {
c.ConsoleSize[0] = 0
c.ConsoleSize[1] = 0
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, configuration, err)
}
process := &process{
handle: processHandle,
processID: int(processInfo.ProcessId),
container: container,
cachedPipes: &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
},
}
if hcsCallbacksSupported {
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
runtime.SetFinalizer(process, closeProcess)
return process, nil
}
// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
operation := "OpenProcess"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
var (
processHandle hcsProcess
resultp *uint16
)
err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
process := &process{
handle: processHandle,
processID: pid,
container: container,
}
if hcsCallbacksSupported {
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
runtime.SetFinalizer(process, closeProcess)
return process, nil
}
// Close cleans up any state associated with the container but does not terminate or wait for it.
func (container *container) Close() error {
operation := "Close"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
// Don't double free this
if container.handle == 0 {
return nil
}
if hcsCallbacksSupported {
if err := container.unregisterCallback(); err != nil {
return makeContainerError(container, operation, "", err)
}
}
if err := hcsCloseComputeSystem(container.handle); err != nil {
return makeContainerError(container, operation, "", err)
}
container.handle = 0
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// closeContainer wraps container.Close for use by a finalizer
func closeContainer(container *container) {
container.Close()
}
func (container *container) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
container.callbackNumber = callbackNumber
return nil
}
func (container *container) unregisterCallback() error {
callbackNumber := container.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterComputeSystemCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}
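Putting the pieces together, a minimal hedged sketch of the create/start/shutdown flow from a client of this package; the config literal is a placeholder, not a complete schema:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	cfg := &hcsshim.ContainerConfig{SystemType: "Container", Name: "demo"} // placeholder config
	c, err := hcsshim.CreateContainer("demo", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close() // releases the handle and callback state; does not stop the container

	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	if err := c.Shutdown(); hcsshim.IsPending(err) {
		if err := c.Wait(); err != nil { // shutdown completes asynchronously
			log.Fatal(err)
		}
	} else if err != nil {
		log.Fatal(err)
	}
}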

27
vendor/github.com/Microsoft/hcsshim/createlayer.go generated vendored Normal file
View file

@@ -0,0 +1,27 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(info DriverInfo, id, parent string) error {
title := "hcsshim::CreateLayer "
logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createLayer(&infop, id, parent)
if err != nil {
err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
return nil
}

35
vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go generated vendored Normal file
View file

@@ -0,0 +1,35 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// CreateSandboxLayer creates and populates new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
title := "hcsshim::CreateSandboxLayer "
logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createSandboxLayer(&infop, layerId, parentId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
return nil
}

26
vendor/github.com/Microsoft/hcsshim/deactivatelayer.go generated vendored Normal file
View file

@@ -0,0 +1,26 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(info DriverInfo, id string) error {
title := "hcsshim::DeactivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = deactivateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

27
vendor/github.com/Microsoft/hcsshim/destroylayer.go generated vendored Normal file
View file

@@ -0,0 +1,27 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// DestroyLayer will remove the on-disk files representing the layer with the given
// id, including that layer's containing folder, if any.
func DestroyLayer(info DriverInfo, id string) error {
title := "hcsshim::DestroyLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = destroyLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

190
vendor/github.com/Microsoft/hcsshim/errors.go generated vendored Normal file
View file

@@ -0,0 +1,190 @@
package hcsshim
import (
"errors"
"fmt"
"syscall"
)
var (
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
// ErrElementNotFound is an error encountered when the object being referenced does not exist
ErrElementNotFound = syscall.Errno(0x490)
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
// ErrTimeout is an error encountered when waiting on a notification times out
ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
// a different expected notification
ErrUnexpectedContainerExit = errors.New("unexpected container exit")
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
// is lost while waiting for a notification
ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
// ErrProcNotFound is an error encountered when the process cannot be found
ErrProcNotFound = syscall.Errno(0x7f)
)
// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
Process *process
Operation string
ExtraInfo string
Err error
}
// ContainerError is an error encountered in HCS during an operation on a Container object
type ContainerError struct {
Container *container
Operation string
ExtraInfo string
Err error
}
func (e *ContainerError) Error() string {
if e == nil {
return "<nil>"
}
if e.Container == nil {
return "unexpected nil container for error: " + e.Err.Error()
}
s := "container " + e.Container.id
if e.Operation != "" {
s += " encountered an error during " + e.Operation
}
if e.Err != nil {
s += fmt.Sprintf(" failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
}
if e.ExtraInfo != "" {
s += " extra info: " + e.ExtraInfo
}
return s
}
func makeContainerError(container *container, operation string, extraInfo string, err error) error {
// Don't double wrap errors
if _, ok := err.(*ContainerError); ok {
return err
}
containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
return containerError
}
func (e *ProcessError) Error() string {
if e == nil {
return "<nil>"
}
if e.Process == nil {
return "Unexpected nil process for error: " + e.Err.Error()
}
s := fmt.Sprintf("process %d", e.Process.processID)
if e.Process.container != nil {
s += " in container " + e.Process.container.id
}
if e.Operation != "" {
s += " " + e.Operation
}
switch e.Err.(type) {
case nil:
break
case syscall.Errno:
s += fmt.Sprintf(" failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
default:
s += fmt.Sprintf(" failed: %s", e.Err)
}
return s
}
func makeProcessError(process *process, operation string, extraInfo string, err error) error {
// Don't double wrap errors
if _, ok := err.(*ProcessError); ok {
return err
}
processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
return processError
}
// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
err = getInnerError(err)
return err == ErrComputeSystemDoesNotExist ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeOperationPending
}
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
err = getInnerError(err)
return err == ErrTimeout
}
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
func getInnerError(err error) error {
switch pe := err.(type) {
case nil:
return nil
case *ContainerError:
err = pe.Err
case *ProcessError:
err = pe.Err
}
return err
}
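Since ErrElementNotFound is overloaded as the comments above note, callers typically treat "already stopped" and "does not exist" as the same benign outcome; a hedged fragment, where c is an hcsshim.Container:

	if err := c.Terminate(); err != nil && !hcsshim.IsAlreadyStopped(err) && !hcsshim.IsNotExist(err) {
		return err // a real failure, not a benign race with container exit
	}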

26
vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go generated vendored Normal file
View file

@@ -0,0 +1,26 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// ExpandSandboxSize expands the size of a layer to at least size bytes.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
title := "hcsshim::ExpandSandboxSize "
logrus.Debugf(title+"layerId=%s size=%d", layerId, size)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = expandSandboxSize(&infop, layerId, size)
if err != nil {
err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
return nil
}

158
vendor/github.com/Microsoft/hcsshim/exportlayer.go generated vendored Normal file
View file

@@ -0,0 +1,158 @@
package hcsshim
import (
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Sirupsen/logrus"
)
// ExportLayer will create a folder at exportFolderPath and fill that folder with
// the transport format version of the layer identified by layerId. This transport
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ExportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = exportLayer(&infop, layerId, exportFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath)
return nil
}
type LayerReader interface {
Next() (string, int64, *winio.FileBasicInfo, error)
Read(b []byte) (int, error)
Close() error
}
// FilterLayerReader provides an interface for extracting the contents of an on-disk layer.
type FilterLayerReader struct {
context uintptr
}
// Next reads the next available file from a layer, ensuring that parent directories are always read
// before child files and directories.
//
// Next returns the file's relative path, size, and basic file metadata. Read() should be used to
// extract a Win32 backup stream with the remainder of the metadata and the data.
func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) {
var fileNamep *uint16
fileInfo := &winio.FileBasicInfo{}
var deleted uint32
var fileSize int64
err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted)
if err != nil {
if err == syscall.ERROR_NO_MORE_FILES {
err = io.EOF
} else {
err = makeError(err, "ExportLayerNext", "")
}
return "", 0, nil, err
}
fileName := convertAndFreeCoTaskMemString(fileNamep)
if deleted != 0 {
fileInfo = nil
}
if fileName[0] == '\\' {
fileName = fileName[1:]
}
return fileName, fileSize, fileInfo, nil
}
// Read reads from the current file's Win32 backup stream.
func (r *FilterLayerReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := exportLayerRead(r.context, b, &bytesRead)
if err != nil {
return 0, makeError(err, "ExportLayerRead", "")
}
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}
// Close frees resources associated with the layer reader. It will return an
// error if there was an error while reading the layer or if the layer was not
// completely read.
func (r *FilterLayerReader) Close() (err error) {
if r.context != 0 {
err = exportLayerEnd(r.context)
if err != nil {
err = makeError(err, "ExportLayerEnd", "")
}
r.context = 0
}
return
}
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken SeBackupPrivilege
// to call this and any methods on the resulting LayerReader.
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
if procExportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
path, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
err = ExportLayer(info, layerID, path, parentLayerPaths)
if err != nil {
os.RemoveAll(path)
return nil, err
}
return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
r := &FilterLayerReader{}
err = exportLayerBegin(&infop, layerID, layers, &r.context)
if err != nil {
return nil, makeError(err, "ExportLayerBegin", "")
}
runtime.SetFinalizer(r, func(r *FilterLayerReader) { r.Close() })
return r, err
}
type legacyLayerReaderWrapper struct {
*legacyLayerReader
}
func (r *legacyLayerReaderWrapper) Close() error {
err := r.legacyLayerReader.Close()
os.RemoveAll(r.root)
return err
}
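A hedged sketch of draining a reader returned by NewLayerReader: each Next positions the reader at that file's Win32 backup stream, and ioutil.Discard stands in for real processing:

package layerdemo

import (
	"io"
	"io/ioutil"
	"log"

	"github.com/Microsoft/hcsshim"
)

func dumpLayer(r hcsshim.LayerReader) error {
	for {
		name, size, _, err := r.Next()
		if err == io.EOF {
			break // all files enumerated
		}
		if err != nil {
			return err
		}
		// Reads drain this file's backup stream before the next call to Next.
		if _, err := io.Copy(ioutil.Discard, r); err != nil {
			return err
		}
		log.Printf("%s (%d bytes)", name, size)
	}
	return r.Close() // also reports an incompletely read layer
}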

55
vendor/github.com/Microsoft/hcsshim/getlayermountpath.go generated vendored Normal file
View file

@@ -0,0 +1,55 @@
package hcsshim
import (
"syscall"
"github.com/Sirupsen/logrus"
)
// GetLayerMountPath will look for a mounted layer with the given id and return
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
title := "hcsshim::GetLayerMountPath "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return "", err
}
var mountPathLength uintptr
mountPathLength = 0
// Call the procedure itself.
logrus.Debugf("Calling proc (1)")
err = getLayerMountPath(&infop, id, &mountPathLength, nil)
if err != nil {
err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return "", err
}
// Allocate a mount path of the returned length.
if mountPathLength == 0 {
return "", nil
}
mountPathp := make([]uint16, mountPathLength)
mountPathp[0] = 0
// Call the procedure again
logrus.Debugf("Calling proc (2)")
err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0])
if err != nil {
err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return "", err
}
path := syscall.UTF16ToString(mountPathp[0:])
logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path)
return path, nil
}

22
vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go generated vendored Normal file
View file

@@ -0,0 +1,22 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// GetSharedBaseImages will enumerate the images stored in the common central
// image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages() (imageData string, err error) {
title := "hcsshim::GetSharedBaseImages "
logrus.Debugf("Calling proc")
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
err = makeError(err, title, "")
logrus.Error(err)
return
}
imageData = convertAndFreeCoTaskMemString(buffer)
logrus.Debugf(title+" - succeeded output=%s", imageData)
return
}
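Usage is a single call; per the comment above, the returned string is descriptive image data that the caller decodes or logs as it sees fit. A sketch:

imageData, err := GetSharedBaseImages()
if err != nil {
	return err
}
// imageData describes the images in the common central image store;
// decode it into a caller-defined structure or print it as-is.
fmt.Println(imageData)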

19
vendor/github.com/Microsoft/hcsshim/guid.go generated vendored Normal file
View file

@@ -0,0 +1,19 @@
package hcsshim
import (
"crypto/sha1"
"fmt"
)
type GUID [16]byte
func NewGUID(source string) *GUID {
h := sha1.Sum([]byte(source))
var g GUID
copy(g[0:], h[0:16])
return &g
}
func (g *GUID) ToString() string {
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
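Because NewGUID hashes its input with SHA-1, the name-to-GUID mapping is deterministic, which is what lets layer folder names double as stable layer IDs (see layerPathsToDescriptors in layerutils.go). A quick sketch:

g := NewGUID("my-layer-folder")
fmt.Println(g.ToString()) // the same source string always yields the same GUID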

183
vendor/github.com/Microsoft/hcsshim/hcsshim.go generated vendored Normal file
View file

@@ -0,0 +1,183 @@
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcsshim
import (
"fmt"
"syscall"
"unsafe"
"github.com/Sirupsen/logrus"
)
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
//sys createComputeSystem(id string, configuration string) (hr error) = vmcompute.CreateComputeSystem?
//sys createProcessWithStdHandlesInComputeSystem(id string, paramsJson string, pid *uint32, stdin *syscall.Handle, stdout *syscall.Handle, stderr *syscall.Handle) (hr error) = vmcompute.CreateProcessWithStdHandlesInComputeSystem?
//sys resizeConsoleInComputeSystem(id string, pid uint32, height uint16, width uint16, flags uint32) (hr error) = vmcompute.ResizeConsoleInComputeSystem?
//sys shutdownComputeSystem(id string, timeout uint32) (hr error) = vmcompute.ShutdownComputeSystem?
//sys startComputeSystem(id string) (hr error) = vmcompute.StartComputeSystem?
//sys terminateComputeSystem(id string) (hr error) = vmcompute.TerminateComputeSystem?
//sys terminateProcessInComputeSystem(id string, pid uint32) (hr error) = vmcompute.TerminateProcessInComputeSystem?
//sys waitForProcessInComputeSystem(id string, pid uint32, timeout uint32, exitCode *uint32) (hr error) = vmcompute.WaitForProcessInComputeSystem?
//sys getComputeSystemProperties(id string, flags uint32, properties **uint16) (hr error) = vmcompute.GetComputeSystemProperties?
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsCreateComputeSystemWait(computeSystem hcsSystem, exitEvent *syscall.Handle, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystemWait?
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsCreateProcessWait(process hcsProcess, settings *syscall.Handle, result **uint16) (hr error) = vmcompute.HcsCreateProcessWait?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
//sys hcsCreateComputeSystemTP5(id string, configuration string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsStartComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
const (
// Specific user-visible exit codes
WaitErrExecFailed = 32767
ERROR_GEN_FAILURE = syscall.Errno(31)
ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
WSAEINVAL = syscall.Errno(10022)
// Timeout on wait calls
TimeoutInfinite = 0xFFFFFFFF
)
type HcsError struct {
title string
rest string
Err error
}
type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle
type hcsProcessInformation struct {
ProcessId uint32
Reserved uint32
StdInput syscall.Handle
StdOutput syscall.Handle
StdError syscall.Handle
}
func makeError(err error, title, rest string) error {
// Pass through DLL errors directly since they do not originate from HCS.
if _, ok := err.(*syscall.DLLError); ok {
return err
}
return &HcsError{title, rest, err}
}
func makeErrorf(err error, title, format string, a ...interface{}) error {
return makeError(err, title, fmt.Sprintf(format, a...))
}
func win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return win32FromError(herr.Err)
}
if code, ok := err.(syscall.Errno); ok {
return uint32(code)
}
return uint32(ERROR_GEN_FAILURE)
}
func win32FromHresult(hr uintptr) uintptr {
if hr&0x1fff0000 == 0x00070000 {
return hr & 0xffff
}
return hr
}
func (e *HcsError) Error() string {
s := e.title
if len(s) > 0 && s[len(s)-1] != ' ' {
s += " "
}
s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
if e.rest != "" {
if e.rest[0] != ' ' {
s += " "
}
s += e.rest
}
return s
}
func convertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
coTaskMemFree(unsafe.Pointer(buffer))
return str
}
func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
return []byte(convertAndFreeCoTaskMemString(buffer))
}
func processHcsResult(err error, resultp *uint16) error {
if resultp != nil {
result := convertAndFreeCoTaskMemString(resultp)
logrus.Debugf("Result: %s", result)
}
return err
}
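Because HcsError keeps the underlying Win32 error, code inside this package can special-case specific error codes with win32FromError; a sketch for tolerating a shutdown race, using the ERROR_SHUTDOWN_IN_PROGRESS constant defined above:

if err != nil {
	if win32FromError(err) == uint32(ERROR_SHUTDOWN_IN_PROGRESS) {
		// The compute system is already shutting down; treat as done.
		return nil
	}
	return err
}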

152
vendor/github.com/Microsoft/hcsshim/hnsfuncs.go generated vendored Normal file
View file

@@ -0,0 +1,152 @@
package hcsshim
import (
"encoding/json"
"fmt"
"net"
"github.com/Sirupsen/logrus"
)
type NatPolicy struct {
Type string
Protocol string
InternalPort uint16
ExternalPort uint16
}
type QosPolicy struct {
Type string
MaximumOutgoingBandwidthInBytes uint64
}
type VlanPolicy struct {
Type string
VLAN uint
}
type VsidPolicy struct {
Type string
VSID uint
}
// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet struct {
AddressPrefix string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
}
// MacPool is associated with a network and represents a list
// of MAC addresses available to the network
type MacPool struct {
StartMacAddress string `json:",omitempty"`
EndMacAddress string `json:",omitempty"`
}
// HNSNetwork represents a network in HNS
type HNSNetwork struct {
Id string `json:",omitempty"`
Name string `json:",omitempty"`
Type string `json:",omitempty"`
NetworkAdapterName string `json:",omitempty"`
SourceMac string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacPools []MacPool `json:",omitempty"`
Subnets []Subnet `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
DNSServerCompartment uint32 `json:",omitempty"`
}
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:",omitempty"`
Name string `json:",omitempty"`
VirtualNetwork string `json:",omitempty"`
VirtualNetworkName string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
}
type hnsNetworkResponse struct {
Success bool
Error string
Output HNSNetwork
}
type hnsResponse struct {
Success bool
Error string
Output json.RawMessage
}
func hnsCall(method, path, request string, returnResponse interface{}) error {
var responseBuffer *uint16
err := _hnsCall(method, path, request, &responseBuffer)
if err != nil {
return makeError(err, "hnsCall ", "")
}
response := convertAndFreeCoTaskMemString(responseBuffer)
hnsresponse := &hnsResponse{}
if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
return err
}
if !hnsresponse.Success {
return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
}
if len(hnsresponse.Output) == 0 {
return nil
}
logrus.Debugf("Network Response : %s", hnsresponse.Output)
err = json.Unmarshal(hnsresponse.Output, returnResponse)
if err != nil {
return err
}
return nil
}
// HNSNetworkRequest makes a call into HNS to update/query a single network
func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
var network HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return &network, nil
}
// HNSListNetworkRequest makes a HNS call to query the list of available networks
func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
var network []HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return network, nil
}
// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{}
err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
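A sketch of how these helpers compose: list the networks HNS knows about, then create an endpoint on one of them. The method/path strings follow the REST-like convention above; the JSON request body is illustrative, not a documented schema.

networks, err := HNSListNetworkRequest("GET", "", "")
if err != nil {
	return err
}
if len(networks) == 0 {
	return nil
}
for _, n := range networks {
	fmt.Println(n.Id, n.Name, n.Type)
}
// Create an endpoint attached to the first network (hypothetical body).
req := fmt.Sprintf(`{"Name": "ep1", "VirtualNetwork": %q}`, networks[0].Id)
ep, err := HNSEndpointRequest("POST", "", req)
if err != nil {
	return err
}
fmt.Println("created endpoint:", ep.Id)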

193
vendor/github.com/Microsoft/hcsshim/importlayer.go generated vendored Normal file
View file

@@ -0,0 +1,193 @@
package hcsshim
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/Microsoft/go-winio"
"github.com/Sirupsen/logrus"
)
// ImportLayer will take the contents of the folder at importFolderPath and import
// that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interpret the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ImportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = importLayer(&infop, layerID, importFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
return nil
}
// LayerWriter is an interface that supports writing a new container image layer.
type LayerWriter interface {
// Add adds a file to the layer with given metadata.
Add(name string, fileInfo *winio.FileBasicInfo) error
// AddLink adds a hard link to the layer. The target must already have been added.
AddLink(name string, target string) error
// Remove removes a file that was present in a parent layer from the layer.
Remove(name string) error
// Write writes data to the current file. The data must be in the format of a Win32
// backup stream.
Write(b []byte) (int, error)
// Close finishes the layer writing process and releases any resources.
Close() error
}
// FilterLayerWriter provides an interface to write the contents of a layer to the file system.
type FilterLayerWriter struct {
context uintptr
}
// Add adds a file or directory to the layer. The file's parent directory must have already been added.
//
// name contains the file's relative path. fileInfo contains file times and file attributes; the rest
// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method.
// winio.BackupStreamWriter can be used to facilitate this.
func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, fileInfo)
if err != nil {
return makeError(err, "ImportLayerNext", "")
}
return nil
}
// AddLink adds a hard link to the layer. The target of the link must have already been added.
func (w *FilterLayerWriter) AddLink(name string, target string) error {
return errors.New("hard links not yet supported")
}
// Remove removes a file from the layer. The file must have been present in the parent layer.
//
// name contains the file's relative path.
func (w *FilterLayerWriter) Remove(name string) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, nil)
if err != nil {
return makeError(err, "ImportLayerNext", "")
}
return nil
}
// Write writes more backup stream data to the current file.
func (w *FilterLayerWriter) Write(b []byte) (int, error) {
err := importLayerWrite(w.context, b)
if err != nil {
err = makeError(err, "ImportLayerWrite", "")
return 0, err
}
return len(b), err
}
// Close completes the layer write operation. The error must be checked to ensure that the
// operation was successful.
func (w *FilterLayerWriter) Close() (err error) {
if w.context != 0 {
err = importLayerEnd(w.context)
if err != nil {
err = makeError(err, "ImportLayerEnd", "")
}
w.context = 0
}
return
}
type legacyLayerWriterWrapper struct {
*legacyLayerWriter
info DriverInfo
layerID string
path string
parentLayerPaths []string
}
func (r *legacyLayerWriterWrapper) Close() error {
err := r.legacyLayerWriter.Close()
if err == nil {
var fullPath string
// Use the original path here because ImportLayer does not support long paths for the source in TP5.
// But do use a long path for the destination to work around another bug with directories
// with MAX_PATH - 12 < length < MAX_PATH.
info := r.info
fullPath, err = makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID))
if err == nil {
info.HomeDir = ""
err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths)
}
}
os.RemoveAll(r.root)
return err
}
// NewLayerWriter returns a new layer writer for creating a layer on disk.
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter.
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently.
return &baseLayerWriter{
root: filepath.Join(info.HomeDir, layerID),
}, nil
}
if procImportLayerBegin.Find() != nil {
// The new layer writer is not available on this Windows build. Fall back to the
// legacy import code path.
path, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
return &legacyLayerWriterWrapper{
legacyLayerWriter: newLegacyLayerWriter(path),
info: info,
layerID: layerID,
path: path,
parentLayerPaths: parentLayerPaths,
}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
w := &FilterLayerWriter{}
err = importLayerBegin(&infop, layerID, layers, &w.context)
if err != nil {
return nil, makeError(err, "ImportLayerStart", "")
}
runtime.SetFinalizer(w, func(w *FilterLayerWriter) { w.Close() })
return w, nil
}
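A writing sketch to mirror the reader example in exportlayer.go (the caller must hold SeBackupPrivilege and SeRestorePrivilege as noted above; info, parents, fi and backupStreamBytes are placeholder values the caller prepares, e.g. via winio.BackupStreamWriter):

w, err := NewLayerWriter(info, layerID, parents)
if err != nil {
	return err
}
fi := &winio.FileBasicInfo{} // placeholder: file times and attributes for the new entry
if err := w.Add(`Files\hello.txt`, fi); err != nil {
	w.Close()
	return err
}
// File metadata and data go in as a Win32 backup stream.
if _, err := w.Write(backupStreamBytes); err != nil {
	w.Close()
	return err
}
// Close must be checked: on the legacy path it is the step that actually
// imports the staged files into the layer.
return w.Close()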

146
vendor/github.com/Microsoft/hcsshim/interface.go generated vendored Normal file
View file

@@ -0,0 +1,146 @@
package hcsshim
import (
"io"
"time"
)
// ProcessConfig is used as both the input of Container.CreateProcess
// and to convert the parameters to JSON for passing onto the HCS
type ProcessConfig struct {
ApplicationName string
CommandLine string
WorkingDirectory string
Environment map[string]string
EmulateConsole bool
CreateStdInPipe bool
CreateStdOutPipe bool
CreateStdErrPipe bool
ConsoleSize [2]uint
}
type Layer struct {
ID string
Path string
}
type MappedDir struct {
HostPath string
ContainerPath string
ReadOnly bool
BandwidthMaximum uint64
IOPSMaximum uint64
}
type HvRuntime struct {
ImagePath string `json:",omitempty"`
SkipTemplate bool `json:",omitempty"`
}
// ContainerConfig is used as both the input of CreateContainer
// and to convert the parameters to JSON for passing onto the HCS
type ContainerConfig struct {
SystemType string // HCS requires this to be hard-coded to "Container"
Name string // Name of the container. We use the docker ID.
Owner string // The management platform that created this container
IsDummy bool // Used for development purposes.
VolumePath string // Windows volume path for scratch space
IgnoreFlushesDuringBoot bool // Optimization hint for container startup in Windows
LayerFolderPath string // Where the layer folders are located
Layers []Layer // List of storage layers
Credentials string `json:",omitempty"` // Credentials information
ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
ProcessorWeight uint64 `json:",omitempty"` // CPU Shares 0..10000 on Windows; where 0 will be omitted and HCS will default.
ProcessorMaximum int64 `json:",omitempty"` // CPU maximum usage percent 1..100
StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
HostName string // Hostname
MappedDirectories []MappedDir // List of mapped directories (volumes/mounts)
SandboxPath string // Location of unmounted sandbox (used for Hyper-V containers)
HvPartition bool // True if it is a Hyper-V Container
EndpointList []string // List of networking endpoints to be attached to container
HvRuntime *HvRuntime // Hyper-V container settings
Servicing bool // True if this container is for servicing
AllowUnqualifiedDNSQuery bool // True to allow unqualified DNS name resolution
}
// Container represents a created (but not necessarily running) container.
type Container interface {
// Start synchronously starts the container.
Start() error
// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
Shutdown() error
// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
Terminate() error
// Wait synchronously waits for the container to shut down or terminate.
Wait() error
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
// returns false if timeout occurs.
WaitTimeout(time.Duration) error
// Pause pauses the execution of a container.
Pause() error
// Resume resumes the execution of a container.
Resume() error
// HasPendingUpdates returns true if the container has updates pending to install.
HasPendingUpdates() (bool, error)
// Statistics returns statistics for a container.
Statistics() (Statistics, error)
// ProcessList returns details for the processes in a container.
ProcessList() ([]ProcessListItem, error)
// CreateProcess launches a new process within the container.
CreateProcess(c *ProcessConfig) (Process, error)
// OpenProcess gets an interface to an existing process within the container.
OpenProcess(pid int) (Process, error)
// Close cleans up any state associated with the container but does not terminate or wait for it.
Close() error
}
// Process represents a running or exited process.
type Process interface {
// Pid returns the process ID of the process within the container.
Pid() int
// Kill signals the process to terminate but does not wait for it to finish terminating.
Kill() error
// Wait waits for the process to exit.
Wait() error
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// false if timeout occurs.
WaitTimeout(time.Duration) error
// ExitCode returns the exit code of the process. The process must have
// already terminated.
ExitCode() (int, error)
// ResizeConsole resizes the console of the process.
ResizeConsole(width, height uint16) error
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
CloseStdin() error
// Close cleans up any state associated with the process but does not kill
// or wait on it.
Close() error
}
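Tying the two interfaces together, a run-to-completion sketch. CreateContainer is referenced by the ContainerConfig comment above but not shown in this file, so its exact signature here is an assumption; cfg is a ContainerConfig populated by the caller and the ProcessConfig values are placeholders.

container, err := CreateContainer("demo", &cfg) // assumed constructor
if err != nil {
	return err
}
defer container.Close()
if err := container.Start(); err != nil {
	return err
}
p, err := container.CreateProcess(&ProcessConfig{
	CommandLine:      `cmd /c echo hello`,
	WorkingDirectory: `C:\`,
	CreateStdOutPipe: true,
})
if err != nil {
	return err
}
defer p.Close()
if err := p.Wait(); err != nil {
	return err
}
code, _ := p.ExitCode() // valid only once the process has exited
fmt.Println("exit code:", code)
return container.Shutdown()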

30
vendor/github.com/Microsoft/hcsshim/layerexists.go generated vendored Normal file
View file

@@ -0,0 +1,30 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
func LayerExists(info DriverInfo, id string) (bool, error) {
title := "hcsshim::LayerExists "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return false, err
}
// Call the procedure itself.
var exists uint32
err = layerExists(&infop, id, &exists)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return false, err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists)
return exists != 0, nil
}
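A guard sketch before creating or importing a layer:

exists, err := LayerExists(info, layerID)
if err != nil {
	return err
}
if !exists {
	// The layer is unknown to the system; create or import it first.
}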

111
vendor/github.com/Microsoft/hcsshim/layerutils.go generated vendored Normal file
View file

@@ -0,0 +1,111 @@
package hcsshim
// This file contains utility functions to support storage (graph) related
// functionality.
import (
"path/filepath"
"syscall"
"github.com/Sirupsen/logrus"
)
/* To pass into syscall, we need a struct matching the following:
enum GraphDriverType
{
DiffDriver,
FilterDriver
};
struct DriverInfo {
GraphDriverType Flavour;
LPCWSTR HomeDir;
};
*/
type DriverInfo struct {
Flavour int
HomeDir string
}
type driverInfo struct {
Flavour int
HomeDirp *uint16
}
func convertDriverInfo(info DriverInfo) (driverInfo, error) {
homedirp, err := syscall.UTF16PtrFromString(info.HomeDir)
if err != nil {
logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error())
return driverInfo{}, err
}
return driverInfo{
Flavour: info.Flavour,
HomeDirp: homedirp,
}, nil
}
/* To pass into syscall, we need a struct matching the following:
typedef struct _WC_LAYER_DESCRIPTOR {
//
// The ID of the layer
//
GUID LayerId;
//
// Additional flags
//
union {
struct {
ULONG Reserved : 31;
ULONG Dirty : 1; // Created from sandbox as a result of snapshot
};
ULONG Value;
} Flags;
//
// Path to the layer root directory, null-terminated
//
PCWSTR Path;
} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
*/
type WC_LAYER_DESCRIPTOR struct {
LayerId GUID
Flags uint32
Pathp *uint16
}
func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
// Array of descriptors that gets constructed.
var layers []WC_LAYER_DESCRIPTOR
for i := 0; i < len(parentLayerPaths); i++ {
// Create a layer descriptor, using the folder name
// as the source for a GUID LayerId
_, folderName := filepath.Split(parentLayerPaths[i])
g, err := NameToGuid(folderName)
if err != nil {
logrus.Debugf("Failed to convert name to guid %s", err)
return nil, err
}
p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
if err != nil {
logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
return nil, err
}
layers = append(layers, WC_LAYER_DESCRIPTOR{
LayerId: g,
Flags: 0,
Pathp: p,
})
}
return layers, nil
}

441
vendor/github.com/Microsoft/hcsshim/legacy.go generated vendored Normal file
View file

@@ -0,0 +1,441 @@
package hcsshim
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/go-winio"
)
var errorIterationCanceled = errors.New("")
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
}
func makeLongAbsPath(path string) (string, error) {
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
return path, nil
}
if !filepath.IsAbs(path) {
absPath, err := filepath.Abs(path)
if err != nil {
return "", err
}
path = absPath
}
if strings.HasPrefix(path, `\\`) {
return `\\?\UNC\` + path[2:], nil
}
return `\\?\` + path, nil
}
type fileEntry struct {
path string
fi os.FileInfo
err error
}
type legacyLayerReader struct {
root string
result chan *fileEntry
proceed chan bool
currentFile *os.File
backupReader *winio.BackupFileReader
isTP4Format bool
}
// newLegacyLayerReader returns a new LayerReader that can read the Windows
// TP4 transport format from disk.
func newLegacyLayerReader(root string) *legacyLayerReader {
r := &legacyLayerReader{
root: root,
result: make(chan *fileEntry),
proceed: make(chan bool),
isTP4Format: IsTP4(),
}
go r.walk()
return r
}
func readTombstones(path string) (map[string]([]string), error) {
tf, err := os.Open(filepath.Join(path, "tombstones.txt"))
if err != nil {
return nil, err
}
defer tf.Close()
s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
return nil, errors.New("Invalid tombstones file")
}
ts := make(map[string]([]string))
for s.Scan() {
t := filepath.Join("Files", s.Text()[1:]) // skip leading `\`
dir := filepath.Dir(t)
ts[dir] = append(ts[dir], t)
}
if err = s.Err(); err != nil {
return nil, err
}
return ts, nil
}
func (r *legacyLayerReader) walkUntilCancelled() error {
root, err := makeLongAbsPath(r.root)
if err != nil {
return err
}
r.root = root
ts, err := readTombstones(r.root)
if err != nil {
return err
}
err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
return nil
}
r.result <- &fileEntry{path, info, nil}
if !<-r.proceed {
return errorIterationCanceled
}
// List all the tombstones.
if info.IsDir() {
relPath, err := filepath.Rel(r.root, path)
if err != nil {
return err
}
if dts, ok := ts[relPath]; ok {
for _, t := range dts {
r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil}
if !<-r.proceed {
return errorIterationCanceled
}
}
}
}
return nil
})
if err == errorIterationCanceled {
return nil
}
if err == nil {
return io.EOF
}
return err
}
func (r *legacyLayerReader) walk() {
defer close(r.result)
if !<-r.proceed {
return
}
err := r.walkUntilCancelled()
if err != nil {
for {
r.result <- &fileEntry{err: err}
if !<-r.proceed {
return
}
}
}
}
func (r *legacyLayerReader) reset() {
if r.backupReader != nil {
r.backupReader.Close()
r.backupReader = nil
}
if r.currentFile != nil {
r.currentFile.Close()
r.currentFile = nil
}
}
func findBackupStreamSize(r io.Reader) (int64, error) {
br := winio.NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err != nil {
if err == io.EOF {
err = nil
}
return 0, err
}
if hdr.Id == winio.BackupData {
return hdr.Size, nil
}
}
}
func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
r.reset()
r.proceed <- true
fe := <-r.result
if fe == nil {
err = errors.New("LegacyLayerReader closed")
return
}
if fe.err != nil {
err = fe.err
return
}
path, err = filepath.Rel(r.root, fe.path)
if err != nil {
return
}
if fe.fi == nil {
// This is a tombstone. Return a nil fileInfo.
return
}
if fe.fi.IsDir() && strings.HasPrefix(path, `Files\`) {
fe.path += ".$wcidirs$"
}
f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
if err != nil {
return
}
defer func() {
if f != nil {
f.Close()
}
}()
fileInfo, err = winio.GetFileBasicInfo(f)
if err != nil {
return
}
if !strings.HasPrefix(path, `Files\`) {
size = fe.fi.Size()
r.backupReader = winio.NewBackupFileReader(f, false)
if path == "Hives" || path == "Files" {
// The Hives directory has a non-deterministic file time because of the
// nature of the import process. Use the times from System_Delta.
var g *os.File
g, err = os.Open(filepath.Join(r.root, `Hives\System_Delta`))
if err != nil {
return
}
attr := fileInfo.FileAttributes
fileInfo, err = winio.GetFileBasicInfo(g)
g.Close()
if err != nil {
return
}
fileInfo.FileAttributes = attr
}
// The creation time and access time get reset for files outside of the Files path.
fileInfo.CreationTime = fileInfo.LastWriteTime
fileInfo.LastAccessTime = fileInfo.LastWriteTime
} else {
beginning := int64(0)
if !r.isTP4Format {
// In TP5, the file attributes were added before the backup stream
var attr uint32
err = binary.Read(f, binary.LittleEndian, &attr)
if err != nil {
return
}
fileInfo.FileAttributes = uintptr(attr)
beginning = 4
}
// Find the accurate file size.
if !fe.fi.IsDir() {
size, err = findBackupStreamSize(f)
if err != nil {
err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
return
}
}
// Return back to the beginning of the backup stream.
_, err = f.Seek(beginning, 0)
if err != nil {
return
}
}
r.currentFile = f
f = nil
return
}
func (r *legacyLayerReader) Read(b []byte) (int, error) {
if r.backupReader == nil {
if r.currentFile == nil {
return 0, io.EOF
}
return r.currentFile.Read(b)
}
return r.backupReader.Read(b)
}
func (r *legacyLayerReader) Close() error {
r.proceed <- false
<-r.result
r.reset()
return nil
}
type legacyLayerWriter struct {
root string
currentFile *os.File
backupWriter *winio.BackupFileWriter
tombstones []string
isTP4Format bool
pathFixed bool
}
// newLegacyLayerWriter returns a LayerWriter that can write the TP4 transport format
// to disk.
func newLegacyLayerWriter(root string) *legacyLayerWriter {
return &legacyLayerWriter{
root: root,
isTP4Format: IsTP4(),
}
}
func (w *legacyLayerWriter) init() error {
if !w.pathFixed {
path, err := makeLongAbsPath(w.root)
if err != nil {
return err
}
w.root = path
w.pathFixed = true
}
return nil
}
func (w *legacyLayerWriter) reset() {
if w.backupWriter != nil {
w.backupWriter.Close()
w.backupWriter = nil
}
if w.currentFile != nil {
w.currentFile.Close()
w.currentFile = nil
}
}
func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
w.reset()
err := w.init()
if err != nil {
return err
}
path := filepath.Join(w.root, name)
createDisposition := uint32(syscall.CREATE_NEW)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
err := os.Mkdir(path, 0)
if err != nil {
return err
}
path += ".$wcidirs$"
}
f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, createDisposition)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
os.Remove(path)
}
}()
strippedFi := *fileInfo
strippedFi.FileAttributes = 0
err = winio.SetFileBasicInfo(f, &strippedFi)
if err != nil {
return err
}
if strings.HasPrefix(name, `Hives\`) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
} else {
if !w.isTP4Format {
// In TP5, the file attributes were added to the header
err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes))
if err != nil {
return err
}
}
}
w.currentFile = f
f = nil
return nil
}
func (w *legacyLayerWriter) AddLink(name string, target string) error {
return errors.New("hard links not supported with legacy writer")
}
func (w *legacyLayerWriter) Remove(name string) error {
if !strings.HasPrefix(name, `Files\`) {
return fmt.Errorf("invalid tombstone %s", name)
}
w.tombstones = append(w.tombstones, name[len(`Files\`):])
return nil
}
func (w *legacyLayerWriter) Write(b []byte) (int, error) {
if w.backupWriter == nil {
if w.currentFile == nil {
return 0, errors.New("closed")
}
return w.currentFile.Write(b)
}
return w.backupWriter.Write(b)
}
func (w *legacyLayerWriter) Close() error {
w.reset()
err := w.init()
if err != nil {
return err
}
tf, err := os.Create(filepath.Join(w.root, "tombstones.txt"))
if err != nil {
return err
}
defer tf.Close()
_, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n"))
if err != nil {
return err
}
for _, t := range w.tombstones {
_, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n"))
if err != nil {
return err
}
}
return nil
}
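For reference, the tombstones.txt file that readTombstones parses and Close writes above is plain text: a UTF-8 BOM, a "Version 1.0" header line, then one path per line, each with a leading backslash and relative to the layer's Files directory. A layer that deletes two files would carry something like:

Version 1.0
\foo\deleted.txt
\bar\also-deleted.dll

(readTombstones maps each entry back under Files\ and groups tombstones by their containing directory, so the reader can emit them while walking that directory.)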

View file

@@ -0,0 +1,818 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
mksyscall_windows generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument. This
includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the Go func name needs to be different from its winapi dll name,
the winapi name can be specified at the end, after the "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
* Each function that returns err needs to supply a condition that the
winapi return value will be tested against to detect failure. On failure,
err is set to the Windows "last-error"; otherwise err will be nil.
The value can be provided at end of //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
Usage:
mksyscall_windows [flags] [path ...]
The flags are:
-output
Specify output file name (outputs to console if blank).
-trace
Generate print statement after every syscall.
*/
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/format"
"go/parser"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"text/template"
)
var (
filename = flag.String("output", "", "output file name (standard output if omitted)")
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
)
func trim(s string) string {
return strings.Trim(s, " \t")
}
var packageName string
func packagename() string {
return packageName
}
func syscalldot() string {
if packageName == "syscall" {
return ""
}
return "syscall."
}
// Param is a function parameter.
type Param struct {
Name string
Type string
fn *Fn
tmpVarIdx int
}
// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
if p.tmpVarIdx < 0 {
p.tmpVarIdx = p.fn.curTmpVarIdx
p.fn.curTmpVarIdx++
}
return fmt.Sprintf("_p%d", p.tmpVarIdx)
}
// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
const code = `var %s uint32
if %s {
%s = 1
} else {
%s = 0
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}
// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
const code = `var %s *%s
if len(%s) > 0 {
%s = &%s[0]
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}
// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
errvar := p.fn.Rets.ErrorVarName()
if errvar == "" {
errvar = "_"
}
tmp := p.tmpVar()
const code = `var %s %s
%s, %s = %s(%s)`
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
if errvar == "-" {
return s
}
const morecode = `
if %s != nil {
return
}`
return s + fmt.Sprintf(morecode, errvar)
}
// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
switch {
case p.Type == "bool":
return p.BoolTmpVarCode()
case strings.HasPrefix(p.Type, "[]"):
return p.SliceTmpVarCode()
default:
return ""
}
}
// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
if p.Type != "string" {
return ""
}
return p.StringTmpVarCode()
}
// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
t := p.HelperType()
var s string
switch {
case t[0] == '*':
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
case t == "bool":
s = p.tmpVar()
case strings.HasPrefix(t, "[]"):
return []string{
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
fmt.Sprintf("uintptr(len(%s))", p.Name),
}
default:
s = p.Name
}
return []string{fmt.Sprintf("uintptr(%s)", s)}
}
// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
return p.Name == "err" && p.Type == "error"
}
// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
if p.Type == "string" {
return p.fn.StrconvType()
}
return p.Type
}
// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into string by applying fn to it
// before conversion.
func join(ps []*Param, fn func(*Param) string, sep string) string {
if len(ps) == 0 {
return ""
}
a := make([]string, 0)
for _, p := range ps {
a = append(a, fn(p))
}
return strings.Join(a, sep)
}
// Rets describes function return parameters.
type Rets struct {
Name string
Type string
ReturnsError bool
FailCond string
}
// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
if r.ReturnsError {
return "err"
}
if r.Type == "error" {
return r.Name
}
return ""
}
// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
ps := make([]*Param, 0)
if len(r.Name) > 0 {
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
}
if r.ReturnsError {
ps = append(ps, &Param{Name: "err", Type: "error"})
}
return ps
}
// List returns source code of syscall return parameters.
func (r *Rets) List() string {
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
if len(s) > 0 {
s = "(" + s + ")"
}
return s
}
// PrintList returns source code of the trace printing part corresponding
// to syscall return values.
func (r *Rets) PrintList() string {
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
if r.Name == "" && !r.ReturnsError {
return ""
}
retvar := "r0"
if r.Name == "" {
retvar = "r1"
}
errvar := "_"
if r.ReturnsError {
errvar = "e1"
}
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}
func (r *Rets) useLongHandleErrorCode(retvar string) string {
const code = `if %s {
if e1 != 0 {
err = error(e1)
} else {
err = %sEINVAL
}
}`
cond := retvar + " == 0"
if r.FailCond != "" {
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
}
return fmt.Sprintf(code, cond, syscalldot())
}
// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
const code = `if r0 != 0 {
%s = %sErrno(r0)
}`
const hrCode = `if int32(r0) < 0 {
%s = %sErrno(win32FromHresult(r0))
}`
if r.Name == "" && !r.ReturnsError {
return ""
}
if r.Name == "" {
return r.useLongHandleErrorCode("r1")
}
if r.Type == "error" {
if r.Name == "hr" {
return fmt.Sprintf(hrCode, r.Name, syscalldot())
} else {
return fmt.Sprintf(code, r.Name, syscalldot())
}
}
s := ""
switch {
case r.Type[0] == '*':
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
case r.Type == "bool":
s = fmt.Sprintf("%s = r0 != 0", r.Name)
default:
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
}
if !r.ReturnsError {
return s
}
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}
// Fn describes syscall function.
type Fn struct {
Name string
Params []*Param
Rets *Rets
PrintTrace bool
confirmproc bool
dllname string
dllfuncname string
src string
// TODO: get rid of this field and just use parameter index instead
curTmpVarIdx int // ensure tmp variables have unique names
}
// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
s = trim(s)
if s == "" {
return nil, nil
}
a := strings.Split(s, ",")
ps := make([]*Param, len(a))
for i := range ps {
s2 := trim(a[i])
b := strings.Split(s2, " ")
if len(b) != 2 {
b = strings.Split(s2, "\t")
if len(b) != 2 {
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
}
}
ps[i] = &Param{
Name: trim(b[0]),
Type: trim(b[1]),
fn: f,
tmpVarIdx: -1,
}
}
return ps, nil
}
// extractSection extracts text out of string s starting after start
// and ending just before end. The found return value indicates success,
// and prefix, body and suffix will contain the corresponding parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
s = trim(s)
if strings.HasPrefix(s, string(start)) {
// no prefix
body = s[1:]
} else {
a := strings.SplitN(s, string(start), 2)
if len(a) != 2 {
return "", "", s, false
}
prefix = a[0]
body = a[1]
}
a := strings.SplitN(body, string(end), 2)
if len(a) != 2 {
return "", "", "", false
}
return prefix, a[0], a[1], true
}
// newFn parses string s and returns the created function Fn.
func newFn(s string) (*Fn, error) {
s = trim(s)
f := &Fn{
Rets: &Rets{},
src: s,
PrintTrace: *printTraceFlag,
}
// function name and args
prefix, body, s, found := extractSection(s, '(', ')')
if !found || prefix == "" {
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
}
f.Name = prefix
var err error
f.Params, err = extractParams(body, f)
if err != nil {
return nil, err
}
// return values
_, body, s, found = extractSection(s, '(', ')')
if found {
r, err := extractParams(body, f)
if err != nil {
return nil, err
}
switch len(r) {
case 0:
case 1:
if r[0].IsError() {
f.Rets.ReturnsError = true
} else {
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
}
case 2:
if !r[1].IsError() {
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
}
f.Rets.ReturnsError = true
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
default:
return nil, errors.New("Too many return values in \"" + f.src + "\"")
}
}
// fail condition
_, body, s, found = extractSection(s, '[', ']')
if found {
f.Rets.FailCond = body
}
// dll and dll function names
s = trim(s)
if s == "" {
return f, nil
}
if !strings.HasPrefix(s, "=") {
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
s = trim(s[1:])
a := strings.Split(s, ".")
switch len(a) {
case 1:
f.dllfuncname = a[0]
case 2:
f.dllname = a[0]
f.dllfuncname = a[1]
default:
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
f.confirmproc = true
f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
}
return f, nil
}
// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
if f.dllname == "" {
return "kernel32"
}
return f.dllname
}
// DLLFuncName returns the DLL function name for function f.
func (f *Fn) DLLFuncName() string {
if f.dllfuncname == "" {
return f.Name
}
return f.dllfuncname
}
func (f *Fn) ConfirmProc() bool {
return f.confirmproc
}
// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}
// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}
// ParamPrintList returns source code of the trace printing part corresponding
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// ParamCount return number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
n := 0
for _, p := range f.Params {
n += len(p.SyscallArgList())
}
return n
}
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns the parameter count for the corresponding SyscallX function.
func (f *Fn) SyscallParamCount() int {
n := f.ParamCount()
switch {
case n <= 3:
return 3
case n <= 6:
return 6
case n <= 9:
return 9
case n <= 12:
return 12
case n <= 15:
return 15
default:
panic("too many arguments to system call")
}
}
// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
c := f.SyscallParamCount()
if c == 3 {
return syscalldot() + "Syscall"
}
return syscalldot() + "Syscall" + strconv.Itoa(c)
}
// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
a := make([]string, 0)
for _, p := range f.Params {
a = append(a, p.SyscallArgList()...)
}
for len(a) < f.SyscallParamCount() {
a = append(a, "0")
}
return strings.Join(a, ", ")
}
// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
a := make([]string, 0, len(f.Params))
for _, p := range f.Params {
s := p.Name
if p.Type == "string" {
s = p.tmpVar()
}
a = append(a, s)
}
return strings.Join(a, ", ")
}
// IsUTF16 is true if f is a W (utf16) function. It is false
// for all A (ascii) functions.
func (_ *Fn) IsUTF16() bool {
return true
}
// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
if f.IsUTF16() {
return syscalldot() + "UTF16PtrFromString"
}
return syscalldot() + "BytePtrFromString"
}
// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
if f.IsUTF16() {
return "*uint16"
}
return "*byte"
}
// HasStringParam is true if f has at least one string parameter.
// Otherwise it is false.
func (f *Fn) HasStringParam() bool {
for _, p := range f.Params {
if p.Type == "string" {
return true
}
}
return false
}
var uniqDllFuncName = make(map[string]bool)
// IsNotDuplicate is true if f is not a duplicated function
func (f *Fn) IsNotDuplicate() bool {
funcName := f.DLLFuncName()
if !uniqDllFuncName[funcName] {
uniqDllFuncName[funcName] = true
return true
}
return false
}
// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
if !f.HasStringParam() {
return f.Name
}
return "_" + f.Name
}
// Source files and functions.
type Source struct {
Funcs []*Fn
Files []string
}
// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns source files
// and functions collection *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
src := &Source{
Funcs: make([]*Fn, 0),
Files: make([]string, 0),
}
for _, file := range fs {
if err := src.ParseFile(file); err != nil {
return nil, err
}
}
return src, nil
}
// DLLs returns the dll names for a source set src.
func (src *Source) DLLs() []string {
uniq := make(map[string]bool)
r := make([]string, 0)
for _, f := range src.Funcs {
name := f.DLLName()
if _, found := uniq[name]; !found {
uniq[name] = true
r = append(r, name)
}
}
return r
}
// ParseFile adds an additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
s := bufio.NewScanner(file)
for s.Scan() {
t := trim(s.Text())
if len(t) < 7 {
continue
}
if !strings.HasPrefix(t, "//sys") {
continue
}
t = t[5:]
if !(t[0] == ' ' || t[0] == '\t') {
continue
}
f, err := newFn(t[1:])
if err != nil {
return err
}
src.Funcs = append(src.Funcs, f)
}
if err := s.Err(); err != nil {
return err
}
src.Files = append(src.Files, path)
// get package name
fset := token.NewFileSet()
_, err = file.Seek(0, 0)
if err != nil {
return err
}
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
if err != nil {
return err
}
packageName = pkg.Name.Name
return nil
}
// Generate output source file from a source set src.
func (src *Source) Generate(w io.Writer) error {
funcMap := template.FuncMap{
"packagename": packagename,
"syscalldot": syscalldot,
}
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
err := t.Execute(w, src)
if err != nil {
return errors.New("Failed to execute template: " + err.Error())
}
return nil
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(1)
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
src, err := ParseFiles(flag.Args())
if err != nil {
log.Fatal(err)
}
var buf bytes.Buffer
if err := src.Generate(&buf); err != nil {
log.Fatal(err)
}
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
if *filename == "" {
_, err = os.Stdout.Write(data)
} else {
err = ioutil.WriteFile(*filename, data, 0644)
}
if err != nil {
log.Fatal(err)
}
}
// TODO: use println instead to print in the following template
const srcTemplate = `
{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package {{packagename}}
import "github.com/Microsoft/go-winio"
import "unsafe"{{if syscalldot}}
import "syscall"{{end}}
var _ unsafe.Pointer
var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}
{{/* help functions */}}
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{syscalldot}}NewLazyDLL("{{.}}.dll")
{{end}}{{end}}
{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
{{end}}{{end}}
{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}
{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}} return
}
{{end}}
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
{{end}}{{end}}{{end}}
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
return
}
{{end}}{{end}}
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
{{end}}{{end}}
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}
`
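To make the template concrete: run through this generator, a declaration like the GetLayerMountPath line in hcsshim.go expands, roughly, into a lazy-proc lookup plus a string-conversion wrapper of the following shape. This is a hand-written sketch of the output (syscall/unsafe imports implied), not a copy of the generated zhcsshim.go.

var (
	modvmcompute          = syscall.NewLazyDLL("vmcompute.dll")
	procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath")
)

func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) {
	// String params are converted in a wrapper that calls the "_" helper.
	var _p0 *uint16
	_p0, hr = syscall.UTF16PtrFromString(id)
	if hr != nil {
		return
	}
	return _getLayerMountPath(info, _p0, length, buffer)
}

func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) {
	// The trailing '?' on the //sys line sets ConfirmProc, so the body
	// first checks that the export exists on this Windows build.
	if hr = procGetLayerMountPath.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4,
		uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)),
		uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
	// The return parameter is named hr, so SetErrorCode emits the
	// HRESULT-style check rather than the plain r0 != 0 form.
	if int32(r0) < 0 {
		hr = syscall.Errno(win32FromHresult(r0))
	}
	return
}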

20
vendor/github.com/Microsoft/hcsshim/nametoguid.go generated vendored Normal file
View file

@@ -0,0 +1,20 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id GUID, err error) {
title := "hcsshim::NameToGuid "
logrus.Debugf(title+"Name %s", name)
err = nameToGuid(name, &id)
if err != nil {
err = makeErrorf(err, title, "name=%s", name)
logrus.Error(err)
return
}
return
}
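// A minimal usage sketch (the sandbox name is illustrative): the same input
// name always yields the same GUID, so the result can serve as a deterministic
// identifier shared across clients.
func exampleNameToGuid() {
id, err := NameToGuid("my-sandbox")
if err != nil {
return
}
logrus.Debugf("GUID for my-sandbox: %v", id)
}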

36
vendor/github.com/Microsoft/hcsshim/preparelayer.go generated vendored Normal file
View file

@ -0,0 +1,36 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// PrepareLayer finds a mounted read-write layer matching layerId and enables
// the filesystem filter for use on that layer. This requires the paths to all
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
title := "hcsshim::PrepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = prepareLayer(&infop, layerId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
return nil
}

389
vendor/github.com/Microsoft/hcsshim/process.go generated vendored Normal file
View file

@ -0,0 +1,389 @@
package hcsshim
import (
"encoding/json"
"io"
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
// process represents a process running in an HCS container.
type process struct {
handle hcsProcess
processID int
container *container
cachedPipes *cachedPipes
callbackNumber uintptr
}
type cachedPipes struct {
stdIn syscall.Handle
stdOut syscall.Handle
stdErr syscall.Handle
}
type processModifyRequest struct {
Operation string
ConsoleSize *consoleSize `json:",omitempty"`
CloseHandle *closeHandle `json:",omitempty"`
}
type consoleSize struct {
Height uint16
Width uint16
}
type closeHandle struct {
Handle string
}
type processStatus struct {
ProcessID uint32
Exited bool
ExitCode uint32
LastWaitResult int32
}
const (
stdIn string = "StdIn"
stdOut string = "StdOut"
stdErr string = "StdErr"
)
const (
modifyConsoleSize string = "ConsoleSize"
modifyCloseHandle string = "CloseHandle"
)
// Pid returns the process ID of the process within the container.
func (process *process) Pid() int {
return process.processID
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *process) Kill() error {
operation := "Kill"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var resultp *uint16
err := hcsTerminateProcess(process.handle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Wait waits for the process to exit.
func (process *process) Wait() error {
operation := "Wait"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if hcsCallbacksSupported {
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
return makeProcessError(process, operation, "", err)
}
} else {
_, err := process.waitTimeoutInternal(syscall.INFINITE)
if err != nil {
return makeProcessError(process, operation, "", err)
}
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// WaitTimeout waits for the process to exit or the duration to elapse. It
// returns ErrTimeout if the timeout elapses before the process exits.
func (process *process) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if hcsCallbacksSupported {
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
return makeProcessError(process, operation, "", err)
}
} else {
finished, err := waitTimeoutHelper(process, timeout)
if !finished {
err = ErrTimeout
}
if err != nil {
return makeProcessError(process, operation, "", err)
}
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *process) hcsWait(timeout uint32) (bool, error) {
var (
resultp *uint16
exitEvent syscall.Handle
)
err := hcsCreateProcessWait(process.handle, &exitEvent, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return false, err
}
defer syscall.CloseHandle(exitEvent)
return waitForSingleObject(exitEvent, timeout)
}
func (process *process) waitTimeoutInternal(timeout uint32) (bool, error) {
return waitTimeoutInternalHelper(process, timeout)
}
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *process) ExitCode() (int, error) {
operation := "ExitCode"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
properties, err := process.properties()
if err != nil {
return 0, makeProcessError(process, operation, "", err)
}
if !properties.Exited {
return 0, makeProcessError(process, operation, "", ErrInvalidProcessState)
}
logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode)
return int(properties.ExitCode), nil
}
// ResizeConsole resizes the console of the process.
func (process *process) ResizeConsole(width, height uint16) error {
operation := "ResizeConsole"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
modifyRequest := processModifyRequest{
Operation: modifyConsoleSize,
ConsoleSize: &consoleSize{
Height: height,
Width: width,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *process) properties() (*processStatus, error) {
operation := "properties"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &processStatus{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
return properties, nil
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
operation := "Stdio"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var stdIn, stdOut, stdErr syscall.Handle
if process.cachedPipes == nil {
var (
processInfo hcsProcessInformation
resultp *uint16
)
err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, "", err)
}
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
} else {
// Use cached pipes
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
// Invalidate the cache
process.cachedPipes = nil
}
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return pipes[0], pipes[1], pipes[2], nil
}
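// attachStdio is a minimal sketch (assuming an added "os" import) wiring the
// process pipes to the local terminal; per the contract above, closing the
// returned wrappers leaves the underlying pipes open.
func attachStdio(p *process) error {
stdin, stdout, stderr, err := p.Stdio()
if err != nil {
return err
}
go io.Copy(stdin, os.Stdin)
go io.Copy(os.Stdout, stdout)
_, err = io.Copy(os.Stderr, stderr)
return err
}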
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *process) CloseStdin() error {
operation := "CloseStdin"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
modifyRequest := processModifyRequest{
Operation: modifyCloseHandle,
CloseHandle: &closeHandle{
Handle: stdIn,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *process) Close() error {
operation := "Close"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
// Don't double free this
if process.handle == 0 {
return nil
}
if hcsCallbacksSupported {
if err := process.unregisterCallback(); err != nil {
return makeProcessError(process, operation, "", err)
}
}
if err := hcsCloseProcess(process.handle); err != nil {
return makeProcessError(process, operation, "", err)
}
process.handle = 0
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// closeProcess wraps process.Close for use by a finalizer
func closeProcess(process *process) {
process.Close()
}
func (process *process) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
process.callbackNumber = callbackNumber
return nil
}
func (process *process) unregisterCallback() error {
callbackNumber := process.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterProcessCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterProcessCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}

23
vendor/github.com/Microsoft/hcsshim/processimage.go generated vendored Normal file
View file

@ -0,0 +1,23 @@
package hcsshim
import "os"
// ProcessBaseLayer post-processes a base layer that has had its files extracted.
// The files should have been extracted to <path>\Files.
func ProcessBaseLayer(path string) error {
err := processBaseImage(path)
if err != nil {
return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err}
}
return nil
}
// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
// The files should have been extracted to <path>\Files.
func ProcessUtilityVMImage(path string) error {
err := processUtilityImage(path)
if err != nil {
return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err}
}
return nil
}

27
vendor/github.com/Microsoft/hcsshim/unpreparelayer.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
func UnprepareLayer(info DriverInfo, layerId string) error {
title := "hcsshim::UnprepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = unprepareLayer(&infop, layerId)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
return nil
}

39
vendor/github.com/Microsoft/hcsshim/utils.go generated vendored Normal file
View file

@ -0,0 +1,39 @@
package hcsshim
import (
"io"
"syscall"
"github.com/Microsoft/go-winio"
)
var (
vmcomputedll = syscall.NewLazyDLL("vmcompute.dll")
hcsCallbackAPI = vmcomputedll.NewProc("HcsRegisterComputeSystemCallback")
hcsCallbacksSupported = hcsCallbackAPI.Find() == nil
)
// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
// if there is an error.
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
fs := make([]io.ReadWriteCloser, len(hs))
for i, h := range hs {
if h != syscall.Handle(0) {
if err == nil {
fs[i], err = winio.MakeOpenFile(h)
}
if err != nil {
syscall.Close(h)
}
}
}
if err != nil {
for _, f := range fs {
if f != nil {
f.Close()
}
}
return nil, err
}
return fs, nil
}

7
vendor/github.com/Microsoft/hcsshim/version.go generated vendored Normal file
View file

@ -0,0 +1,7 @@
package hcsshim
// IsTP4 returns whether the currently running Windows build is TP4, detected by the absence of the HNSCall API.
func IsTP4() bool {
// HNSCall was not present in TP4
return procHNSCall.Find() != nil
}

126
vendor/github.com/Microsoft/hcsshim/waithelper.go generated vendored Normal file
View file

@ -0,0 +1,126 @@
package hcsshim
import (
"github.com/Sirupsen/logrus"
"syscall"
"time"
)
type waitable interface {
waitTimeoutInternal(timeout uint32) (bool, error)
hcsWait(timeout uint32) (bool, error)
}
func waitTimeoutHelper(object waitable, timeout time.Duration) (bool, error) {
var (
millis uint32
)
for totalMillis := uint64(timeout / time.Millisecond); totalMillis > 0; totalMillis = totalMillis - uint64(millis) {
if totalMillis >= syscall.INFINITE {
millis = syscall.INFINITE - 1
} else {
millis = uint32(totalMillis)
}
result, err := object.waitTimeoutInternal(millis)
if err != nil || !result {
return result, err
}
}
return true, nil
}
func waitTimeoutInternalHelper(object waitable, timeout uint32) (bool, error) {
return object.hcsWait(timeout)
}
func waitForSingleObject(handle syscall.Handle, timeout uint32) (bool, error) {
s, e := syscall.WaitForSingleObject(handle, timeout)
switch s {
case syscall.WAIT_OBJECT_0:
return true, nil
case syscall.WAIT_TIMEOUT:
return false, nil
default:
return false, e
}
}
func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
err = processHcsResult(err, resultp)
if IsPending(err) {
return waitForNotification(callbackNumber, expectedNotification, timeout)
}
return err
}
func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
callbackMapLock.RLock()
channels := callbackMap[callbackNumber].channels
callbackMapLock.RUnlock()
expectedChannel := channels[expectedNotification]
if expectedChannel == nil {
logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
return ErrInvalidNotificationType
}
if timeout != nil {
timer := time.NewTimer(*timeout)
defer timer.Stop()
select {
case err, ok := <-expectedChannel:
if !ok {
return ErrHandleClose
}
return err
case err, ok := <-channels[hcsNotificationSystemExited]:
if !ok {
return ErrHandleClose
}
// If the expected notification is hcsNotificationSystemExited, which of the
// two select cases fires is random. Return the raw error if
// hcsNotificationSystemExited is the expected notification.
if channels[hcsNotificationSystemExited] == expectedChannel {
return err
}
return ErrUnexpectedContainerExit
case _, ok := <-channels[hcsNotificationServiceDisconnect]:
if !ok {
return ErrHandleClose
}
// hcsNotificationServiceDisconnect should never be an expected notification
// it does not need the same handling as hcsNotificationSystemExited
return ErrUnexpectedProcessAbort
case <-timer.C:
return ErrTimeout
}
}
select {
case err, ok := <-expectedChannel:
if !ok {
return ErrHandleClose
}
return err
case err, ok := <-channels[hcsNotificationSystemExited]:
if !ok {
return ErrHandleClose
}
// If the expected notification is hcsNotificationSystemExited, which of the
// two select cases fires is random. Return the raw error if
// hcsNotificationSystemExited is the expected notification.
if channels[hcsNotificationSystemExited] == expectedChannel {
return err
}
return ErrUnexpectedContainerExit
case _, ok := <-channels[hcsNotificationServiceDisconnect]:
if !ok {
return ErrHandleClose
}
// hcsNotificationServiceDisconnect should never be an expected notification
// it does not need the same handling as hcsNotificationSystemExited
return ErrUnexpectedProcessAbort
}
}

1317
vendor/github.com/Microsoft/hcsshim/zhcsshim.go generated vendored Normal file

File diff suppressed because it is too large

182
vendor/github.com/containers/image/copy/copy.go generated vendored Normal file
View file

@ -0,0 +1,182 @@
package copy
import (
"crypto/sha256"
"crypto/subtle"
"encoding/hex"
"fmt"
"hash"
"io"
"strings"
"github.com/containers/image/image"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
"github.com/containers/image/types"
)
// supportedDigests lists the supported blob digest types.
var supportedDigests = map[string]func() hash.Hash{
"sha256": sha256.New,
}
type digestingReader struct {
source io.Reader
digest hash.Hash
expectedDigest []byte
validationFailed bool
}
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// and set validationFailed to true if the source stream does not match expectedDigestString.
func newDigestingReader(source io.Reader, expectedDigestString string) (*digestingReader, error) {
fields := strings.SplitN(expectedDigestString, ":", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString)
}
fn, ok := supportedDigests[fields[0]]
if !ok {
return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0])
}
digest := fn()
expectedDigest, err := hex.DecodeString(fields[1])
if err != nil {
return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err)
}
if len(expectedDigest) != digest.Size() {
return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size())
}
return &digestingReader{
source: source,
digest: digest,
expectedDigest: expectedDigest,
validationFailed: false,
}, nil
}
func (d *digestingReader) Read(p []byte) (int, error) {
n, err := d.source.Read(p)
if n > 0 {
if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil {
// Coverage: This should not happen, the hash.Hash interface requires
// d.digest.Write to never return an error, and the io.Writer interface
// requires n2 == len(input) if no error is returned.
return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err)
}
}
if err == io.EOF {
actualDigest := d.digest.Sum(nil)
if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 {
d.validationFailed = true
return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest))
}
}
return n, err
}
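// A minimal sketch of the intended use (assuming added "bytes" and "io/ioutil"
// imports): wrap a blob stream and read to EOF, at which point Read performs
// the constant-time digest comparison and fails on mismatch.
func exampleDigestingReader(blob []byte) error {
sum := sha256.Sum256(blob)
dr, err := newDigestingReader(bytes.NewReader(blob), "sha256:"+hex.EncodeToString(sum[:]))
if err != nil {
return err
}
_, err = io.Copy(ioutil.Discard, dr) // the final Read at EOF triggers validation
return err
}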
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest().
}
// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility.
func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error {
dest, err := destRef.NewImageDestination(ctx)
if err != nil {
return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err)
}
defer dest.Close()
rawSource, err := srcRef.NewImageSource(ctx, dest.SupportedManifestMIMETypes())
if err != nil {
return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err)
}
src := image.FromSource(rawSource)
defer src.Close()
// Please keep this policy check BEFORE reading any other information about the image.
if allowed, err := policyContext.IsRunningImageAllowed(src); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return fmt.Errorf("Source image rejected: %v", err)
}
manifest, _, err := src.Manifest()
if err != nil {
return fmt.Errorf("Error reading manifest: %v", err)
}
var sigs [][]byte
if options != nil && options.RemoveSignatures {
sigs = [][]byte{}
} else {
s, err := src.Signatures()
if err != nil {
return fmt.Errorf("Error reading signatures: %v", err)
}
sigs = s
}
if len(sigs) != 0 {
if err := dest.SupportsSignatures(); err != nil {
return fmt.Errorf("Can not copy signatures: %v", err)
}
}
blobDigests, err := src.BlobDigests()
if err != nil {
return fmt.Errorf("Error parsing manifest: %v", err)
}
for _, digest := range blobDigests {
stream, blobSize, err := rawSource.GetBlob(digest)
if err != nil {
return fmt.Errorf("Error reading blob %s: %v", digest, err)
}
defer stream.Close()
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
// Note that we don't use a stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(stream, digest)
if err != nil {
return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
}
if _, _, err := dest.PutBlob(digestingReader, digest, blobSize); err != nil {
return fmt.Errorf("Error writing blob: %v", err)
}
if digestingReader.validationFailed { // Coverage: This should never happen.
return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
}
}
if options != nil && options.SignBy != "" {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return fmt.Errorf("Error initializing GPG: %v", err)
}
dockerReference := dest.Reference().DockerReference()
if dockerReference == nil {
return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
}
newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy)
if err != nil {
return fmt.Errorf("Error creating signature: %v", err)
}
sigs = append(sigs, newSig)
}
if err := dest.PutManifest(manifest); err != nil {
return fmt.Errorf("Error writing manifest: %v", err)
}
if err := dest.PutSignatures(sigs); err != nil {
return fmt.Errorf("Error writing signatures: %v", err)
}
if err := dest.Commit(); err != nil {
return fmt.Errorf("Error committing the finished image: %v", err)
}
return nil
}
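// A minimal sketch of driving Image (the image names are placeholders, and the
// signature and transports packages are already imported above). Real callers
// should load the system policy via signature.DefaultPolicy instead of
// accepting anything.
func exampleCopy() error {
policy := &signature.Policy{
Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
}
pc, err := signature.NewPolicyContext(policy)
if err != nil {
return err
}
defer pc.Destroy()
srcRef, err := transports.ParseImageName("docker://busybox:latest")
if err != nil {
return err
}
destRef, err := transports.ParseImageName("dir:/tmp/busybox")
if err != nil {
return err
}
return Image(nil, pc, destRef, srcRef, nil)
}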

60
vendor/github.com/containers/image/signature/docker.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
package signature
import (
"fmt"
"github.com/containers/image/manifest"
)
// SignDockerManifest returns a signature for manifest as the specified dockerReference,
// using mech and keyIdentity.
func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
manifestDigest, err := manifest.Digest(m)
if err != nil {
return nil, err
}
sig := privateSignature{
Signature{
DockerManifestDigest: manifestDigest,
DockerReference: dockerReference,
},
}
return sig.sign(mech, keyIdentity)
}
// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if keyIdentity != expectedKeyIdentity {
return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
}
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
if signedDockerReference != expectedDockerReference {
return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
signedDockerReference, expectedDockerReference)}
}
return nil
},
validateSignedDockerManifestDigest: func(signedDockerManifestDigest string) error {
matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
if err != nil {
return err
}
if !matches {
return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
}
return nil
},
})
if err != nil {
return nil, err
}
return sig, nil
}
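// A minimal round-trip sketch using the two helpers above, assuming a GPG
// keyring containing the given (placeholder) fingerprint is available.
func exampleSignAndVerify(manifest []byte) error {
const fingerprint = "0123456789ABCDEF0123456789ABCDEF01234567"
mech, err := NewGPGSigningMechanism()
if err != nil {
return err
}
sig, err := SignDockerManifest(manifest, "docker.io/library/busybox:latest", mech, fingerprint)
if err != nil {
return err
}
_, err = VerifyDockerManifestSignature(sig, manifest, "docker.io/library/busybox:latest", mech, fingerprint)
return err
}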

107
vendor/github.com/containers/image/signature/json.go generated vendored Normal file
View file

@ -0,0 +1,107 @@
package signature
import (
"bytes"
"encoding/json"
"fmt"
"io"
)
// jsonFormatError is returned when JSON does not match expected format.
type jsonFormatError string
func (err jsonFormatError) Error() string {
return string(err)
}
// validateExactMapKeys returns an error if the keys of m are not exactly expectedKeys, which must be pairwise distinct
func validateExactMapKeys(m map[string]interface{}, expectedKeys ...string) error {
if len(m) != len(expectedKeys) {
return jsonFormatError("Unexpected keys in a JSON object")
}
for _, k := range expectedKeys {
if _, ok := m[k]; !ok {
return jsonFormatError(fmt.Sprintf("Key %s missing in a JSON object", k))
}
}
// Assuming expectedKeys are pairwise distinct, we know m contains len(expectedKeys) different values in expectedKeys.
return nil
}
// mapField returns a member fieldName of m, if it is a JSON map, or an error.
func mapField(m map[string]interface{}, fieldName string) (map[string]interface{}, error) {
untyped, ok := m[fieldName]
if !ok {
return nil, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
}
v, ok := untyped.(map[string]interface{})
if !ok {
return nil, jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
}
return v, nil
}
// stringField returns a member fieldName of m, if it is a string, or an error.
func stringField(m map[string]interface{}, fieldName string) (string, error) {
untyped, ok := m[fieldName]
if !ok {
return "", jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
}
v, ok := untyped.(string)
if !ok {
return "", jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
}
return v, nil
}
// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
if err != nil {
return jsonFormatError(err.Error())
}
if t != json.Delim('{') {
return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
}
for {
t, err := dec.Token()
if err != nil {
return jsonFormatError(err.Error())
}
if t == json.Delim('}') {
break
}
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if _, ok := seenKeys[key]; ok {
return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys[key] = struct{}{}
valuePtr := fieldResolver(key)
if valuePtr == nil {
return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
return jsonFormatError(err.Error())
}
}
if _, err := dec.Token(); err != io.EOF {
return jsonFormatError("Unexpected data after JSON object")
}
return nil
}
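// A minimal sketch of using paranoidUnmarshalJSONObject: strict decoding of a
// two-field object, rejecting unknown and duplicated keys (the field names are
// illustrative).
func exampleStrictDecode(data []byte) (typ, name string, err error) {
err = paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &typ
case "name":
return &name
default:
return nil // unknown keys are rejected
}
})
return
}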

121
vendor/github.com/containers/image/signature/mechanism.go generated vendored Normal file
View file

@ -0,0 +1,121 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
package signature
import (
"bytes"
"fmt"
"github.com/mtrmac/gpgme"
)
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
// ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
ImportKeysFromBytes(blob []byte) ([]string, error)
// Sign creates a (non-detached) signature of input using keyIdentity.
Sign(input []byte, keyIdentity string) ([]byte, error)
// Verify parses unverifiedSignature and returns the content and the signer's identity
Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
}
// A GPG/OpenPGP signing mechanism.
type gpgSigningMechanism struct {
ctx *gpgme.Context
}
// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
func NewGPGSigningMechanism() (SigningMechanism, error) {
return newGPGSigningMechanismInDirectory("")
}
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
ctx, err := gpgme.New()
if err != nil {
return nil, err
}
if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
return nil, err
}
if optionalDir != "" {
err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
if err != nil {
return nil, err
}
}
ctx.SetArmor(false)
ctx.SetTextMode(false)
return gpgSigningMechanism{ctx: ctx}, nil
}
// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes
func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
inputData, err := gpgme.NewDataBytes(blob)
if err != nil {
return nil, err
}
res, err := m.ctx.Import(inputData)
if err != nil {
return nil, err
}
keyIdentities := []string{}
for _, i := range res.Imports {
if i.Result == nil {
keyIdentities = append(keyIdentities, i.Fingerprint)
}
}
return keyIdentities, nil
}
// Sign implements SigningMechanism.Sign
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil {
return nil, err
}
inputData, err := gpgme.NewDataBytes(input)
if err != nil {
return nil, err
}
var sigBuffer bytes.Buffer
sigData, err := gpgme.NewDataWriter(&sigBuffer)
if err != nil {
return nil, err
}
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
return nil, err
}
return sigBuffer.Bytes(), nil
}
// Verify implements SigningMechanism.Verify
func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
signedBuffer := bytes.Buffer{}
signedData, err := gpgme.NewDataWriter(&signedBuffer)
if err != nil {
return nil, "", err
}
unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
if err != nil {
return nil, "", err
}
_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
if err != nil {
return nil, "", err
}
if len(sigs) != 1 {
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
}
sig := sigs[0]
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
// FIXME: Better error reporting eventually
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
}
return signedBuffer.Bytes(), sig.Fingerprint, nil
}
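// A minimal verification sketch, assuming pubKeyBlob holds an exported public
// key and sigBlob a non-detached signature produced with it.
func exampleVerify(pubKeyBlob, sigBlob []byte) error {
mech, err := NewGPGSigningMechanism()
if err != nil {
return err
}
if _, err := mech.ImportKeysFromBytes(pubKeyBlob); err != nil {
return err
}
contents, keyIdentity, err := mech.Verify(sigBlob)
if err != nil {
return err
}
fmt.Printf("signed by %s: %d bytes\n", keyIdentity, len(contents))
return nil
}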

697
vendor/github.com/containers/image/signature/policy_config.go generated vendored Normal file
View file

@ -0,0 +1,697 @@
// policy_config.go handles creation of policy objects, either by parsing JSON
// or by programs building them programmatically.
// The New* constructors are intended to be a stable API. FIXME: after an independent review.
// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
// We can't just blindly call json.Unmarshal because that would silently ignore
// typos, and that would just not do for security policy.
// FIXME? This is by no means a user-friendly parser: no location information in error messages, no other context.
// But at least it is not worse than blind json.Unmarshal()…
package signature
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"path/filepath"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/docker/docker/reference"
)
// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
// You can override this at build time with
// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath
// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"
// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
type InvalidPolicyFormatError string
func (err InvalidPolicyFormatError) Error() string {
return string(err)
}
// FIXME: NewDefaultPolicy, from default file (or environment if trusted?)
// DefaultPolicy returns the default policy of the system.
// Most applications should be using this method to get the policy configured
// by the system administrator.
// ctx should usually be nil, can be set to override the default.
// NOTE: When this function returns an error, report it to the user and abort.
// DO NOT hard-code fallback policies in your application.
func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) {
return NewPolicyFromFile(defaultPolicyPath(ctx))
}
// defaultPolicyPath returns a path to the default policy of the system.
func defaultPolicyPath(ctx *types.SystemContext) string {
if ctx != nil {
if ctx.SignaturePolicyPath != "" {
return ctx.SignaturePolicyPath
}
if ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
}
}
return systemDefaultPolicyPath
}
// NewPolicyFromFile returns a policy configured in the specified file.
func NewPolicyFromFile(fileName string) (*Policy, error) {
contents, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
return NewPolicyFromBytes(contents)
}
// NewPolicyFromBytes returns a policy parsed from the specified blob.
// Use this function instead of calling json.Unmarshal directly.
func NewPolicyFromBytes(data []byte) (*Policy, error) {
p := Policy{}
if err := json.Unmarshal(data, &p); err != nil {
return nil, InvalidPolicyFormatError(err.Error())
}
return &p, nil
}
// Compile-time check that Policy implements json.Unmarshaler.
var _ json.Unmarshaler = (*Policy)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "default":
return &p.Default
case "transports":
return &transports
default:
return nil
}
}); err != nil {
return err
}
if p.Default == nil {
return InvalidPolicyFormatError("Default policy is missing")
}
p.Transports = map[string]PolicyTransportScopes(transports)
return nil
}
// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
type policyTransportsMap map[string]PolicyTransportScopes
// Compile-time check that policyTransportsMap implements json.Unmarshaler.
var _ json.Unmarshaler = (*policyTransportsMap)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-values and convert.
tmpMap := map[string]*PolicyTransportScopes{}
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
transport, ok := transports.KnownTransports[key]
if !ok {
return nil
}
// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
ptsWithTransport := policyTransportScopesWithTransport{
transport: transport,
dest: &PolicyTransportScopes{}, // This allocates a new instance on each call.
}
tmpMap[key] = ptsWithTransport.dest
return &ptsWithTransport
}); err != nil {
return err
}
for key, ptr := range tmpMap {
(*m)[key] = *ptr
}
return nil
}
// Compile-time check that PolicyTransportScopes "implements"" json.Unmarshaler.
// we want to only use policyTransportScopesWithTransport
var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
}
// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
// while validating using a specific ImageTransport.
type policyTransportScopesWithTransport struct {
transport types.ImageTransport
dest *PolicyTransportScopes
}
// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
}
if key != "" {
if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
return nil
}
}
ptr := &PolicyRequirements{} // This allocates a new instance on each call.
tmpMap[key] = ptr
return ptr
}); err != nil {
return err
}
for key, ptr := range tmpMap {
(*m.dest)[key] = *ptr
}
return nil
}
// Compile-time check that PolicyRequirements implements json.Unmarshaler.
var _ json.Unmarshaler = (*PolicyRequirements)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (m *PolicyRequirements) UnmarshalJSON(data []byte) error {
reqJSONs := []json.RawMessage{}
if err := json.Unmarshal(data, &reqJSONs); err != nil {
return err
}
if len(reqJSONs) == 0 {
return InvalidPolicyFormatError("List of verification policy requirements must not be empty")
}
res := make([]PolicyRequirement, len(reqJSONs))
for i, reqJSON := range reqJSONs {
req, err := newPolicyRequirementFromJSON(reqJSON)
if err != nil {
return err
}
res[i] = req
}
*m = res
return nil
}
// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation.
func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
var typeField prCommon
if err := json.Unmarshal(data, &typeField); err != nil {
return nil, err
}
var res PolicyRequirement
switch typeField.Type {
case prTypeInsecureAcceptAnything:
res = &prInsecureAcceptAnything{}
case prTypeReject:
res = &prReject{}
case prTypeSignedBy:
res = &prSignedBy{}
case prTypeSignedBaseLayer:
res = &prSignedBaseLayer{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
}
return res, nil
}
// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type.
func newPRInsecureAcceptAnything() *prInsecureAcceptAnything {
return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}}
}
// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement.
func NewPRInsecureAcceptAnything() PolicyRequirement {
return newPRInsecureAcceptAnything()
}
// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler.
var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prTypeInsecureAcceptAnything {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
*pr = *newPRInsecureAcceptAnything()
return nil
}
// newPRReject is NewPRReject, except it returns the private type.
func newPRReject() *prReject {
return &prReject{prCommon{Type: prTypeReject}}
}
// NewPRReject returns a new "reject" PolicyRequirement.
func NewPRReject() PolicyRequirement {
return newPRReject()
}
// Compile-time check that prReject implements json.Unmarshaler.
var _ json.Unmarshaler = (*prReject)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prTypeReject {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
*pr = *newPRReject()
return nil
}
// newPRSignedBy returns a new prSignedBy if parameters are valid.
func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
if !keyType.IsValid() {
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
}
if len(keyPath) > 0 && len(keyData) > 0 {
return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously")
}
if signedIdentity == nil {
return nil, InvalidPolicyFormatError("signedIdentity not specified")
}
return &prSignedBy{
prCommon: prCommon{Type: prTypeSignedBy},
KeyType: keyType,
KeyPath: keyPath,
KeyData: keyData,
SignedIdentity: signedIdentity,
}, nil
}
// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
}
// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
}
// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
return newPRSignedBy(keyType, "", keyData, signedIdentity)
}
// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
return newPRSignedByKeyData(keyType, keyData, signedIdentity)
}
// Compile-time check that prSignedBy implements json.Unmarshaler.
var _ json.Unmarshaler = (*prSignedBy)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
*pr = prSignedBy{}
var tmp prSignedBy
var gotKeyPath, gotKeyData = false, false
var signedIdentity json.RawMessage
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
case "keyType":
return &tmp.KeyType
case "keyPath":
gotKeyPath = true
return &tmp.KeyPath
case "keyData":
gotKeyData = true
return &tmp.KeyData
case "signedIdentity":
return &signedIdentity
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prTypeSignedBy {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchExact()
} else {
si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
if err != nil {
return err
}
tmp.SignedIdentity = si
}
var res *prSignedBy
var err error
switch {
case gotKeyPath && gotKeyData:
return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
case gotKeyPath && !gotKeyData:
res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
case !gotKeyPath && gotKeyData:
res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
case !gotKeyPath && !gotKeyData:
return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
default: // Coverage: This should never happen
return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
}
if err != nil {
return err
}
*pr = *res
return nil
}
// IsValid returns true iff kt is a recognized value
func (kt sbKeyType) IsValid() bool {
switch kt {
case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
return true
default:
return false
}
}
// Compile-time check that sbKeyType implements json.Unmarshaler.
var _ json.Unmarshaler = (*sbKeyType)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
*kt = sbKeyType("")
var s string
if err := json.Unmarshal(data, &s); err != nil {
return err
}
if !sbKeyType(s).IsValid() {
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
}
*kt = sbKeyType(s)
return nil
}
// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
if baseLayerIdentity == nil {
return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
}
return &prSignedBaseLayer{
prCommon: prCommon{Type: prTypeSignedBaseLayer},
BaseLayerIdentity: baseLayerIdentity,
}, nil
}
// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
return newPRSignedBaseLayer(baseLayerIdentity)
}
// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
case "baseLayerIdentity":
return &baseLayerIdentity
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prTypeSignedBaseLayer {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
if baseLayerIdentity == nil {
return InvalidPolicyFormatError(fmt.Sprintf("baseLayerIdentity not specified"))
}
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
if err != nil {
return err
}
res, err := newPRSignedBaseLayer(bli)
if err != nil {
// Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
return err
}
*pr = *res
return nil
}
// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
var typeField prmCommon
if err := json.Unmarshal(data, &typeField); err != nil {
return nil, err
}
var res PolicyReferenceMatch
switch typeField.Type {
case prmTypeMatchExact:
res = &prmMatchExact{}
case prmTypeMatchRepository:
res = &prmMatchRepository{}
case prmTypeExactReference:
res = &prmExactReference{}
case prmTypeExactRepository:
res = &prmExactRepository{}
default:
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
return nil, err
}
return res, nil
}
// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
func newPRMMatchExact() *prmMatchExact {
return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
}
// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
func NewPRMMatchExact() PolicyReferenceMatch {
return newPRMMatchExact()
}
// Compile-time check that prmMatchExact implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchExact)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prmTypeMatchExact {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
*prm = *newPRMMatchExact()
return nil
}
// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
func newPRMMatchRepository() *prmMatchRepository {
return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
}
// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
func NewPRMMatchRepository() PolicyReferenceMatch {
return newPRMMatchRepository()
}
// Compile-time check that prmMatchRepository implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchRepository)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prmTypeMatchRepository {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
*prm = *newPRMMatchRepository()
return nil
}
// newPRMExactReference is NewPRMExactReference, except it returns the private type.
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
ref, err := reference.ParseNamed(dockerReference)
if err != nil {
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
}
if reference.IsNameOnly(ref) {
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
}
return &prmExactReference{
prmCommon: prmCommon{Type: prmTypeExactReference},
DockerReference: dockerReference,
}, nil
}
// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
return newPRMExactReference(dockerReference)
}
// Compile-time check that prmExactReference implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmExactReference)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
case "dockerReference":
return &tmp.DockerReference
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prmTypeExactReference {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
res, err := newPRMExactReference(tmp.DockerReference)
if err != nil {
return err
}
*prm = *res
return nil
}
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
if _, err := reference.ParseNamed(dockerRepository); err != nil {
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
}
return &prmExactRepository{
prmCommon: prmCommon{Type: prmTypeExactRepository},
DockerRepository: dockerRepository,
}, nil
}
// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
return newPRMExactRepository(dockerRepository)
}
// Compile-time check that prmExactRepository implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmExactRepository)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
case "dockerRepository":
return &tmp.DockerRepository
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prmTypeExactRepository {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
res, err := newPRMExactRepository(tmp.DockerRepository)
if err != nil {
return err
}
*prm = *res
return nil
}
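// A minimal parsing sketch for the strict unmarshalers above (the key path is
// a placeholder); omitting signedIdentity defaults it to "matchExact".
func examplePolicyFromBytes() (*Policy, error) {
return NewPolicyFromBytes([]byte(`{
"default": [{"type": "insecureAcceptAnything"}],
"transports": {
"docker": {
"docker.io/library/busybox": [
{"type": "signedBy", "keyType": "GPGKeys", "keyPath": "/etc/containers/pubkey.gpg"}
]
}
}
}`))
}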

287
vendor/github.com/containers/image/signature/policy_eval.go generated vendored Normal file
View file

@ -0,0 +1,287 @@
// This defines the top-level policy evaluation API.
// To the extent possible, the interface of the functions provided
// here is intended to be completely unambiguous, and stable for users
// to rely on.
package signature
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
)
// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
type PolicyRequirementError string
func (err PolicyRequirementError) Error() string {
return string(err)
}
// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
type signatureAcceptanceResult string
const (
sarAccepted signatureAcceptanceResult = "sarAccepted"
sarRejected signatureAcceptanceResult = "sarRejected"
sarUnknown signatureAcceptanceResult = "sarUnknown"
)
// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
// The type is public, but its definition is private.
type PolicyRequirement interface {
// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
// costly initialization like creating temporary GPG home directories and reading files.
// Setup() (someState, error)
// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
// isSignatureAuthorAccepted, given an image and a signature blob, returns:
// - sarAccepted if the signature has been verified against the appropriate public key
// (where "appropriate public key" may depend on the contents of the signature);
// in that case a parsed Signature should be returned.
// - sarRejected if the signature has not been verified;
// in that case error must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// - sarUnknown if if this PolicyRequirement does not deal with signatures.
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
// Returning sarUnknown and a non-nil error value is invalid.
// WARNING: This makes the signature contents acceptable for further processing,
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use a true value to determine whether to run
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error)
// isRunningImageAllowed returns true if the requirement allows running an image.
// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
isRunningImageAllowed(image types.Image) (bool, error)
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
type PolicyReferenceMatch interface {
// matchesDockerReference decides whether a specific image identity is accepted for an image
// (or, usually, for the image's Reference().DockerReference()). Note that
// image.Reference().DockerReference() may be nil.
matchesDockerReference(image types.Image, signatureDockerReference string) bool
}
// PolicyContext encapsulates a policy and possible cached state
// for speeding up its evaluation.
type PolicyContext struct {
Policy *Policy
state policyContextState // Internal consistency checking
}
// policyContextState is used internally to verify the users are not misusing a PolicyContext.
type policyContextState string
const (
pcInvalid policyContextState = ""
pcInitializing policyContextState = "Initializing"
pcReady policyContextState = "Ready"
pcInUse policyContextState = "InUse"
pcDestroying policyContextState = "Destroying"
pcDestroyed policyContextState = "Destroyed"
)
// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
}
pc.state = new
return nil
}
// NewPolicyContext sets up and initializes a context for the specified policy.
// The policy must not be modified while the context exists. FIXME: make a deep copy?
// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
pc := &PolicyContext{Policy: policy, state: pcInitializing}
// FIXME: initialize
if err := pc.changeState(pcInitializing, pcReady); err != nil {
// Huh?! This should never fail, we didn't give the pointer to anybody.
// Just give up and leave unclean state around.
return nil, err
}
return pc, nil
}
// Destroy should be called when the user of the context is done with it.
func (pc *PolicyContext) Destroy() error {
if err := pc.changeState(pcReady, pcDestroying); err != nil {
return err
}
// FIXME: destroy
return pc.changeState(pcDestroying, pcDestroyed)
}
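// Illustrative sketch (not part of the vendored file): the intended
// PolicyContext lifecycle — construct once, reuse, Destroy when done.
// policy and img are assumed caller-supplied values.
func examplePolicyContextLifecycle(policy *Policy, img types.Image) (bool, error) {
	pc, err := NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	// Destroy moves the context to pcDestroyed; using it afterwards fails
	// the changeState checks above.
	defer pc.Destroy()
	return pc.IsRunningImageAllowed(img)
}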
// policyIdentityLogName returns a string description of the image identity for policy purposes.
// ONLY use this for log messages, not for any decisions!
func policyIdentityLogName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
}
// requirementsForImageRef selects the appropriate requirements for ref.
func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
// Do we have a PolicyTransportScopes for this transport?
transportName := ref.Transport().Name()
if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if req, ok := transportScopes[identity]; ok {
logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
return req
}
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if req, ok := transportScopes[name]; ok {
logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
return req
}
}
// Look for a default match for the transport.
if req, ok := transportScopes[""]; ok {
logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
return req
}
}
logrus.Debugf(" Using default policy section")
return pc.Policy.Default
}
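// Illustrative sketch (assumes the NewPRReject/NewPRInsecureAcceptAnything
// constructors from policy_config.go; scope strings are hypothetical): a
// Policy whose docker scopes exercise each branch of requirementsForImageRef —
// exact identity first, then parent namespaces, then the transport-wide ""
// scope, then Default.
func exampleScopedPolicy() *Policy {
	return &Policy{
		Default: PolicyRequirements{NewPRReject()},
		Transports: map[string]PolicyTransportScopes{
			"docker": {
				// Exact identity match wins first...
				"docker.io/library/busybox:latest": PolicyRequirements{NewPRInsecureAcceptAnything()},
				// ...then the closest parent namespace...
				"docker.io/library": PolicyRequirements{NewPRReject()},
				// ...then the transport-wide "" scope.
				"": PolicyRequirements{NewPRReject()},
			},
		},
	}
}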
// GetSignaturesWithAcceptedAuthor returns those signatures from an image
// for which the policy accepts the author (and which have been successfully
// verified).
// NOTE: This may legitimately return an empty list and no error, if the image
// has no signatures or only invalid signatures.
// WARNING: This makes the signature contents acceptable for further processing,
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use the existence of an accepted signature to determine whether to run
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.Image) (sigs []*Signature, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return nil, err
}
defer func() {
if err := pc.changeState(pcInUse, pcReady); err != nil {
sigs = nil
finalErr = err
}
}()
logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
// FIXME: rename Signatures to UnverifiedSignatures
unverifiedSignatures, err := image.Signatures()
if err != nil {
return nil, err
}
res := make([]*Signature, 0, len(unverifiedSignatures))
for sigNumber, sig := range unverifiedSignatures {
var acceptedSig *Signature // non-nil if accepted
rejected := false
// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
logrus.Debugf("Evaluating signature %d:", sigNumber)
interpretingReqs:
for reqNumber, req := range reqs {
// FIXME: Log the requirement itself? For now, we use just the number.
// FIXME: supply state
switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res {
case sarAccepted:
if as == nil { // Coverage: this should never happen
logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
rejected = true
break interpretingReqs
}
logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
if acceptedSig == nil {
acceptedSig = as
} else if *as != *acceptedSig { // Coverage: this should never happen
// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
rejected = true
acceptedSig = nil
break interpretingReqs
}
case sarRejected:
logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
rejected = true
break interpretingReqs
case sarUnknown:
if err != nil { // Coverage: this should never happen
logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
rejected = true
break interpretingReqs
}
logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
default: // Coverage: this should never happen
logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
rejected = true
break interpretingReqs
}
}
// This also handles the (invalid) case of empty reqs, by rejecting the signature.
if acceptedSig != nil && !rejected {
logrus.Debugf(" Overall: OK, signature accepted")
res = append(res, acceptedSig)
} else {
logrus.Debugf(" Overall: Signature not accepted")
}
}
return res, nil
}
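// Illustrative sketch: inspecting the signatures the policy accepted; pc and
// img are assumed caller-supplied values.
func exampleAcceptedAuthors(pc *PolicyContext, img types.Image) error {
	sigs, err := pc.GetSignaturesWithAcceptedAuthor(img)
	if err != nil {
		return err
	}
	for _, s := range sigs {
		logrus.Debugf("accepted signature for %s@%s", s.DockerReference, s.DockerManifestDigest)
	}
	return nil
}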
// IsRunningImageAllowed returns true iff the policy allows running the image.
// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
func (pc *PolicyContext) IsRunningImageAllowed(image types.Image) (res bool, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return false, err
}
defer func() {
if err := pc.changeState(pcInUse, pcReady); err != nil {
res = false
finalErr = err
}
}()
logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
if len(reqs) == 0 {
return false, PolicyRequirementError("List of verification policy requirements must not be empty")
}
for reqNumber, req := range reqs {
// FIXME: supply state
allowed, err := req.isRunningImageAllowed(image)
if !allowed {
logrus.Debugf("Requirement %d: denied, done", reqNumber)
return false, err
}
logrus.Debugf(" Requirement %d: allowed", reqNumber)
}
// We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
logrus.Debugf("Overall: allowed")
return true, nil
}

View file

@ -0,0 +1,18 @@
// Policy evaluation for prSignedBaseLayer.
package signature
import (
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
)
func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarUnknown, nil, nil
}
func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.Image) (bool, error) {
// FIXME? Reject this at policy parsing time already?
logrus.Errorf("signedBaseLayer not implemented yet!")
return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
}

View file

@ -0,0 +1,137 @@
// Policy evaluation for prSignedBy.
package signature
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
switch pr.KeyType {
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
}
if pr.KeyPath != "" && pr.KeyData != nil {
return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
}
// FIXME: move this to per-context initialization
var data []byte
if pr.KeyData != nil {
data = pr.KeyData
} else {
d, err := ioutil.ReadFile(pr.KeyPath)
if err != nil {
return sarRejected, nil, err
}
data = d
}
// FIXME: move this to per-context initialization
dir, err := ioutil.TempDir("", "skopeo-signedBy-")
if err != nil {
return sarRejected, nil, err
}
defer os.RemoveAll(dir)
mech, err := newGPGSigningMechanismInDirectory(dir)
if err != nil {
return sarRejected, nil, err
}
trustedIdentities, err := mech.ImportKeysFromBytes(data)
if err != nil {
return sarRejected, nil, err
}
if len(trustedIdentities) == 0 {
return sarRejected, nil, PolicyRequirementError("No public keys imported")
}
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
for _, trustedIdentity := range trustedIdentities {
if keyIdentity == trustedIdentity {
return nil
}
}
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
// not be reachable.
return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
},
validateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
}
return nil
},
validateSignedDockerManifestDigest: func(digest string) error {
m, _, err := image.Manifest()
if err != nil {
return err
}
digestMatches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return err
}
if !digestMatches {
return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
}
return nil
},
})
if err != nil {
return sarRejected, nil, err
}
return sarAccepted, signature, nil
}
func (pr *prSignedBy) isRunningImageAllowed(image types.Image) (bool, error) {
sigs, err := image.Signatures()
if err != nil {
return false, err
}
var rejections []error
for _, s := range sigs {
var reason error
switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res {
case sarAccepted:
// One accepted signature is enough.
return true, nil
case sarRejected:
reason = err
case sarUnknown:
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
}
rejections = append(rejections, reason)
}
var summary error
switch len(rejections) {
case 0:
summary = PolicyRequirementError("A signature was required, but no signature exists")
case 1:
summary = rejections[0]
default:
var msgs []string
for _, e := range rejections {
msgs = append(msgs, e.Error())
}
summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
strings.Join(msgs, "; ")))
}
return false, summary
}

View file

@ -0,0 +1,28 @@
// Policy evaluation for the various simple PolicyRequirement types.
package signature
import (
"fmt"
"github.com/containers/image/transports"
"github.com/containers/image/types"
)
func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
// prInsecureAcceptAnything semantics: Every image is allowed to run,
// but this does not consider the signature as verified.
return sarUnknown, nil, nil
}
func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.Image) (bool, error) {
return true, nil
}
func (pr *prReject) isSignatureAuthorAccepted(image types.Image, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
}
func (pr *prReject) isRunningImageAllowed(image types.Image) (bool, error) {
return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
}

View file

@ -0,0 +1,78 @@
// PolicyReferenceMatch implementations.
package signature
import (
"fmt"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/docker/docker/reference"
)
// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
func parseImageAndDockerReference(image types.Image, s2 string) (reference.Named, reference.Named, error) {
r1 := image.Reference().DockerReference()
if r1 == nil {
return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
transports.ImageName(image.Reference())))
}
r2, err := reference.ParseNamed(s2)
if err != nil {
return nil, nil, err
}
return r1, r2, nil
}
func (prm *prmMatchExact) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
}
// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
return false
}
return signature.String() == intended.String()
}
func (prm *prmMatchRepository) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
}
return signature.Name() == intended.Name()
}
// parseDockerReferences converts two reference strings into parsed entities, failing on any error
func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
r1, err := reference.ParseNamed(s1)
if err != nil {
return nil, nil, err
}
r2, err := reference.ParseNamed(s2)
if err != nil {
return nil, nil, err
}
return r1, r2, nil
}
func (prm *prmExactReference) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
if err != nil {
return false
}
// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
return false
}
return signature.String() == intended.String()
}
func (prm *prmExactRepository) matchesDockerReference(image types.Image, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
if err != nil {
return false
}
return signature.Name() == intended.Name()
}

View file

@ -0,0 +1,143 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
// This defines types used to represent a signature verification policy in memory.
// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
// built using the constructor functions provided in policy_config.go.
package signature
// Policy defines requirements for considering a signature valid.
type Policy struct {
// Default applies to any image which does not have a matching policy in Transports.
// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
// if the image matches none of the scopes.
Default PolicyRequirements `json:"default"`
Transports map[string]PolicyTransportScopes `json:"transports"`
}
// PolicyTransportScopes defines policies for images for a specific transport,
// for various scopes, the map keys.
// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
// there is one scope precisely matching a single image, and namespace scopes as prefixes
// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
// The empty scope, if it exists, is considered a parent namespace of all other scopes.
// Most specific scope wins, duplication is prohibited (hard failure).
type PolicyTransportScopes map[string]PolicyRequirements
// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
// Must not be empty, frequently will only contain a single element.
type PolicyRequirements []PolicyRequirement
// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
// The type is public, but its definition is private.
// prCommon is the common type field in a JSON encoding of PolicyRequirement.
type prCommon struct {
Type prTypeIdentifier `json:"type"`
}
// prTypeIdentifier is a string designating a kind of PolicyRequirement.
type prTypeIdentifier string
const (
prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
prTypeReject prTypeIdentifier = "reject"
prTypeSignedBy prTypeIdentifier = "signedBy"
prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
)
// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
// every image is allowed to run.
// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
// FIXME? Better name?
type prInsecureAcceptAnything struct {
prCommon
}
// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
type prReject struct {
prCommon
}
// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
type prSignedBy struct {
prCommon
// KeyType specifies what kind of key reference KeyPath/KeyData is.
// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
KeyType sbKeyType `json:"keyType"`
// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
// Defaults to "match-exact" if not specified.
SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
}
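// Illustrative sketch: a JSON policy requirement of this type (the path and
// identity match values are hypothetical); per policy_eval_signedby.go, only
// "GPGKeys" is implemented so far.
//
//	{
//	  "type": "signedBy",
//	  "keyType": "GPGKeys",
//	  "keyPath": "/etc/pki/containers/pubkey.gpg",
//	  "signedIdentity": {"type": "matchRepository"}
//	}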
// sbKeyType are the allowed values for prSignedBy.KeyType
type sbKeyType string
const (
// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
// FIXME: PEM, DER?
SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
// FIXME: PEM, DER?
SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
)
// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
type prSignedBaseLayer struct {
prCommon
// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
type prmCommon struct {
Type prmTypeIdentifier `json:"type"`
}
// prmTypeIdentifier is a string designating a kind of PolicyReferenceMatch.
type prmTypeIdentifier string
const (
prmTypeMatchExact prmTypeIdentifier = "matchExact"
prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
prmTypeExactReference prmTypeIdentifier = "exactReference"
prmTypeExactRepository prmTypeIdentifier = "exactRepository"
)
// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
type prmMatchExact struct {
prmCommon
}
// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
type prmMatchRepository struct {
prmCommon
}
// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
type prmExactReference struct {
prmCommon
DockerReference string `json:"dockerReference"`
}
// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
type prmExactRepository struct {
prmCommon
DockerRepository string `json:"dockerRepository"`
}

View file

@ -0,0 +1,191 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
package signature
import (
"encoding/json"
"errors"
"fmt"
"time"
"github.com/containers/image/version"
)
const (
signatureType = "atomic container signature"
)
// InvalidSignatureError is returned when parsing an invalid signature.
type InvalidSignatureError struct {
msg string
}
func (err InvalidSignatureError) Error() string {
return err.msg
}
// Signature is a parsed content of a signature.
type Signature struct {
DockerManifestDigest string // FIXME: more precise type?
DockerReference string // FIXME: more precise type?
}
// privateSignature wraps Signature to add methods which we don't want to make public.
type privateSignature struct {
Signature
}
// Compile-time check that privateSignature implements json.Marshaler
var _ json.Marshaler = (*privateSignature)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s privateSignature) MarshalJSON() ([]byte, error) {
return s.marshalJSONWithVariables(time.Now().UTC().Unix(), "atomic "+version.Version)
}
// marshalJSONWithVariables implements MarshalJSON with caller-chosen values of the variable items, to help testing.
func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID string) ([]byte, error) {
if s.DockerManifestDigest == "" || s.DockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.DockerManifestDigest},
"identity": map[string]string{"docker-reference": s.DockerReference},
}
optional := map[string]interface{}{
"creator": creatorID,
"timestamp": timestamp,
}
signature := map[string]interface{}{
"critical": critical,
"optional": optional,
}
return json.Marshal(signature)
}
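// Illustrative sketch: the JSON shape marshalJSONWithVariables emits (the
// digest, reference, and optional values here are hypothetical):
//
//	{
//	  "critical": {
//	    "type": "atomic container signature",
//	    "image": {"docker-manifest-digest": "sha256:..."},
//	    "identity": {"docker-reference": "docker.io/library/busybox:latest"}
//	  },
//	  "optional": {"creator": "atomic 1.0", "timestamp": 1474290000}
//	}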
// Compile-time check that privateSignature implements json.Unmarshaler
var _ json.Unmarshaler = (*privateSignature)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface
func (s *privateSignature) UnmarshalJSON(data []byte) error {
err := s.strictUnmarshalJSON(data)
if err != nil {
if _, ok := err.(jsonFormatError); ok {
err = InvalidSignatureError{msg: err.Error()}
}
}
return err
}
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
var untyped interface{}
if err := json.Unmarshal(data, &untyped); err != nil {
return err
}
o, ok := untyped.(map[string]interface{})
if !ok {
return InvalidSignatureError{msg: "Invalid signature format"}
}
if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
return err
}
c, err := mapField(o, "critical")
if err != nil {
return err
}
if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
return err
}
optional, err := mapField(o, "optional")
if err != nil {
return err
}
_ = optional // We don't use anything from here for now.
t, err := stringField(c, "type")
if err != nil {
return err
}
if t != signatureType {
return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
}
image, err := mapField(c, "image")
if err != nil {
return err
}
if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
return err
}
digest, err := stringField(image, "docker-manifest-digest")
if err != nil {
return err
}
s.DockerManifestDigest = digest
identity, err := mapField(c, "identity")
if err != nil {
return err
}
if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
return err
}
reference, err := stringField(identity, "docker-reference")
if err != nil {
return err
}
s.DockerReference = reference
return nil
}
// sign formats the signature and returns a blob signed using mech and keyIdentity
func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
json, err := json.Marshal(s)
if err != nil {
return nil, err
}
return mech.Sign(json, keyIdentity)
}
// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
// because all of the functions have the same type, so there is a risk of exchanging the functions;
// named members of this struct are more explicit.
type signatureAcceptanceRules struct {
validateKeyIdentity func(string) error
validateSignedDockerReference func(string) error
validateSignedDockerManifestDigest func(string) error
}
// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
// match expected values, both as specified by rules, and returns it
func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
signed, keyIdentity, err := mech.Verify(unverifiedSignature)
if err != nil {
return nil, err
}
if err := rules.validateKeyIdentity(keyIdentity); err != nil {
return nil, err
}
var unmatchedSignature privateSignature
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
return nil, InvalidSignatureError{msg: err.Error()}
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.DockerManifestDigest); err != nil {
return nil, err
}
if err := rules.validateSignedDockerReference(unmatchedSignature.DockerReference); err != nil {
return nil, err
}
signature := unmatchedSignature.Signature // Policy OK.
return &signature, nil
}

View file

@ -0,0 +1,454 @@
package storage
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/image/image"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/storage"
)
var (
// ErrInvalidBlobDigest is returned when PutBlob() is given a blob
// with a digest-based name that can't be used as a map key.
ErrInvalidBlobDigest = errors.New("invalid blob digest")
// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
ErrBlobDigestMismatch = errors.New("blob digest mismatch")
)
type storageImage struct {
store storage.Store
imageRef *storageReference
Tag string `json:"tag,omitempty"`
Created time.Time `json:"created-time,omitempty"`
ID string `json:"id"`
BlobList []string `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
Layers map[string]string `json:"layers,omitempty"` // Map from names of blobs that are layers to layer IDs
BlobData map[string][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary
SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
}
type storageLayerMetadata struct {
ExpectedSize int64 `json:"expected-size,omitempty"`
Digest string `json:"digest,omitempty"`
Size int64 `json:"size,omitempty"`
}
// newImageSource sets us up to read out an image, which we assume exists.
func newImageSource(imageRef *storageReference) *storageImage {
tag := ""
if imageRef.ID() == "" {
logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport())
return nil
}
img, err := imageRef.store.GetImage(imageRef.ID())
if err != nil {
logrus.Errorf("error reading image %q: %v", imageRef.ID(), err)
return nil
}
tag = imageRef.StringWithinTransport()
image := storageImage{
store: imageRef.store,
imageRef: imageRef,
Tag: tag,
Created: time.Now(),
ID: img.ID,
BlobList: []string{},
Layers: make(map[string]string),
BlobData: make(map[string][]byte),
}
if err := image.loadMetadata(); err != nil {
logrus.Errorf("error decoding metadata for source image: %v", err)
return nil
}
return &image
}
// newImageDestination sets us up to write a new image.
func newImageDestination(imageRef *storageReference) *storageImage {
// We set the image's ID if the reference we got looked like one, since
// we take that as an indication that it's going to end up with the
// same contents as an image we already have. If the reference looks
// more like a name, we don't know yet if it'll be exactly the same as,
// or be an updated version of, an image we might already have, so we
// have to err on the side of caution and create a new image which will
// be assigned the name as its tag.
image := storageImage{
store: imageRef.store,
imageRef: imageRef,
Tag: imageRef.StringWithinTransport(),
Created: time.Now(),
ID: imageRef.ID(),
BlobList: []string{},
Layers: make(map[string]string),
BlobData: make(map[string][]byte),
}
return &image
}
func newImage(imageRef *storageReference) *storageImage {
return newImageSource(imageRef)
}
func (s *storageImage) loadMetadata() error {
if s.ID != "" {
image, err := s.store.GetImage(s.ID)
if image != nil && err == nil {
if err := json.Unmarshal([]byte(image.Metadata), s); err != nil {
return err
}
}
}
return nil
}
func (s *storageImage) saveMetadata() error {
if s.ID != "" {
metadata, err := json.Marshal(s)
if len(metadata) != 0 && err == nil {
istore, err := s.store.GetImageStore()
if istore != nil && err == nil {
if err = istore.SetMetadata(s.ID, string(metadata)); err != nil {
logrus.Errorf("error setting metadata for image %q: %v", s.ID, err)
}
} else {
logrus.Errorf("error locating image store: %v", err)
}
return err
}
logrus.Errorf("error encoding metadata for image %q: %v", s.ID, err)
return err
}
return nil
}
func (s *storageImage) Reference() types.ImageReference {
return s.imageRef
}
func (s *storageImage) Close() {
}
// SupportsSignatures returns an error if we can't expect GetSignatures() to
// return data that was previously supplied to PutSignatures().
func (s *storageImage) SupportsSignatures() error {
return nil
}
// PutBlob is used to both store filesystem layers and binary data that is part
// of the image.
func (s *storageImage) PutBlob(stream io.Reader, digest string, expectedSize int64) (actualDigest string, actualSize int64, err error) {
// Try to read an initial snippet of the blob.
header := make([]byte, 10240)
n, err := stream.Read(header)
if err != nil && err != io.EOF {
return "", -1, err
}
// Set up to read the whole blob (the initial snippet, plus the rest)
// while digesting it with sha256.
hasher := sha256.New()
hash := []byte{}
counter := ioutils.NewWriteCounter(hasher)
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream)
multi := io.TeeReader(defragmented, counter)
if (n > 0) && archive.IsArchive(header[:n]) {
// It's a filesystem layer. If it's not the first one in the
// image, we assume that the most recently added layer is its
// parent.
parentLayer := ""
if len(s.BlobList) > 0 {
for _, blob := range s.BlobList {
if layerID, ok := s.Layers[blob]; ok {
parentLayer = layerID
}
}
}
// Now try to figure out if the identifier we have is an ID or
// something else, so that we can do collision detection
// correctly.
names := []string{}
id := digest
if matches := idRegexp.FindStringSubmatch(digest); len(matches) > 1 {
// Looks like a digest. Use the value as a layer ID.
id = matches[len(matches)-1]
} else {
// If it isn't empty, it's a name.
if digest != "" {
names = append(names, digest)
id = ""
}
}
if len(names) == 0 {
names = nil
}
// Attempt to create the identified layer and import its contents.
layer, err := s.store.PutLayer(id, parentLayer, names, "", true, multi)
if err != nil && err != storage.ErrDuplicateID {
logrus.Debugf("error importing layer blob %q: %v", digest, err)
return "", -1, err
}
if err == storage.ErrDuplicateID {
// We specified an ID, and there's already a layer with
// the same ID. Drain the input so that we can look at
// its length and digest.
_, err := io.Copy(ioutil.Discard, multi)
if err != nil && err != io.EOF {
logrus.Debugf("error digesting layer blob %q: %v", digest, err)
return "", -1, err
}
hash = hasher.Sum(nil)
} else {
// Applied the layer, either with a specified ID or a
// new ID. Note the size info and computed digest.
hash = hasher.Sum(nil)
layerdata := storageLayerMetadata{
ExpectedSize: expectedSize,
Digest: "sha256:" + hex.EncodeToString(hash[:]),
Size: counter.Count,
}
if metadata, err := json.Marshal(layerdata); len(metadata) != 0 && err == nil {
s.store.SetMetadata(layer.ID, string(metadata))
}
// Hang on to the new layer's ID.
id = layer.ID
}
// If the ID was a digest, verify that our computed sha256sum
// matches the ID.
if strings.HasPrefix(digest, "sha256:") && digest != "sha256:"+hex.EncodeToString(hash[:]) {
logrus.Debugf("blob %q digests to %q, rejecting", digest, hex.EncodeToString(hash[:]))
if layer != nil {
// Something's wrong; delete the newly-created layer.
s.store.DeleteLayer(layer.ID)
}
return "", -1, ErrBlobDigestMismatch
}
// If we didn't get a name, we might as well assign one using the hash.
if digest == "" {
digest = "sha256:" + hex.EncodeToString(hash[:])
}
// Record that this blob is a layer.
s.Layers[digest] = id
s.BlobList = append(s.BlobList, digest)
if layer != nil {
logrus.Debugf("blob %q imported as a filesystem layer", digest)
} else {
logrus.Debugf("layer blob %q already present", digest)
}
} else {
// It's just data. Finish scanning it in, check that our
// computed sha256sum matches the digest, and store it, but
// leave it out of the blob-to-layer-ID map so that we can tell
// that it's not a layer.
blob, err := ioutil.ReadAll(multi)
if err != nil && err != io.EOF {
return "", -1, err
}
actualSize = int64(len(blob))
hash = hasher.Sum(nil)
// If the ID was a digest, verify that our computed sha256sum
// matches it.
if strings.HasPrefix(digest, "sha256:") && digest != "sha256:"+hex.EncodeToString(hash[:]) {
logrus.Debugf("blob %q digests to %q, rejecting", digest, hex.EncodeToString(hash[:]))
return "", -1, ErrBlobDigestMismatch
}
// If we didn't get a name, we might as well assign one using the hash.
if digest == "" {
digest = "sha256:" + hex.EncodeToString(hash[:])
}
// Save the blob for when we Commit().
s.BlobData[digest] = blob
s.BlobList = append(s.BlobList, digest)
logrus.Debugf("blob %q imported as opaque data", digest)
}
return digest, actualSize, nil
}
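// Illustrative sketch: how the "sha256:<hex>" digests used throughout PutBlob
// are computed for a blob in memory, using only the stdlib packages already
// imported above.
func exampleBlobDigest(blob []byte) string {
	sum := sha256.Sum256(blob)
	return "sha256:" + hex.EncodeToString(sum[:])
}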
func (s *storageImage) Commit() error {
if s.ID != "" {
// We started with an image ID, or we've already registered
// this one and gotten one, so no need to do anything more.
if img, err := s.store.GetImage(s.ID); img != nil && err == nil {
return nil
}
}
lastLayer := ""
if len(s.BlobList) > 0 {
for _, blob := range s.BlobList {
if layerID, ok := s.Layers[blob]; ok {
lastLayer = layerID
}
}
}
img, err := s.store.CreateImage(s.ID, nil, lastLayer, "")
if err != nil {
return err
}
logrus.Debugf("created new image ID %q", img.ID)
if s.Tag != "" {
// We started with an image name rather than an ID, so move the
// name to this image.
if err := s.store.SetNames(img.ID, []string{s.Tag}); err != nil {
return err
}
logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
}
// Save the blob data to disk, and drop the contents from memory.
keys := []string{}
for k, v := range s.BlobData {
if err := s.store.SetImageBigData(img.ID, k, v); err != nil {
return err
}
keys = append(keys, k)
}
for _, key := range keys {
delete(s.BlobData, key)
}
s.ID = img.ID
return nil
}
func (s *storageImage) PutManifest(manifest []byte) error {
if err := s.Commit(); err != nil {
return err
}
defer s.saveMetadata()
return s.store.SetImageBigData(s.ID, "manifest", manifest)
}
func (s *storageImage) PutSignatures(signatures [][]byte) error {
if err := s.Commit(); err != nil {
return err
}
sizes := []int{}
sigblob := []byte{}
for _, sig := range signatures {
sizes = append(sizes, len(sig))
newblob := make([]byte, len(sigblob)+len(sig))
copy(newblob, sigblob)
copy(newblob[len(sigblob):], sig)
sigblob = newblob
}
s.SignatureSizes = sizes
defer s.saveMetadata()
return s.store.SetImageBigData(s.ID, "signatures", sigblob)
}
func (s *storageImage) SupportedManifestMIMETypes() []string {
return nil
}
func (s *storageImage) GetBlob(digest string) (rc io.ReadCloser, n int64, err error) {
if blob, ok := s.BlobData[digest]; ok {
r := bytes.NewReader(blob)
return ioutil.NopCloser(r), r.Size(), nil
}
if _, ok := s.Layers[digest]; !ok {
b, err := s.store.GetImageBigData(s.ID, digest)
if err != nil {
return nil, -1, err
}
r := bytes.NewReader(b)
logrus.Debugf("exporting opaque data as blob %q", digest)
return ioutil.NopCloser(r), r.Size(), nil
}
logrus.Debugf("exporting filesystem layer as blob %q", digest)
return s.diffLayer(s.Layers[digest], true)
}
func (s *storageImage) diffLayer(layerID string, computeSize bool) (rc io.ReadCloser, n int64, err error) {
layer, err := s.store.GetLayer(layerID)
if err != nil {
return nil, -1, err
}
layerMeta := storageLayerMetadata{
ExpectedSize: -1,
}
if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
logrus.Errorf("error decoding metadata for layer %q: %v", layerID, err)
return nil, -1, err
}
}
if computeSize {
if layerMeta.ExpectedSize == -1 {
n, err = s.store.DiffSize("", layer.ID)
if err != nil {
return nil, -1, err
}
} else {
n = layerMeta.ExpectedSize
}
} else {
n = -1
}
diff, err := s.store.Diff("", layer.ID)
if err != nil {
return nil, -1, err
}
return diff, n, nil
}
func (s *storageImage) GetManifest() (manifest []byte, MIMEType string, err error) {
manifest, err = s.store.GetImageBigData(s.ID, "manifest")
return manifest, "", err
}
func (s *storageImage) GetSignatures() (signatures [][]byte, err error) {
var offset int
if err := s.loadMetadata(); err != nil {
logrus.Errorf("error decoding metadata for image: %v", err)
return nil, err
}
signature, err := s.store.GetImageBigData(s.ID, "signatures")
if err != nil {
return nil, err
}
sigslice := [][]byte{}
for _, length := range s.SignatureSizes {
sigslice = append(sigslice, signature[offset:offset+length])
offset += length
}
if offset != len(signature) {
return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
}
return sigslice, nil
}
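// Illustrative sketch: the packing scheme shared by PutSignatures and
// GetSignatures — one concatenated blob split by the recorded sizes, e.g.
// sizes [2, 3] over "aabbb" yield "aa" and "bbb".
func exampleSplitSignatures(blob []byte, sizes []int) [][]byte {
	out := make([][]byte, 0, len(sizes))
	offset := 0
	for _, n := range sizes {
		out = append(out, blob[offset:offset+n])
		offset += n
	}
	return out
}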
func (s *storageImage) DeleteImage() error {
if s.ID != "" {
if _, err := s.store.DeleteImage(s.ID, true); err != nil {
return err
}
s.ID = ""
}
return nil
}
func (s *storageImage) Manifest() (manifest []byte, MIMEType string, err error) {
return s.GetManifest()
}
func (s *storageImage) Signatures() (signatures [][]byte, err error) {
return s.GetSignatures()
}
func (s *storageImage) BlobDigests() (digests []string, err error) {
return image.FromSource(s).BlobDigests()
}
func (s *storageImage) Inspect() (info *types.ImageInspectInfo, err error) {
return image.FromSource(s).Inspect()
}

View file

@ -0,0 +1,89 @@
package storage
import (
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
"github.com/containers/storage/storage"
"github.com/docker/docker/reference"
)
// A storageReference holds a name and/or an ID, which is a 32-byte value
// hex-encoded into a 64-character string.
type storageReference struct {
store storage.Store
transport *storageTransport
reference string
id string
}
func newReference(store storage.Store, transport *storageTransport, reference, id string) *storageReference {
return &storageReference{
store: store,
transport: transport,
reference: reference,
id: id,
}
}
// Resolve the reference's name to an image ID in the storage library if
// there's already one present with the same name or ID.
func (s *storageReference) ID() string {
if s.id == "" {
image, err := s.store.GetImage(s.reference)
if image != nil && err == nil {
s.id = image.ID
}
}
return s.id
}
func (s *storageReference) Transport() types.ImageTransport {
return s.transport
}
func (s *storageReference) DockerReference() reference.Named {
return nil
}
func (s *storageReference) StringWithinTransport() string {
if s.reference == "" {
return s.id
}
return s.reference
}
func (s *storageReference) PolicyConfigurationIdentity() string {
if s.reference == "" {
return s.id
}
return s.reference
}
func (s *storageReference) PolicyConfigurationNamespaces() []string {
return nil
}
func (s *storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
return newImage(s), nil
}
func (s *storageReference) DeleteImage(ctx *types.SystemContext) error {
layers, err := s.store.DeleteImage(s.ID(), true)
if err == nil {
logrus.Debugf("deleted image %q", s.ID())
s.id = ""
for _, layer := range layers {
logrus.Debugf("deleted layer %q", layer)
}
}
s.id = ""
return err
}
func (s *storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
return newImageSource(s), nil
}
func (s *storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(s), nil
}

View file

@ -0,0 +1,79 @@
package storage
import (
"errors"
"regexp"
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
"github.com/containers/storage/storage"
)
var (
// Transport is an ImageTransport that uses a default storage.Store or
// one that it's explicitly told to use.
Transport StoreTransport = &storageTransport{}
// ErrInvalidReference is returned when ParseReference() is passed an
// empty reference.
ErrInvalidReference = errors.New("invalid reference")
idRegexp = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$")
)
// StoreTransport is an ImageTransport that uses a storage.Store to parse
// references, either its own or one that it's told to use.
type StoreTransport interface {
types.ImageTransport
SetStore(storage.Store)
ParseStoreReference(store storage.Store, reference string) (types.ImageReference, error)
}
type storageTransport struct {
store storage.Store
}
func (s *storageTransport) Name() string {
return "oci-storage"
}
// SetStore sets the Store object which the Transport will use for parsing
// references when a Store is not directly specified. If one is not set, the
// library will attempt to initialize one with default settings when a
// reference needs to be parsed. Calling SetStore does not affect previously
// parsed references.
func (s *storageTransport) SetStore(store storage.Store) {
s.store = store
}
// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to a given store, and returns it in a reference object.
func (s *storageTransport) ParseStoreReference(store storage.Store, reference string) (types.ImageReference, error) {
if reference == "" {
return nil, ErrInvalidReference
}
id := ""
if matches := idRegexp.FindStringSubmatch(reference); len(matches) > 1 {
id = matches[len(matches)-1]
logrus.Debugf("parsed reference %q into ID %q", reference, id)
reference = ""
} else {
logrus.Debugf("treating reference %q as a name", reference)
}
return newReference(store, s, reference, id), nil
}
// ParseReference initializes the storage library and then takes a name or an
// ID, tries to figure out which it is, and returns it in a reference object.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
if s.store == nil {
store, err := storage.MakeStore("", "", "", []string{}, nil, nil)
if err != nil {
return nil, err
}
s.store = store
}
return s.ParseStoreReference(s.store, reference)
}
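// Illustrative sketch: the two reference forms ParseReference accepts — a
// name, or a 64-hex-digit ID (optionally "sha256:"-prefixed) as matched by
// idRegexp above. The values are hypothetical.
func exampleParseReferences() error {
	if _, err := Transport.ParseReference("busybox"); err != nil {
		return err
	}
	_, err := Transport.ParseReference("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
	return err
}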
func (s *storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return nil
}

View file

@ -8,6 +8,7 @@ import (
"github.com/containers/image/docker"
ociLayout "github.com/containers/image/oci/layout"
"github.com/containers/image/openshift"
"github.com/containers/image/storage"
"github.com/containers/image/types"
)
@ -21,6 +22,7 @@ func init() {
docker.Transport,
ociLayout.Transport,
openshift.Transport,
storage.Transport,
} {
name := t.Name()
if _, ok := KnownTransports[name]; ok {

View file

@ -1,7 +1,7 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@ -176,13 +176,13 @@
END OF TERMS AND CONDITIONS
Copyright 2014-2016 Docker, Inc.
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

View file

@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

View file

@ -0,0 +1,586 @@
// +build linux
/*

aufs driver directory structure

  .
  ├── layers // Metadata of layers
  │   ├── 1
  │   ├── 2
  │   └── 3
  ├── diff   // Content of the layer
  │   ├── 1  // Contains layers that need to be mounted for the id
  │   ├── 2
  │   └── 3
  └── mnt    // Mount points for the rw layers to be mounted
      ├── 1
      ├── 2
      └── 3

*/
package aufs
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/stringid"
"github.com/opencontainers/runc/libcontainer/label"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
var (
// ErrAufsNotSupported is returned if aufs is not supported by the host.
ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
// ErrAufsNested means aufs cannot be used because we are in a user namespace
ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
backingFs = "<unknown>"
enableDirpermLock sync.Once
enableDirperm bool
)
func init() {
graphdriver.Register("aufs", Init)
}
// Driver contains information about the filesystem mounted.
type Driver struct {
sync.Mutex
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex
pathCache map[string]string
}
// Init returns a new AUFS driver.
// An error is returned if AUFS is not supported.
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
return nil, graphdriver.ErrNotSupported
}
fsMagic, err := graphdriver.GetFSMagic(root)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
paths := []string{
"mnt",
"diff",
"layers",
}
a := &Driver{
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the root aufs driver dir and return
// if it already exists
// If not populate the dir structure
if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil {
if os.IsExist(err) {
return a, nil
}
return nil, err
}
if err := mountpk.MakePrivate(root); err != nil {
return nil, err
}
// Populate the dir structure
for _, p := range paths {
if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil {
return nil, err
}
}
return a, nil
}
// supportsAufs returns a nil error if the kernel supports aufs.
// We cannot rely on modprobe because inside dind modprobe fails to run.
func supportsAufs() error {
// We can try to modprobe aufs first before looking at
// proc/filesystems for when aufs is supported
exec.Command("modprobe", "aufs").Run()
if rsystem.RunningInUserNS() {
return ErrAufsNested
}
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), "aufs") {
return nil
}
}
return ErrAufsNotSupported
}
func (a *Driver) rootPath() string {
return a.root
}
func (*Driver) String() string {
return "aufs"
}
// Status returns current information about the filesystem such as root directory, number of directories mounted, etc.
func (a *Driver) Status() [][2]string {
ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
return [][2]string{
{"Root Dir", a.rootPath()},
{"Backing Filesystem", backingFs},
{"Dirs", fmt.Sprintf("%d", len(ids))},
{"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())},
}
}
// GetMetadata not implemented
func (a *Driver) GetMetadata(id string) (map[string]string, error) {
return nil, nil
}
// Exists returns true if the given id is registered with
// this driver
func (a *Driver) Exists(id string) bool {
if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
return false
}
return true
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return a.Create(id, parent, mountLabel, storageOpt)
}
// Create creates three folders for each id:
// mnt, layers, and diff
func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for aufs")
}
if err := a.createDirsFor(id); err != nil {
return err
}
// Write the layers metadata
f, err := os.Create(path.Join(a.rootPath(), "layers", id))
if err != nil {
return err
}
defer f.Close()
if parent != "" {
ids, err := getParentIds(a.rootPath(), parent)
if err != nil {
return err
}
if _, err := fmt.Fprintln(f, parent); err != nil {
return err
}
for _, i := range ids {
if _, err := fmt.Fprintln(f, i); err != nil {
return err
}
}
}
return nil
}
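// Illustrative sketch: the "layers" metadata file written by Create above is
// one ancestor ID per line, nearest parent first, e.g.:
//
//	<parent-id>
//	<grandparent-id>
//	...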
// createDirsFor creates two directories for the given id:
// mnt and diff
func (a *Driver) createDirsFor(id string) error {
paths := []string{
"mnt",
"diff",
}
rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps)
if err != nil {
return err
}
// Directory permission is 0755.
// The paths of the directories are <aufs_root_path>/mnt/<image_id>
// and <aufs_root_path>/diff/<image_id>
for _, p := range paths {
if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil {
return err
}
}
return nil
}
// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
a.pathCacheLock.Lock()
mountpoint, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
mountpoint = a.getMountpoint(id)
}
if err := a.unmount(mountpoint); err != nil {
// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that docker doesn't find it anymore) before doing removal of
// the whole tree.
tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpMntPath)
tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpDiffpath)
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
return err
}
a.pathCacheLock.Lock()
delete(a.pathCache, id)
a.pathCacheLock.Unlock()
return nil
}
// Get returns the rootfs path for the id.
// This will mount the dir at its given path
func (a *Driver) Get(id, mountLabel string) (string, error) {
parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) {
return "", err
}
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
m = a.getDiffPath(id)
if len(parents) > 0 {
m = a.getMountpoint(id)
}
}
if count := a.ctr.Increment(m); count > 1 {
return m, nil
}
// If a dir does not have a parent (no layers), do not try to mount;
// just return the diff path to the data
if len(parents) > 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil {
return "", err
}
}
a.pathCacheLock.Lock()
a.pathCache[id] = m
a.pathCacheLock.Unlock()
return m, nil
}
// Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error {
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
if count := a.ctr.Decrement(m); count > 0 {
return nil
}
err := a.unmount(m)
if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
}
return err
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
// AUFS doesn't need the parent layer to produce a diff.
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
Compression: archive.Uncompressed,
ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
UIDMaps: a.uidMaps,
GIDMaps: a.gidMaps,
})
}
type fileGetNilCloser struct {
storage.FileGetter
}
func (f fileGetNilCloser) Close() error {
return nil
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p := path.Join(a.rootPath(), "diff", id)
return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
}
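// Usage sketch (editor's addition, not part of the vendored source): the
// returned value implements tar-split's storage.FileGetter, so a caller can
// read an individual file out of the layer's diff directory, e.g.:
//
//	getter, err := a.DiffGetter(layerID) // layerID is a placeholder
//	if err == nil {
//		rc, err := getter.Get("etc/hostname") // path relative to the diff dir
//		if err == nil {
//			io.Copy(os.Stdout, rc) // requires "io" and "os"
//			rc.Close()
//		}
//		getter.Close()
//	}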
func (a *Driver) applyDiff(id string, diff archive.Reader) error {
return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
UIDMaps: a.uidMaps,
GIDMaps: a.gidMaps,
})
}
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
// AUFS doesn't need the parent layer to calculate the diff size.
return directory.Size(path.Join(a.rootPath(), "diff", id))
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
// AUFS doesn't need the parent id to apply the diff.
if err = a.applyDiff(id, diff); err != nil {
return
}
return a.DiffSize(id, parent)
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
// AUFS doesn't have snapshots, so we need to get changes from all parent
// layers.
layers, err := a.getParentLayerPaths(id)
if err != nil {
return nil, err
}
return archive.Changes(layers, path.Join(a.rootPath(), "diff", id))
}
func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
parentIds, err := getParentIds(a.rootPath(), id)
if err != nil {
return nil, err
}
layers := make([]string, len(parentIds))
// Get the diff paths for all the parent ids
for i, p := range parentIds {
layers[i] = path.Join(a.rootPath(), "diff", p)
}
return layers, nil
}
func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
a.Lock()
defer a.Unlock()
// If the id is already mounted, or we hit an error, return early
if mounted, err := a.mounted(target); err != nil || mounted {
return err
}
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
}
return nil
}
func (a *Driver) unmount(mountPath string) error {
a.Lock()
defer a.Unlock()
if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err
}
if err := Unmount(mountPath); err != nil {
return err
}
return nil
}
func (a *Driver) mounted(mountpoint string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
}
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
var dirs []string
if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
}
for _, m := range dirs {
if err := a.unmount(m); err != nil {
logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
}
}
return mountpk.Unmount(a.root)
}
func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
defer func() {
if err != nil {
Unmount(target)
}
}()
// Mount options are clipped to the page size (4096 bytes). If there are
// more layers than fit, the remainder are remounted individually using append.
offset := 54
if useDirperm() {
offset += len("dirperm1")
}
b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
firstMount := true
i := 0
for {
for ; i < len(ro); i++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[i])
if firstMount {
if bp+len(layer) > len(b) {
break
}
bp += copy(b[bp:], layer)
} else {
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil {
return
}
}
}
if firstMount {
opts := "dio,xino=/dev/shm/aufs.xino"
if useDirperm() {
opts += ",dirperm1"
}
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}
firstMount = false
}
if i == len(ro) {
break
}
}
return
}
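// Illustration (editor's addition): with one writable layer and two read-only
// parents, the data string passed to the first mount looks roughly like
//
//	br:<root>/diff/<id>=rw:<root>/diff/<p1>=ro+wh:<root>/diff/<p2>=ro+wh,dio,xino=/dev/shm/aufs.xino
//
// and any read-only layers that did not fit into the page-sized buffer are
// attached afterwards with "append:<path>=ro+wh" MS_REMOUNT calls. The
// <root>, <id>, <p1> and <p2> values are placeholders.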
// useDirperm checks whether the dirperm1 mount option can be used with the
// current version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
base, err := ioutil.TempDir("", "docker-aufs-base")
if err != nil {
logrus.Errorf("error checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
union, err := ioutil.TempDir("", "docker-aufs-union")
if err != nil {
logrus.Errorf("error checking dirperm1: %v", err)
return
}
defer os.RemoveAll(union)
opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
if err := mount("none", union, "aufs", 0, opts); err != nil {
return
}
enableDirperm = true
if err := Unmount(union); err != nil {
logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
}
})
return enableDirperm
}

View file

@@ -0,0 +1,64 @@
// +build linux
package aufs
import (
"bufio"
"io/ioutil"
"os"
"path"
)
// loadIds returns all the non-directory entries (layer ids) under root
func loadIds(root string) ([]string, error) {
dirs, err := ioutil.ReadDir(root)
if err != nil {
return nil, err
}
out := []string{}
for _, d := range dirs {
if !d.IsDir() {
out = append(out, d.Name())
}
}
return out, nil
}
// Read the layers file for the current id and return all the
// layers represented by new lines in the file
//
// If there are no lines in the file then the id has no parent
// and an empty slice is returned.
func getParentIds(root, id string) ([]string, error) {
f, err := os.Open(path.Join(root, "layers", id))
if err != nil {
return nil, err
}
defer f.Close()
out := []string{}
s := bufio.NewScanner(f)
for s.Scan() {
if t := s.Text(); t != "" {
out = append(out, s.Text())
}
}
return out, s.Err()
}
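// Illustration (editor's addition): the layers file is plain text with one
// ancestor id per line, nearest parent first. For a hypothetical layer "c3"
// whose parent chain is c3 -> b2 -> a1, the file "layers/c3" contains:
//
//	b2
//	a1
//
// so getParentIds(root, "c3") returns ["b2", "a1"], and an empty file means
// the layer has no parent.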
func (a *Driver) getMountpoint(id string) string {
return path.Join(a.mntPath(), id)
}
func (a *Driver) mntPath() string {
return path.Join(a.rootPath(), "mnt")
}
func (a *Driver) getDiffPath(id string) string {
return path.Join(a.diffPath(), id)
}
func (a *Driver) diffPath() string {
return path.Join(a.rootPath(), "diff")
}

View file

@@ -0,0 +1,21 @@
// +build linux
package aufs
import (
"os/exec"
"syscall"
"github.com/Sirupsen/logrus"
)
// Unmount the target specified.
func Unmount(target string) error {
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
}
if err := syscall.Unmount(target, 0); err != nil {
return err
}
return nil
}

View file

@@ -0,0 +1,7 @@
package aufs
import "syscall"
func mount(source string, target string, fstype string, flags uintptr, data string) error {
return syscall.Mount(source, target, fstype, flags, data)
}

View file

@@ -0,0 +1,12 @@
// +build !linux
package aufs
import "errors"
// MsRemount is a placeholder remount flag for non-Linux systems.
const MsRemount = 0
func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
return errors.New("mount is not implemented on this platform")
}

View file

@@ -0,0 +1,520 @@
// +build linux
package btrfs
/*
#include <stdlib.h>
#include <dirent.h>
#include <btrfs/ioctl.h>
#include <btrfs/ctree.h>
static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
}
*/
import "C"
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"unsafe"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/docker/go-units"
"github.com/opencontainers/runc/libcontainer/label"
)
func init() {
graphdriver.Register("btrfs", Init)
}
var (
quotaEnabled = false
userDiskQuota = false
)
type btrfsOptions struct {
minSpace uint64
size uint64
}
// Init returns a new BTRFS driver.
// An error is returned if BTRFS is not supported.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsMagic != graphdriver.FsMagicBtrfs {
return nil, graphdriver.ErrPrerequisites
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
opt, err := parseOptions(options)
if err != nil {
return nil, err
}
if userDiskQuota {
if err := subvolEnableQuota(home); err != nil {
return nil, err
}
quotaEnabled = true
}
driver := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
options: opt,
}
return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
}
func parseOptions(opt []string) (btrfsOptions, error) {
var options btrfsOptions
for _, option := range opt {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return options, err
}
key = strings.ToLower(key)
switch key {
case "btrfs.min_space":
minSpace, err := units.RAMInBytes(val)
if err != nil {
return options, err
}
userDiskQuota = true
options.minSpace = uint64(minSpace)
default:
return options, fmt.Errorf("Unknown option %s", key)
}
}
return options, nil
}
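// Usage sketch (editor's addition): driver options arrive as "key=value"
// strings, so enabling per-container disk quota with a floor of one gigabyte
// would look roughly like
//
//	opts, err := parseOptions([]string{"btrfs.min_space=1G"})
//	// on success: userDiskQuota == true and opts.minSpace == 1<<30
//
// The "1G" value is illustrative; units.RAMInBytes accepts the usual
// k/m/g/t suffixes.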
// Driver contains information about the filesystem mounted.
type Driver struct {
//root of the file system
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
options btrfsOptions
}
// String prints the name of the driver (btrfs).
func (d *Driver) String() string {
return "btrfs"
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
// Version information can be used to check compatibility with your kernel.
func (d *Driver) Status() [][2]string {
status := [][2]string{}
if bv := btrfsBuildVersion(); bv != "-" {
status = append(status, [2]string{"Build Version", bv})
}
if lv := btrfsLibVersion(); lv != -1 {
status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
}
return status
}
// GetMetadata returns empty metadata for this driver.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
return nil, nil
}
// Cleanup unmounts the home directory.
func (d *Driver) Cleanup() error {
if quotaEnabled {
if err := subvolDisableQuota(d.home); err != nil {
return err
}
}
return mount.Unmount(d.home)
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("Can't open dir")
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
func subvolCreate(path, name string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_vol_args
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
}
return nil
}
func subvolSnapshot(src, dest, name string) error {
srcDir, err := openDir(src)
if err != nil {
return err
}
defer closeDir(srcDir)
destDir, err := openDir(dest)
if err != nil {
return err
}
defer closeDir(destDir)
var args C.struct_btrfs_ioctl_vol_args_v2
args.fd = C.__s64(getDirFd(srcDir))
var cs = C.CString(name)
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
}
return nil
}
func isSubvolume(p string) (bool, error) {
var bufStat syscall.Stat_t
if err := syscall.Lstat(p, &bufStat); err != nil {
return false, err
}
// return true if it is a btrfs subvolume
return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil
}
func subvolDelete(dirpath, name string) error {
dir, err := openDir(dirpath)
if err != nil {
return err
}
defer closeDir(dir)
fullPath := path.Join(dirpath, name)
var args C.struct_btrfs_ioctl_vol_args
// walk the btrfs subvolumes
walkSubvolumes := func(p string, f os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) && p != fullPath {
// missing most likely because the path was a subvolume that got removed in the previous iteration
// since it's gone anyway, we don't care
return nil
}
return fmt.Errorf("error walking subvolumes: %v", err)
}
// we want to check children only, so skip the path itself;
// it will be removed after the filepath walk anyway
if f.IsDir() && p != fullPath {
sv, err := isSubvolume(p)
if err != nil {
return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
}
if sv {
if err := subvolDelete(path.Dir(p), f.Name()); err != nil {
return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
}
}
}
return nil
}
if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil {
return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
}
// all subvolumes have been removed
// now remove the one originally passed in
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
}
return nil
}
func subvolEnableQuota(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
}
return nil
}
func subvolDisableQuota(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
}
return nil
}
func subvolRescanQuota(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_rescan_args
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
}
return nil
}
func subvolLimitQgroup(path string, size uint64) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_qgroup_limit_args
args.lim.max_referenced = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
}
return nil
}
func (d *Driver) subvolumesDir() string {
return path.Join(d.home, "subvolumes")
}
func (d *Driver) subvolumesDirID(id string) string {
return path.Join(d.subvolumesDir(), id)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create the filesystem with given id.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
subvolumes := path.Join(d.home, "subvolumes")
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil {
return err
}
if parent == "" {
if err := subvolCreate(subvolumes, id); err != nil {
return err
}
} else {
parentDir := d.subvolumesDirID(parent)
st, err := os.Stat(parentDir)
if err != nil {
return err
}
if !st.IsDir() {
return fmt.Errorf("%s: not a directory", parentDir)
}
if err := subvolSnapshot(parentDir, subvolumes, id); err != nil {
return err
}
}
if _, ok := storageOpt["size"]; ok {
driver := &Driver{}
if err := d.parseStorageOpt(storageOpt, driver); err != nil {
return err
}
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err
}
}
// if we have a remapped root (user namespaces enabled), change the created snapshot
// dir ownership to match
if rootUID != 0 || rootGID != 0 {
if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
return err
}
}
return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
}
// Parse btrfs storage options
func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
// Read size to change the subvolume disk quota per container
for key, val := range storageOpt {
key := strings.ToLower(key)
switch key {
case "size":
size, err := units.RAMInBytes(val)
if err != nil {
return err
}
driver.options.size = uint64(size)
default:
return fmt.Errorf("Unknown option %s", key)
}
}
return nil
}
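// For example (editor's note), passing storageOpt =
// map[string]string{"size": "20G"} when creating a layer makes Create call
// setStorageSize below, which limits the subvolume's qgroup to 20 GiB; the
// value is illustrative.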
// Set btrfs storage size
func (d *Driver) setStorageSize(dir string, driver *Driver) error {
if driver.options.size <= 0 {
return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size)))
}
if d.options.minSpace > 0 && driver.options.size < d.options.minSpace {
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
}
if !quotaEnabled {
if err := subvolEnableQuota(d.home); err != nil {
return err
}
quotaEnabled = true
}
if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
return err
}
return nil
}
// Remove the filesystem with given id.
func (d *Driver) Remove(id string) error {
dir := d.subvolumesDirID(id)
if _, err := os.Stat(dir); err != nil {
return err
}
if err := subvolDelete(d.subvolumesDir(), id); err != nil {
return err
}
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
if err := subvolRescanQuota(d.home); err != nil {
return err
}
return nil
}
// Get the requested filesystem id.
func (d *Driver) Get(id, mountLabel string) (string, error) {
dir := d.subvolumesDirID(id)
st, err := os.Stat(dir)
if err != nil {
return "", err
}
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}
return dir, nil
}
// Put is not implemented for BTRFS as there is no cleanup required for the id.
func (d *Driver) Put(id string) error {
// Get() creates no runtime resources (like e.g. mounts)
// so this doesn't need to do anything.
return nil
}
// Exists checks if the id exists in the filesystem.
func (d *Driver) Exists(id string) bool {
dir := d.subvolumesDirID(id)
_, err := os.Stat(dir)
return err == nil
}

View file

@@ -0,0 +1,3 @@
// +build !linux !cgo
package btrfs

View file

@@ -0,0 +1,26 @@
// +build linux,!btrfs_noversion
package btrfs
/*
#include <btrfs/version.h>
// around version 3.16, they did not define lib version yet
#ifndef BTRFS_LIB_VERSION
#define BTRFS_LIB_VERSION -1
#endif
// upstream had removed it, but now it will be coming back
#ifndef BTRFS_BUILD_VERSION
#define BTRFS_BUILD_VERSION "-"
#endif
*/
import "C"
func btrfsBuildVersion() string {
return string(C.BTRFS_BUILD_VERSION)
}
func btrfsLibVersion() int {
return int(C.BTRFS_LIB_VERSION)
}

View file

@@ -0,0 +1,14 @@
// +build linux,btrfs_noversion
package btrfs
// TODO(vbatts) remove this work-around once supported linux distros are on
// btrfs utilities of >= 3.16.1
func btrfsBuildVersion() string {
return "-"
}
func btrfsLibVersion() int {
return -1
}

View file

@@ -0,0 +1,67 @@
package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]*minfo
mu sync.Mutex
checker Checker
}
// NewRefCounter returns a new RefCounter
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
}
// Increment increases the ref count for the given path and returns the current count
func (c *RefCounter) Increment(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If we are checking this path for the first time, verify whether it is
// already mounted on the system; if so, bump the count so it correctly
// reflects the existing in-use mount.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count++
c.mu.Unlock()
return m.count
}
// Decrement decreases the ref count for the given path and returns the current count
func (c *RefCounter) Decrement(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If we are checking this path for the first time, verify whether it is
// already mounted on the system; if so, bump the count so it correctly
// reflects the existing in-use mount.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count--
c.mu.Unlock()
return m.count
}
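// Usage sketch (editor's addition): graph drivers bracket mount and unmount
// with the two calls so that only the first Get actually mounts and only the
// last Put actually unmounts:
//
//	if count := ctr.Increment(mountpoint); count > 1 {
//		return mountpoint, nil // already mounted by an earlier Get
//	}
//	// ...perform the actual mount...
//
//	if count := ctr.Decrement(mountpoint); count > 0 {
//		return nil // still in use by another caller
//	}
//	// ...perform the actual unmount...
//
// This mirrors how the devicemapper and aufs drivers in this package use the
// counter; ctr is a placeholder *RefCounter.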

File diff suppressed because it is too large

View file

@@ -0,0 +1,106 @@
package devmapper
// Definition of struct dm_task and sub structures (from lvm2)
//
// struct dm_ioctl {
// /*
// * The version number is made up of three parts:
// * major - no backward or forward compatibility,
// * minor - only backwards compatible,
// * patch - both backwards and forwards compatible.
// *
// * All clients of the ioctl interface should fill in the
// * version number of the interface that they were
// * compiled with.
// *
// * All recognized ioctl commands (ie. those that don't
// * return -ENOTTY) fill out this field, even if the
// * command failed.
// */
// uint32_t version[3]; /* in/out */
// uint32_t data_size; /* total size of data passed in
// * including this struct */
// uint32_t data_start; /* offset to start of data
// * relative to start of this struct */
// uint32_t target_count; /* in/out */
// int32_t open_count; /* out */
// uint32_t flags; /* in/out */
// /*
// * event_nr holds either the event number (input and output) or the
// * udev cookie value (input only).
// * The DM_DEV_WAIT ioctl takes an event number as input.
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
// * use the field as a cookie to return in the DM_COOKIE
// * variable with the uevents they issue.
// * For output, the ioctls return the event number, not the cookie.
// */
// uint32_t event_nr; /* in/out */
// uint32_t padding;
// uint64_t dev; /* in/out */
// char name[DM_NAME_LEN]; /* device name */
// char uuid[DM_UUID_LEN]; /* unique identifier for
// * the block device */
// char data[7]; /* padding or data */
// };
// struct target {
// uint64_t start;
// uint64_t length;
// char *type;
// char *params;
// struct target *next;
// };
// typedef enum {
// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
// } dm_add_node_t;
// struct dm_task {
// int type;
// char *dev_name;
// char *mangled_dev_name;
// struct target *head, *tail;
// int read_only;
// uint32_t event_nr;
// int major;
// int minor;
// int allow_default_major_fallback;
// uid_t uid;
// gid_t gid;
// mode_t mode;
// uint32_t read_ahead;
// uint32_t read_ahead_flags;
// union {
// struct dm_ioctl *v4;
// } dmi;
// char *newname;
// char *message;
// char *geometry;
// uint64_t sector;
// int no_flush;
// int no_open_count;
// int skip_lockfs;
// int query_inactive_table;
// int suppress_identical_reload;
// dm_add_node_t add_node;
// uint64_t existing_table_size;
// int cookie_set;
// int new_uuid;
// int secure_data;
// int retry_remove;
// int enable_checks;
// int expected_errno;
// char *uuid;
// char *mangled_uuid;
// };
//

View file

@@ -0,0 +1,226 @@
// +build linux
package devmapper
import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/docker/go-units"
)
func init() {
graphdriver.Register("devicemapper", Init)
}
// Driver contains the device set mounted and the home directory
type Driver struct {
*DeviceSet
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
// Init creates a driver with the given home and the set of options.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
DeviceSet: deviceSet,
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
func (d *Driver) String() string {
return "devicemapper"
}
// Status returns the status about the driver in a printable format.
// Information returned contains Pool Name, Data File, Metadata file, disk usage by
// the data and metadata, etc.
func (d *Driver) Status() [][2]string {
s := d.DeviceSet.Status()
status := [][2]string{
{"Pool Name", s.PoolName},
{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
{"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))},
{"Backing Filesystem", s.BaseDeviceFS},
{"Data file", s.DataFile},
{"Metadata file", s.MetadataFile},
{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))},
{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
{"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))},
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
}
if len(s.DataLoopback) > 0 {
status = append(status, [2]string{"Data loop file", s.DataLoopback})
}
if len(s.MetadataLoopback) > 0 {
status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
}
if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
status = append(status, [2]string{"Library Version", vStr})
}
return status
}
// GetMetadata returns a map of information about the device.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
m, err := d.DeviceSet.exportDeviceMetadata(id)
if err != nil {
return nil, err
}
metadata := make(map[string]string)
metadata["DeviceId"] = strconv.Itoa(m.deviceID)
metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
metadata["DeviceName"] = m.deviceName
return metadata, nil
}
// Cleanup unmounts a device.
func (d *Driver) Cleanup() error {
err := d.DeviceSet.Shutdown(d.home)
if err2 := mount.Unmount(d.home); err == nil {
err = err2
}
return err
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create adds a device with a given id and the parent.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
return err
}
return nil
}
// Remove removes a device with a given id, unmounts the filesystem.
func (d *Driver) Remove(id string) error {
if !d.DeviceSet.HasDevice(id) {
// Consider removing a non-existing device a no-op
// This is useful to be able to progress on container removal
// if the underlying device has gone away due to earlier errors
return nil
}
// This assumes the device has been properly Get/Put:ed and thus is unmounted
if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
return err
}
mp := path.Join(d.home, "mnt", id)
if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get mounts a device with given id into the root filesystem
func (d *Driver) Get(id, mountLabel string) (string, error) {
mp := path.Join(d.home, "mnt", id)
rootFs := path.Join(mp, "rootfs")
if count := d.ctr.Increment(mp); count > 1 {
return rootFs, nil
}
uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(mp)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
return "", err
}
// Mount the device
if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
idFile := path.Join(mp, "id")
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
}
return rootFs, nil
}
// Put unmounts a device once its reference count drops to zero.
func (d *Driver) Put(id string) error {
mp := path.Join(d.home, "mnt", id)
if count := d.ctr.Decrement(mp); count > 0 {
return nil
}
err := d.DeviceSet.UnmountDevice(id, mp)
if err != nil {
logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
}
return err
}
// Exists checks to see if the device exists.
func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id)
}

View file

@@ -0,0 +1,89 @@
// +build linux
package devmapper
import (
"bytes"
"fmt"
"os"
"path/filepath"
"syscall"
)
// FIXME: this is copy-pasted from the aufs driver.
// It should be moved into the core.
// Mounted returns true if mountpoint is a mount point, detected by comparing
// the device numbers of the path and of its parent directory.
func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
if err != nil {
return false, err
}
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
return mntpointSt.Dev != parentSt.Dev, nil
}
type probeData struct {
fsName string
magic string
offset uint64
}
// ProbeFsType returns the filesystem name for the given device path.
func ProbeFsType(device string) (string, error) {
probes := []probeData{
{"btrfs", "_BHRfS_M", 0x10040},
{"ext4", "\123\357", 0x438},
{"xfs", "XFSB", 0},
}
maxLen := uint64(0)
for _, p := range probes {
l := p.offset + uint64(len(p.magic))
if l > maxLen {
maxLen = l
}
}
file, err := os.Open(device)
if err != nil {
return "", err
}
defer file.Close()
buffer := make([]byte, maxLen)
l, err := file.Read(buffer)
if err != nil {
return "", err
}
if uint64(l) != maxLen {
return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
}
for _, p := range probes {
if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
return p.fsName, nil
}
}
return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
}
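// Usage sketch (editor's addition, device path is hypothetical): probing a
// block device before choosing mount options:
//
//	fs, err := ProbeFsType("/dev/dm-3")
//	if err == nil {
//		// fs is "btrfs", "ext4" or "xfs", depending on the magic found
//	}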
func joinMountOptions(a, b string) string {
if a == "" {
return b
}
if b == "" {
return a
}
return a + "," + b
}

243
vendor/github.com/containers/storage/drivers/driver.go generated vendored Normal file
View file

@@ -0,0 +1,243 @@
package graphdriver
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
)
// FsMagic unsigned id of the filesystem in use.
type FsMagic uint32
const (
// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
FsMagicUnsupported = FsMagic(0x00000000)
)
var (
// All registered drivers
drivers map[string]InitFunc
// ErrNotSupported is returned when the driver is not supported.
ErrNotSupported = errors.New("driver not supported")
// ErrPrerequisites is returned when the driver does not meet its prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS is returned when the file system is not supported.
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
)
// InitFunc initializes the storage driver.
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
// for client code which choose not to implement the entire Driver
// interface and use the NaiveDiffDriver wrapper constructor.
//
// Use of ProtoDriver directly by client code is not recommended.
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container.
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
// Create creates a new, empty, filesystem layer with the
// specified id and parent and mountLabel. Parent and mountLabel may be "".
Create(id, parent, mountLabel string, storageOpt map[string]string) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Returns the absolute path to the mounted layered filesystem.
Get(id, mountLabel string) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// GetMetadata returns a set of key-value pairs which give low-level
// information about the image/container the driver is managing.
GetMetadata(id string) (map[string]string, error)
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
}
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id, parent string) (archive.Archive, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The archive.Reader must be an uncompressed stream.
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id, parent string) (size int64, err error)
}
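// Sketch (editor's addition, assuming only the interfaces above): a minimal
// backend implements ProtoDriver and lets NewNaiveDiffDriver (see fsdiff.go)
// fill in the four diff-related methods:
//
//	type flatDriver struct{ home string } // hypothetical example type
//
//	func (d *flatDriver) String() string { return "flat" }
//	func (d *flatDriver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
//		return d.Create(id, parent, mountLabel, storageOpt)
//	}
//	func (d *flatDriver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
//		return os.MkdirAll(filepath.Join(d.home, id), 0700)
//	}
//	func (d *flatDriver) Remove(id string) error { return os.RemoveAll(filepath.Join(d.home, id)) }
//	func (d *flatDriver) Get(id, mountLabel string) (string, error) {
//		return filepath.Join(d.home, id), nil
//	}
//	func (d *flatDriver) Put(id string) error { return nil }
//	func (d *flatDriver) Exists(id string) bool {
//		_, err := os.Stat(filepath.Join(d.home, id))
//		return err == nil
//	}
//	func (d *flatDriver) Status() [][2]string                              { return nil }
//	func (d *flatDriver) GetMetadata(id string) (map[string]string, error) { return nil, nil }
//	func (d *flatDriver) Cleanup() error                                   { return nil }
//
//	var _ Driver = NewNaiveDiffDriver(&flatDriver{home: "/tmp/flat"}, nil, nil)
//
// Every layer here is just a directory and parents are ignored, so this is a
// sketch of the wiring rather than a usable backend.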
// DiffGetterDriver is the interface for layered file system drivers that
// provide a specialized function for getting file contents for tar-split.
type DiffGetterDriver interface {
Driver
// DiffGetter returns an interface to efficiently retrieve the contents
// of files in a layer.
DiffGetter(id string) (FileGetCloser, error)
}
// FileGetCloser extends the storage.FileGetter interface with a Close method
// for cleaning up.
type FileGetCloser interface {
storage.FileGetter
// Close cleans up any resources associated with the FileGetCloser.
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
// GetDriver initializes and returns the registered driver
func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
if pluginDriver, err := lookupPlugin(name, home, options); err == nil {
return pluginDriver, nil
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// New creates the driver and initializes it at the specified root.
func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if name != "" {
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
return GetDriver(name, root, options, uidMaps, gidMaps)
}
// Guess for prior driver
driversMap := scanPriorDrivers(root)
for _, name := range priority {
if name == "vfs" {
// don't use vfs even if there is state present.
continue
}
if _, prior := driversMap[name]; prior {
// of the state found from prior drivers, check in order of our priority
// which we would prefer
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
// unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so
// something changed and needs attention. Otherwise the daemon's
// images would just "disappear".
logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
return nil, err
}
// abort starting when there are other prior configured drivers
// to ensure the user explicitly selects the driver to load
if len(driversMap)-1 > 0 {
var driversSlice []string
for name := range driversMap {
driversSlice = append(driversSlice, name)
}
return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", "))
}
logrus.Infof("[graphdriver] using prior storage driver %q", name)
return driver, nil
}
}
// Check for priority drivers first
for _, name := range priority {
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
return nil, fmt.Errorf("No supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
func scanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil && driver != "vfs" {
driversMap[driver] = true
}
}
return driversMap
}

View file

@@ -0,0 +1,19 @@
package graphdriver
import "syscall"
var (
// Slice of drivers that should be used in order
priority = []string{
"zfs",
}
)
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

View file

@@ -0,0 +1,133 @@
// +build linux
package graphdriver
import (
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/mount"
)
const (
// FsMagicAufs filesystem id for Aufs
FsMagicAufs = FsMagic(0x61756673)
// FsMagicBtrfs filesystem id for Btrfs
FsMagicBtrfs = FsMagic(0x9123683E)
// FsMagicCramfs filesystem id for Cramfs
FsMagicCramfs = FsMagic(0x28cd3d45)
// FsMagicEcryptfs filesystem id for eCryptfs
FsMagicEcryptfs = FsMagic(0xf15f)
// FsMagicExtfs filesystem id for Extfs
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
FsMagicF2fs = FsMagic(0xF2F52010)
// FsMagicGPFS filesystem id for GPFS
FsMagicGPFS = FsMagic(0x47504653)
// FsMagicJffs2Fs filesystem if for Jffs2Fs
FsMagicJffs2Fs = FsMagic(0x000072b6)
// FsMagicJfs filesystem id for Jfs
FsMagicJfs = FsMagic(0x3153464a)
// FsMagicNfsFs filesystem id for NfsFs
FsMagicNfsFs = FsMagic(0x00006969)
// FsMagicRAMFs filesystem id for RamFs
FsMagicRAMFs = FsMagic(0x858458f6)
// FsMagicReiserFs filesystem id for ReiserFs
FsMagicReiserFs = FsMagic(0x52654973)
// FsMagicSmbFs filesystem id for SmbFs
FsMagicSmbFs = FsMagic(0x0000517B)
// FsMagicSquashFs filesystem id for SquashFs
FsMagicSquashFs = FsMagic(0x73717368)
// FsMagicTmpFs filesystem id for TmpFs
FsMagicTmpFs = FsMagic(0x01021994)
// FsMagicVxFS filesystem id for VxFs
FsMagicVxFS = FsMagic(0xa501fcf5)
// FsMagicXfs filesystem id for Xfs
FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
)
var (
// Slice of drivers that should be used in order
priority = []string{
"aufs",
"btrfs",
"zfs",
"devicemapper",
"overlay",
"vfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
FsMagicRAMFs: "ramfs",
FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb",
FsMagicSquashFs: "squashfs",
FsMagicTmpFs: "tmpfs",
FsMagicUnsupported: "unsupported",
FsMagicVxFS: "vxfs",
FsMagicXfs: "xfs",
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

View file

@@ -0,0 +1,65 @@
// +build solaris,cgo
package graphdriver
/*
#include <sys/statvfs.h>
#include <stdlib.h>
static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"unsafe"
log "github.com/Sirupsen/logrus"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in order
priority = []string{
"zfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil
}
// Mounted checks if the given path is mounted as the fs type
// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath))
buf := C.getstatfs(cs)
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
C.free(unsafe.Pointer(buf))
return false, ErrPrerequisites
}
C.free(unsafe.Pointer(buf))
C.free(unsafe.Pointer(cs))
return true, nil
}

View file

@@ -0,0 +1,15 @@
// +build !linux,!windows,!freebsd,!solaris
package graphdriver
var (
// Slice of drivers that should be used in order
priority = []string{
"unsupported",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagicUnsupported, nil
}

View file

@@ -0,0 +1,14 @@
package graphdriver
var (
// Slice of drivers that should be used in order
priority = []string{
"windowsfilter",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
// Note it is OK to return FsMagicUnsupported on Windows.
return FsMagicUnsupported, nil
}

157
vendor/github.com/containers/storage/drivers/fsdiff.go generated vendored Normal file
View file

@@ -0,0 +1,157 @@
package graphdriver
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
)
var (
// ApplyUncompressedLayer defines the unpack method used by the graph
// driver.
ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
)
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
// support on its own. See the comment on the exported
// NewNaiveDiffDriver function below.
// Notably, the AUFS driver doesn't need to be wrapped like this.
type NaiveDiffDriver struct {
ProtoDriver
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
// Diff(id, parent string) (archive.Archive, error)
// Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
// DiffSize(id, parent string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
gdw := &NaiveDiffDriver{
ProtoDriver: driver,
uidMaps: uidMaps,
gidMaps: gidMaps,
}
return gdw
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
layerFs, err := gdw.Get(id, "")
if err != nil {
return nil, err
}
defer func() {
if err != nil {
gdw.Put(id)
}
}()
if parent == "" {
archive, err := archive.Tar(layerFs, archive.Uncompressed)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
gdw.Put(id)
return err
}), nil
}
parentFs, err := gdw.Get(parent, "")
if err != nil {
return nil, err
}
defer gdw.Put(parent)
changes, err := archive.ChangesDirs(layerFs, parentFs)
if err != nil {
return nil, err
}
archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
gdw.Put(id)
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
layerFs, err := gdw.Get(id, "")
if err != nil {
return nil, err
}
defer gdw.Put(id)
parentFs := ""
if parent != "" {
parentFs, err = gdw.Get(parent, "")
if err != nil {
return nil, err
}
defer gdw.Put(parent)
}
return archive.ChangesDirs(layerFs, parentFs)
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
// Mount the root filesystem so we can apply the diff/layer.
layerFs, err := gdw.Get(id, "")
if err != nil {
return
}
defer gdw.Put(id)
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
GIDMaps: gdw.gidMaps}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
return
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
changes, err := gdw.Changes(id, parent)
if err != nil {
return
}
layerFs, err := gdw.Get(id, "")
if err != nil {
return
}
defer gdw.Put(id)
return archive.ChangesSize(layerFs, changes), nil
}

View file

@ -0,0 +1,169 @@
// +build linux
package overlay
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
)
type copyFlags int
const (
copyHardlink copyFlags = 1 << iota
)
func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
if err != nil {
return err
}
defer dstFile.Close()
_, err = pools.Copy(dstFile, srcFile)
return err
}
func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}
func copyDir(srcDir, dstDir string, flags copyFlags) error {
err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(srcDir, srcPath)
if err != nil {
return err
}
dstPath := filepath.Join(dstDir, relPath)
if err != nil {
return err
}
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
}
isHardlink := false
switch f.Mode() & os.ModeType {
case 0: // Regular file
if flags&copyHardlink != 0 {
isHardlink = true
if err := os.Link(srcPath, dstPath); err != nil {
return err
}
} else {
if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
return err
}
}
case os.ModeDir:
if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
return err
}
case os.ModeSymlink:
link, err := os.Readlink(srcPath)
if err != nil {
return err
}
if err := os.Symlink(link, dstPath); err != nil {
return err
}
case os.ModeNamedPipe:
fallthrough
case os.ModeSocket:
if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
return err
}
case os.ModeDevice:
if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}
default:
return fmt.Errorf("Unknown file type for %s\n", srcPath)
}
// Everything below is copying metadata from src to dst. All this metadata
// already shares an inode for hardlinks.
if isHardlink {
return nil
}
if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}
// We need to copy this attribute if it appears in an overlay upper layer, as
// this function is used to copy those. It is set by overlay if a directory
// is removed and then re-created and should not inherit anything from the
// same dir in the lower dir.
if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
return err
}
isSymlink := f.Mode()&os.ModeSymlink != 0
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if !isSymlink {
if err := os.Chmod(dstPath, f.Mode()); err != nil {
return err
}
}
// system.Chtimes doesn't support a NOFOLLOW flag at the moment
if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
return err
}
} else {
ts := []syscall.Timespec{stat.Atim, stat.Mtim}
if err := system.LUtimesNano(dstPath, ts); err != nil {
return err
}
}
return nil
})
return err
}

View file

@ -0,0 +1,450 @@
// +build linux
package overlay
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/opencontainers/runc/libcontainer/label"
)
// This is a small wrapper over the NaiveDiffDriver that lets us have a custom
// implementation of ApplyDiff()
var (
// ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer.
ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
backingFs = "<unknown>"
)
// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
type ApplyDiffProtoDriver interface {
graphdriver.ProtoDriver
// ApplyDiff applies the diff for the given id on top of the given parent id.
// It returns the size in bytes written if successful; otherwise ErrApplyDiffFallback is returned.
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
}
type naiveDiffDriverWithApply struct {
graphdriver.Driver
applyDiff ApplyDiffProtoDriver
}
// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff.
func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver {
return &naiveDiffDriverWithApply{
Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps),
applyDiff: driver,
}
}
// ApplyDiff applies the diff with the custom implementation, falling back to the NaiveDiffDriver when ErrApplyDiffFallback is returned.
func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
b, err := d.applyDiff.ApplyDiff(id, parent, diff)
if err == ErrApplyDiffFallback {
return d.Driver.ApplyDiff(id, parent, diff)
}
return b, err
}
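// Editor's note: a hedged usage sketch, not vendored code. The sentinel-error
// fallback above is transparent to callers, who just use the wrapped driver:
//
//	var drv graphdriver.Driver = NaiveDiffDriverWithApply(d, uidMaps, gidMaps)
//	size, err := drv.ApplyDiff(id, parent, diff) // custom path first, naive fallback second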
// This backend uses the overlay union filesystem for containers
// plus hard link file sharing for images.
// Each container/image can have a "root" subdirectory which is a plain
// filesystem hierarchy, or they can use overlay.
// If they use overlay there is an "upper" directory and a "lower-id"
// file, as well as "merged" and "work" directories. The "upper"
// directory has the upper layer of the overlay, and "lower-id" contains
// the id of the parent whose "root" directory shall be used as the lower
// layer in the overlay. The overlay itself is mounted in the "merged"
// directory, and the "work" dir is needed for overlay to work.
// When an overlay layer is created there are two cases: either the
// parent has a "root" dir, in which case we start out with an empty
// "upper" directory overlaid on the parent's root. This is typically the
// case with the init layer of a container which is based on an image.
// If there is no "root" in the parent, we inherit the lower-id from
// the parent and start by making a copy of the parent's "upper" dir.
// This is typically the case for a container layer which copies
// its parent -init upper layer.
// Additionally we also have a custom implementation of ApplyDiff
// which makes a recursive copy of the parent "root" layer using
// hardlinks to share file data, and then applies the layer on top
// of that. This means all child images share file (but not directory)
// data with the parent.
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
func init() {
graphdriver.Register("overlay", Init)
}
// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned.
// If overlay is not supported over the existing backing filesystem, graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
if err := supportsOverlay(); err != nil {
return nil, graphdriver.ErrNotSupported
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
}
return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
}
func supportsOverlay() error {
// Try to load the overlay module with modprobe before checking
// /proc/filesystems to see whether overlay is supported
exec.Command("modprobe", "overlay").Run()
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if s.Text() == "nodev\toverlay" {
return nil
}
}
logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return graphdriver.ErrNotSupported
}
func (d *Driver) String() string {
return "overlay"
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation.
func (d *Driver) Status() [][2]string {
return [][2]string{
{"Backing Filesystem", backingFs},
}
}
// GetMetadata returns metadata about the overlay driver such as RootDir, LowerDir, UpperDir, WorkDir and MergedDir used to store data.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
metadata := make(map[string]string)
// If id has a root, it is an image
rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil {
metadata["RootDir"] = rootDir
return metadata, nil
}
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
if err != nil {
return nil, err
}
metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
metadata["UpperDir"] = path.Join(dir, "upper")
metadata["WorkDir"] = path.Join(dir, "work")
metadata["MergedDir"] = path.Join(dir, "merged")
return metadata, nil
}
// Cleanup cleans up any state created by overlay that should be removed
// when the daemon is shut down. For now, we just have to unmount the bind
// mount we created.
func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
// The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for overlay")
}
dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
return err
}
defer func() {
// Clean up on failure
if retErr != nil {
os.RemoveAll(dir)
}
}()
// Toplevel images are just a "root" dir
if parent == "" {
if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil {
return err
}
return nil
}
parentDir := d.dir(parent)
// Ensure parent exists
if _, err := os.Lstat(parentDir); err != nil {
return err
}
// If parent has a root, just do an overlay to it
parentRoot := path.Join(parentDir, "root")
if s, err := os.Lstat(parentRoot); err == nil {
if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil {
return err
}
return nil
}
// Otherwise, copy the upper and the lower-id from the parent
lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id"))
if err != nil {
return err
}
if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil {
return err
}
parentUpperDir := path.Join(parentDir, "upper")
s, err := os.Lstat(parentUpperDir)
if err != nil {
return err
}
upperDir := path.Join(dir, "upper")
if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
return err
}
return copyDir(parentUpperDir, upperDir, 0)
}
func (d *Driver) dir(id string) string {
return path.Join(d.home, id)
}
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
// If id has a root, just return it
rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil {
return rootDir, nil
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
}
}
}()
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
if err != nil {
return "", err
}
var (
lowerDir = path.Join(d.dir(string(lowerID)), "root")
upperDir = path.Join(dir, "upper")
workDir = path.Join(dir, "work")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
)
if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return "", err
}
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err
}
return mergedDir, nil
}
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
err := syscall.Unmount(mountpoint, 0)
if err != nil {
rootDir := path.Join(d.dir(id), "root")
if _, err := os.Stat(rootDir); err == nil {
// We weren't mounting a "merged" directory anyway
return nil
}
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return err
}
// ApplyDiff applies the new layer on top of the root; if the parent does not exist it returns ErrApplyDiffFallback.
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
dir := d.dir(id)
if parent == "" {
return 0, ErrApplyDiffFallback
}
parentRootDir := path.Join(d.dir(parent), "root")
if _, err := os.Stat(parentRootDir); err != nil {
return 0, ErrApplyDiffFallback
}
// We now know there is a parent, and it has a "root" directory containing
// the full root filesystem. We can just hardlink it and apply the
// layer. This relies on two things:
// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
// These are all currently true and are not expected to break
tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
if err != nil {
return 0, err
}
defer func() {
if err != nil {
os.RemoveAll(tmpRootDir)
} else {
os.RemoveAll(path.Join(dir, "upper"))
os.RemoveAll(path.Join(dir, "work"))
os.RemoveAll(path.Join(dir, "merged"))
os.RemoveAll(path.Join(dir, "lower-id"))
}
}()
if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
return 0, err
}
options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
return 0, err
}
rootDir := path.Join(dir, "root")
if err := os.Rename(tmpRootDir, rootDir); err != nil {
return 0, err
}
return
}
// Exists checks to see if the directory for the given id exists.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
return err == nil
}
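The mount that Get performs above reduces to one syscall whose data argument names three directories. A standalone sketch (an editor's illustration, assuming the /tmp/ovl/* directories already exist; it must run as root on a kernel with overlay support):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// lowerdir is read-only, upperdir receives all writes, workdir is used
	// internally by the kernel; the union becomes visible at the mountpoint.
	opts := "lowerdir=/tmp/ovl/lower,upperdir=/tmp/ovl/upper,workdir=/tmp/ovl/work"
	if err := syscall.Mount("overlay", "/tmp/ovl/merged", "overlay", 0, opts); err != nil {
		fmt.Fprintln(os.Stderr, "overlay mount failed:", err)
		os.Exit(1)
	}
	fmt.Println("union mounted at /tmp/ovl/merged")
}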

View file

@ -0,0 +1,3 @@
// +build !linux
package overlay

View file

@ -0,0 +1,88 @@
// +build linux
package overlay2
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"runtime"
"syscall"
"github.com/containers/storage/pkg/reexec"
)
func init() {
reexec.Register("docker-mountfrom", mountFromMain)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
type mountOptions struct {
Device string
Target string
Type string
Label string
Flag uint32
}
func mountFrom(dir, device, target, mType, label string) error {
options := &mountOptions{
Device: device,
Target: target,
Type: mType,
Flag: 0,
Label: label,
}
cmd := reexec.Command("docker-mountfrom", dir)
w, err := cmd.StdinPipe()
if err != nil {
return fmt.Errorf("mountfrom error on pipe creation: %v", err)
}
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
}
// write the options to the pipe for the re-exec'd process to read
if err := json.NewEncoder(w).Encode(options); err != nil {
return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
}
return nil
}
// mountFromMain is the entry-point for docker-mountfrom on re-exec.
func mountFromMain() {
runtime.LockOSThread()
flag.Parse()
var options *mountOptions
if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
fatal(err)
}
if err := os.Chdir(flag.Arg(0)); err != nil {
fatal(err)
}
if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
fatal(err)
}
os.Exit(0)
}
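The re-exec dance above exists so that the mount syscall runs in a child process whose working directory is the driver home, which keeps the path arguments in the mount data short. A compressed sketch of the same register/dispatch pattern (an editor's illustration; "demo-helper" is a made-up name):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register the helper under a name; reexec.Init() dispatches to it when
	// the binary is re-executed with that name as os.Args[0].
	reexec.Register("demo-helper", func() {
		fmt.Println("running re-exec'd helper, pid", os.Getpid())
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		return // we were invoked as the helper
	}
	cmd := reexec.Command("demo-helper")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}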

View file

@ -0,0 +1,514 @@
// +build linux
package overlay2
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strconv"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/parsers/kernel"
"github.com/opencontainers/runc/libcontainer/label"
)
var (
// untar defines the untar method
untar = chrootarchive.UntarUncompressed
)
// This backend uses the overlay union filesystem for containers
// with diff directories for each layer.
// This version of the overlay driver requires at least kernel
// 4.0.0 in order to support mounting multiple diff directories.
// Each container/image has at least a "diff" directory and "link" file.
// If there is also a "lower" file when there are diff layers
// below as well as "merged" and "work" directories. The "diff" directory
// has the upper layer of the overlay and is used to capture any
// changes to the layer. The "lower" file contains all the lower layer
// mounts separated by ":" and ordered from uppermost to lowermost
// layers. The overlay itself is mounted in the "merged" directory,
// and the "work" dir is needed for overlay to work.
// The "link" file for each layer contains a unique string for the layer.
// Under the "l" directory at the root there will be a symbolic link
// with that unique string pointing the "diff" directory for the layer.
// The symbolic links are used to reference lower layers in the "lower"
// file and on mount. The links are used to shorten the total length
// of a layer reference without requiring changes to the layer identifier
// or root directory. Mounts are always done relative to root and
// referencing the symbolic links in order to ensure the number of
// lower directories can fit in a single page for making the mount
// syscall. A hard upper limit of 128 lower layers is enforced to ensure
// that mounts do not fail due to length.
const (
driverName = "overlay2"
linkDir = "l"
lowerFile = "lower"
maxDepth = 128
// idLength represents the number of random characters
// which can be used to create the unique link identifier
// for every layer. If this value is too long then the
// page size limit for the mount command may be exceeded.
// idLength should be selected such that the following
// inequality holds (512 is a buffer for label metadata):
// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
idLength = 26
)
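// Editor's check of the inequality above: with idLength = 26, len(linkDir) = 1
// and maxDepth = 128, (26 + 1 + 1) * 128 = 3584, and on a 4096-byte page
// (pageSize - 512) = 3584, so the bound is met exactly.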
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
var backingFs = "<unknown>"
func init() {
graphdriver.Register(driverName, Init)
}
// Init returns a native diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned.
// If overlay is not supported over the existing backing filesystem, graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
opts, err := parseOptions(options)
if err != nil {
return nil, err
}
if err := supportsOverlay(); err != nil {
return nil, graphdriver.ErrNotSupported
}
// require kernel 4.0.0 to ensure multiple lower dirs are supported
v, err := kernel.GetKernelVersion()
if err != nil {
return nil, err
}
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
if !opts.overrideKernelCheck {
return nil, graphdriver.ErrNotSupported
}
logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
switch fsMagic {
case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay2' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
}
return d, nil
}
type overlayOptions struct {
overrideKernelCheck bool
}
func parseOptions(options []string) (*overlayOptions, error) {
o := &overlayOptions{}
for _, option := range options {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "overlay2.override_kernel_check":
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("overlay2: Unknown option %s\n", key)
}
}
return o, nil
}
func supportsOverlay() error {
// Try to load the overlay module with modprobe before checking
// /proc/filesystems to see whether overlay is supported
exec.Command("modprobe", "overlay").Run()
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if s.Text() == "nodev\toverlay" {
return nil
}
}
logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return graphdriver.ErrNotSupported
}
func (d *Driver) String() string {
return driverName
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation.
func (d *Driver) Status() [][2]string {
return [][2]string{
{"Backing Filesystem", backingFs},
}
}
// GetMetadata returns metadata about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergedDir used to store data.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
metadata := map[string]string{
"WorkDir": path.Join(dir, "work"),
"MergedDir": path.Join(dir, "merged"),
"UpperDir": path.Join(dir, "diff"),
}
lowerDirs, err := d.getLowerDirs(id)
if err != nil {
return nil, err
}
if len(lowerDirs) > 0 {
metadata["LowerDir"] = strings.Join(lowerDirs, ":")
}
return metadata, nil
}
// Cleanup cleans up any state created by overlay that should be removed
// when the daemon is shut down. For now, we just have to unmount the bind
// mount we created.
func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create is used to create the diff, work, and merged directories and the
// link and lower files required for overlay fs for a given id.
// The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for overlay")
}
dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
return err
}
defer func() {
// Clean up on failure
if retErr != nil {
os.RemoveAll(dir)
}
}()
if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
return err
}
// Write link id to link file
if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
return err
}
// if no parent directory, done
if parent == "" {
return nil
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
return err
}
lower, err := d.getLower(parent)
if err != nil {
return err
}
if lower != "" {
if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
return err
}
}
return nil
}
func (d *Driver) getLower(parent string) (string, error) {
parentDir := d.dir(parent)
// Ensure parent exists
if _, err := os.Lstat(parentDir); err != nil {
return "", err
}
// Read the parent's link file
parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
if err != nil {
return "", err
}
lowers := []string{path.Join(linkDir, string(parentLink))}
parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
if err == nil {
parentLowers := strings.Split(string(parentLower), ":")
lowers = append(lowers, parentLowers...)
}
if len(lowers) > maxDepth {
return "", errors.New("max depth exceeded")
}
return strings.Join(lowers, ":"), nil
}
func (d *Driver) dir(id string) string {
return path.Join(d.home, id)
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
var lowersArray []string
lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lp, err := os.Readlink(path.Join(d.home, s))
if err != nil {
return nil, err
}
lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp)))
}
} else if !os.IsNotExist(err) {
return nil, err
}
return lowersArray, nil
}
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
dir := d.dir(id)
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
}
}
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
diffDir := path.Join(dir, "diff")
lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
if err != nil {
// If no lower, just return diff directory
if os.IsNotExist(err) {
return diffDir, nil
}
return "", err
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
}
}
}()
workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
mountLabel = label.FormatMountLabel(opts, mountLabel)
if len(mountLabel) > syscall.Getpagesize() {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
}
if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return "", err
}
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err
}
return mergedDir, nil
}
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
err := syscall.Unmount(mountpoint, 0)
if err != nil {
if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil {
// We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway
return nil
}
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return err
}
// Exists checks to see if the directory for the given id exists.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
return err == nil
}
// ApplyDiff applies the new layer into the layer's diff directory
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
applyDir := d.getDiffPath(id)
logrus.Debugf("Applying tar in %s", applyDir)
// Overlay doesn't need the parent id to apply the diff
if err := untar(diff, applyDir, &archive.TarOptions{
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
}); err != nil {
return 0, err
}
return d.DiffSize(id, parent)
}
func (d *Driver) getDiffPath(id string) string {
dir := d.dir(id)
return path.Join(dir, "diff")
}
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
return directory.Size(d.getDiffPath(id))
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
diffPath := d.getDiffPath(id)
logrus.Debugf("Tar with options on %s", diffPath)
return archive.TarWithOptions(diffPath, &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
})
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// Overlay doesn't have snapshots, so we need to get changes from all parent
// layers.
diffPath := d.getDiffPath(id)
layers, err := d.getLowerDirs(id)
if err != nil {
return nil, err
}
return archive.OverlayChanges(layers, diffPath)
}
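The "link" indirection documented at the top of this file is purely a string-length budget: each lower layer contributes len("l/") + idLength + 1 bytes of mount data, and the whole option string must fit in one page. A small sketch of that budget (an editor's illustration with made-up layer links):

package main

import (
	"fmt"
	"strings"
	"syscall"
)

func main() {
	// Pretend three lower layers, each referenced through its short link.
	links := []string{
		"l/AAAAAAAAAAAAAAAAAAAAAAAAAA",
		"l/BBBBBBBBBBBBBBBBBBBBBBBBBB",
		"l/CCCCCCCCCCCCCCCCCCCCCCCCCC",
	}
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		strings.Join(links, ":"), "id/diff", "id/work")
	// Get refuses to mount when opts (plus any SELinux label) would no
	// longer fit into a single page.
	fmt.Printf("%d bytes of mount data, page size %d\n", len(opts), syscall.Getpagesize())
}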

View file

@ -0,0 +1,3 @@
// +build !linux
package overlay2

View file

@ -0,0 +1,80 @@
// +build linux
package overlay2
import (
"crypto/rand"
"encoding/base32"
"fmt"
"io"
"os"
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
// generateID creates a new random string identifier with the given length
func generateID(l int) string {
const (
// ensures we back off for at most 450ms total. Use the following to
// select a new value, in units of 10ms:
// n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
maxretries = 9
backoff = time.Millisecond * 10
)
var (
totalBackoff time.Duration
count int
retries int
size = (l*5 + 7) / 8
u = make([]byte, size)
)
// TODO: Include time component, counter component, random component
for {
// This should never block but the read may fail. Because of this,
// we just try to read the random number generator until we get
// something. This is a very rare condition but may happen.
b := time.Duration(retries) * backoff
time.Sleep(b)
totalBackoff += b
n, err := io.ReadFull(rand.Reader, u[count:])
if err != nil {
if retryOnError(err) && retries < maxretries {
count += n
retries++
logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
}
break
}
s := base32.StdEncoding.EncodeToString(u)
return s[:l]
}
// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
switch err := err.(type) {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == syscall.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we back off and retry.
return true
}
}
return false
}
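A quick check of the backoff comment above: with maxretries = 9 and a 10ms unit, the total sleep across all retries is (1 + 2 + ... + 9) * 10ms = 45 * 10ms = 450ms, i.e. d = 45 in the quoted formula gives n = 9.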

32
vendor/github.com/containers/storage/drivers/plugin.go generated vendored Normal file
View file

@ -0,0 +1,32 @@
// +build experimental
package graphdriver
import (
"fmt"
"io"
"github.com/containers/storage/pkg/plugins"
)
type pluginClient interface {
// Call calls the specified method with the specified arguments for the plugin.
Call(string, interface{}, interface{}) error
// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
Stream(string, interface{}) (io.ReadCloser, error)
// SendFile calls the specified method, and passes through the IO stream
SendFile(string, io.Reader, interface{}) error
}
func lookupPlugin(name, home string, opts []string) (Driver, error) {
pl, err := plugins.Get(name, "GraphDriver")
if err != nil {
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
}
return newPluginDriver(name, home, opts, pl.Client())
}
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
proxy := &graphDriverProxy{name, c}
return proxy, proxy.Init(home, opts)
}

View file

@ -0,0 +1,7 @@
// +build !experimental
package graphdriver
func lookupPlugin(name, home string, opts []string) (Driver, error) {
return nil, ErrNotSupported
}

225
vendor/github.com/containers/storage/drivers/proxy.go generated vendored Normal file
View file

@ -0,0 +1,225 @@
// +build experimental
package graphdriver
import (
"errors"
"fmt"
"github.com/containers/storage/pkg/archive"
)
type graphDriverProxy struct {
name string
client pluginClient
}
type graphDriverRequest struct {
ID string `json:",omitempty"`
Parent string `json:",omitempty"`
MountLabel string `json:",omitempty"`
}
type graphDriverResponse struct {
Err string `json:",omitempty"`
Dir string `json:",omitempty"`
Exists bool `json:",omitempty"`
Status [][2]string `json:",omitempty"`
Changes []archive.Change `json:",omitempty"`
Size int64 `json:",omitempty"`
Metadata map[string]string `json:",omitempty"`
}
type graphDriverInitRequest struct {
Home string
Opts []string
}
func (d *graphDriverProxy) Init(home string, opts []string) error {
args := &graphDriverInitRequest{
Home: home,
Opts: opts,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) String() string {
return d.name
}
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Remove(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
args := &graphDriverRequest{
ID: id,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
return "", err
}
var err error
if ret.Err != "" {
err = errors.New(ret.Err)
}
return ret.Dir, err
}
func (d *graphDriverProxy) Put(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Exists(id string) bool {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
return false
}
return ret.Exists
}
func (d *graphDriverProxy) Status() [][2]string {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
return nil
}
return ret.Status
}
func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
args := &graphDriverRequest{
ID: id,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Metadata, nil
}
func (d *graphDriverProxy) Cleanup() error {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
return nil
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
body, err := d.client.Stream("GraphDriver.Diff", args)
if err != nil {
return nil, err
}
return archive.Archive(body), nil
}
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Changes, nil
}
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
var ret graphDriverResponse
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
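Every proxy method above funnels through pluginClient.Call with small JSON structs, so the wire format is easy to picture. A hedged sketch of what a Create call serializes to (an editor's illustration; the field values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the unexported graphDriverRequest type from proxy.go.
type graphDriverRequest struct {
	ID         string `json:",omitempty"`
	Parent     string `json:",omitempty"`
	MountLabel string `json:",omitempty"`
}

func main() {
	req := graphDriverRequest{ID: "layer123", Parent: "layer122"}
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // {"ID":"layer123","Parent":"layer122"}
}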

View file

@ -0,0 +1,8 @@
// +build !exclude_graphdriver_aufs,linux
package register
import (
// register the aufs graphdriver
_ "github.com/containers/storage/drivers/aufs"
)

View file

@ -0,0 +1,8 @@
// +build !exclude_graphdriver_btrfs,linux
package register
import (
// register the btrfs graphdriver
_ "github.com/containers/storage/drivers/btrfs"
)

View file

@ -0,0 +1,8 @@
// +build !exclude_graphdriver_devicemapper,linux
package register
import (
// register the devmapper graphdriver
_ "github.com/containers/storage/drivers/devmapper"
)

View file

@ -0,0 +1,9 @@
// +build !exclude_graphdriver_overlay,linux
package register
import (
// register the overlay graphdriver
_ "github.com/containers/storage/drivers/overlay"
_ "github.com/containers/storage/drivers/overlay2"
)
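These register files rely on Go's blank-import idiom: a package is imported solely for the side effect of its init function, gated by build tags. A toy sketch of the pattern (an editor's illustration; the registry here stands in for graphdriver.Register):

package main

import "fmt"

// A toy registry standing in for graphdriver's global driver table.
var drivers = map[string]func() string{}

func register(name string, ctor func() string) { drivers[name] = ctor }

// In the real code this init lives in the driver package and runs as a side
// effect of the blank import in the register package.
func init() { register("overlay2", func() string { return "overlay2 driver" }) }

func main() {
	fmt.Println(drivers["overlay2"]())
}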

Some files were not shown because too many files have changed in this diff.