*: vendor containers/storage from nalind/image@add-storage-transport

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2016-09-17 17:53:05 +02:00
parent 7fc874e05e
commit 8874367041
265 changed files with 32880 additions and 481 deletions

glide.lock generated

@@ -1,5 +1,5 @@
-hash: e1c100879f58ef4902c4e13d6710505f0ec62fbb5eaa237abeff8faf0ddf30f1
-updated: 2016-09-17T15:49:39.168380014+02:00
+hash: d966770b8886be2e6e94f299dc52f7b6f64597a6e036b70aa361108d0aa5a5aa
+updated: 2016-09-17T17:51:57.682909454+02:00
imports:
- name: github.com/containernetworking/cni
version: 9d5e6e60e79491207834ae8439e80c943db65a69
@@ -8,7 +8,8 @@ imports:
- pkg/invoke
- pkg/types
- name: github.com/containers/image
-version: f6f11ab5cf8b1e70ef4aa3f8b6fdb4b671d16abd
+version: 5a72b5908c6978a8eee293fdd654395da29015fe
+repo: git@github.com:nalind/image
subpackages:
- directory
- image
@@ -19,8 +20,44 @@ imports:
- docker
- oci/layout
- openshift
- storage
- docker/policyconfiguration
- version
- name: github.com/containers/storage
version: 32264f11f3b997e8d2ba032a6a13ebdfcddc2897
subpackages:
- pkg/archive
- pkg/ioutils
- storage
- pkg/fileutils
- pkg/idtools
- pkg/pools
- pkg/promise
- pkg/system
- pkg/longpath
- drivers
- drivers/register
- pkg/stringid
- storageversion
- pkg/chrootarchive
- pkg/mount
- pkg/plugins
- drivers/aufs
- drivers/btrfs
- drivers/devmapper
- drivers/overlay
- drivers/overlay2
- drivers/vfs
- drivers/windows
- drivers/zfs
- pkg/random
- pkg/reexec
- pkg/plugins/transport
- pkg/directory
- pkg/parsers
- pkg/devicemapper
- pkg/loopback
- pkg/parsers/kernel
- name: github.com/docker/distribution
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
subpackages:
@@ -126,8 +163,17 @@ imports:
version: ff3ca3d616518087dc20180f69bb4038379f1028
subpackages:
- pkg/kubelet/api/v1alpha1/runtime
- name: github.com/mattn/go-shellwords
version: 525bedee691b5a8df547cb5cf9f86b7fb1883e24
- name: github.com/Microsoft/go-winio
version: 8f9387ea7efabb228a981b9c381142be7667967f
subpackages:
- archive/tar
- backuptar
- name: github.com/Microsoft/hcsshim
version: d8e08e7d31d4f441646638b35f423a760d6dfbcd
- name: github.com/mistifyio/go-zfs
version: 1b4ae6fb4e77b095934d4430860ff202060169f8
- name: github.com/opencontainers/image-spec
version: 287772a24ab02d5d2fe3fbbba4f13dff9b9ce003
subpackages:
@@ -160,6 +206,8 @@ imports:
version: 7dab1a245d3e13d0ad03b77409d0bf7e00a6ada4
subpackages:
- specs-go
- name: github.com/pborman/uuid
version: ca53cad383cad2479bbba7f7a1a05797ec1386e4
- name: github.com/rajatchopra/ocicni
version: daea0034a4bc2d193d9bf5031bace0403094011a
- name: github.com/Sirupsen/logrus

glide.yaml

@@ -3,6 +3,8 @@ import:
- package: github.com/Sirupsen/logrus
version: ~0.10.0
- package: github.com/containers/image
+version: add-storage-transport
+repo: git@github.com:nalind/image
subpackages:
- directory
- image

LICENSE

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/Microsoft/go-winio/archive/tar/common.go

@@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
Xattrs map[string]string
Winheaders map[string]string
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxWindows = "MSWINDOWS."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
switch flag {
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
return true
default:
return false
}
}
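FileInfoHeader above is the usual entry point for building archive entries from the filesystem. A minimal caller might look like the sketch below; the file name is illustrative, and the import path assumes this fork's vendored go-winio location:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/Microsoft/go-winio/archive/tar"
)

func main() {
    fi, err := os.Stat("example.txt") // hypothetical input file
    if err != nil {
        log.Fatal(err)
    }
    // Build a tar header from the os.FileInfo; the link argument is
    // only consulted when fi describes a symlink.
    hdr, err := tar.FileInfoHeader(fi, "")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("name=%s mode=%#o size=%d\n", hdr.Name, hdr.Mode, hdr.Size)
}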

vendor/github.com/Microsoft/go-winio/archive/tar/reader.go

@@ -0,0 +1,996 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - pax extensions
import (
"bytes"
"errors"
"io"
"io/ioutil"
"math"
"os"
"strconv"
"strings"
"time"
)
var (
ErrHeader = errors.New("archive/tar: invalid tar header")
)
const maxNanoSecondIntSize = 9
// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first),
// and then it can be treated as an io.Reader to access the file's data.
type Reader struct {
r io.Reader
err error
pad int64 // amount of padding (ignored) after current file entry
curr numBytesReader // reader for current file entry
hdrBuff [blockSize]byte // buffer to use in readHeader
}
type parser struct {
err error // Last error seen
}
// A numBytesReader is an io.Reader with a numBytes method, returning the number
// of bytes remaining in the underlying encoded data.
type numBytesReader interface {
io.Reader
numBytes() int64
}
// A regFileReader is a numBytesReader for reading file data from a tar archive.
type regFileReader struct {
r io.Reader // underlying reader
nb int64 // number of unread bytes for current file entry
}
// A sparseFileReader is a numBytesReader for reading sparse file data from a
// tar archive.
type sparseFileReader struct {
rfr numBytesReader // Reads the sparse-encoded file data
sp []sparseEntry // The sparse map for the file
pos int64 // Keeps track of file position
total int64 // Total size of the file
}
// A sparseEntry holds a single entry in a sparse file's sparse map.
//
// Sparse files are represented using a series of sparseEntrys.
// Despite the name, a sparseEntry represents an actual data fragment that
// references data found in the underlying archive stream. All regions not
// covered by a sparseEntry are logically filled with zeros.
//
// For example, if the underlying raw file contains the 10-byte data:
// var compactData = "abcdefgh"
//
// And the sparse map has the following entries:
// var sp = []sparseEntry{
// {offset: 2, numBytes: 5} // Data fragment for [2..7]
// {offset: 18, numBytes: 3} // Data fragment for [18..21]
// }
//
// Then the content of the resulting sparse file with a "real" size of 25 is:
// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type sparseEntry struct {
offset int64 // Starting position of the fragment
numBytes int64 // Length of the fragment
}
// Keywords for GNU sparse files in a PAX extended header
const (
paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
paxGNUSparseOffset = "GNU.sparse.offset"
paxGNUSparseNumBytes = "GNU.sparse.numbytes"
paxGNUSparseMap = "GNU.sparse.map"
paxGNUSparseName = "GNU.sparse.name"
paxGNUSparseMajor = "GNU.sparse.major"
paxGNUSparseMinor = "GNU.sparse.minor"
paxGNUSparseSize = "GNU.sparse.size"
paxGNUSparseRealSize = "GNU.sparse.realsize"
)
// Keywords for old GNU sparse headers
const (
oldGNUSparseMainHeaderOffset = 386
oldGNUSparseMainHeaderIsExtendedOffset = 482
oldGNUSparseMainHeaderNumEntries = 4
oldGNUSparseExtendedHeaderIsExtendedOffset = 504
oldGNUSparseExtendedHeaderNumEntries = 21
oldGNUSparseOffsetSize = 12
oldGNUSparseNumBytesSize = 12
)
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
// Next advances to the next entry in the tar archive.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
if tr.err != nil {
return nil, tr.err
}
var hdr *Header
var extHdrs map[string]string
// Externally, Next iterates through the tar archive as if it is a series of
// files. Internally, the tar format often uses fake "files" to add meta
// data that describes the next file. These meta data "files" should not
// normally be visible to the outside. As such, this loop iterates through
// one or more "header files" until it finds a "normal file".
loop:
for {
tr.err = tr.skipUnread()
if tr.err != nil {
return nil, tr.err
}
hdr = tr.readHeader()
if tr.err != nil {
return nil, tr.err
}
// Check for PAX/GNU special headers and files.
switch hdr.Typeflag {
case TypeXHeader:
extHdrs, tr.err = parsePAX(tr)
if tr.err != nil {
return nil, tr.err
}
continue loop // This is a meta header affecting the next header
case TypeGNULongName, TypeGNULongLink:
var realname []byte
realname, tr.err = ioutil.ReadAll(tr)
if tr.err != nil {
return nil, tr.err
}
// Convert GNU extensions to use PAX headers.
if extHdrs == nil {
extHdrs = make(map[string]string)
}
var p parser
switch hdr.Typeflag {
case TypeGNULongName:
extHdrs[paxPath] = p.parseString(realname)
case TypeGNULongLink:
extHdrs[paxLinkpath] = p.parseString(realname)
}
if p.err != nil {
tr.err = p.err
return nil, tr.err
}
continue loop // This is a meta header affecting the next header
default:
mergePAX(hdr, extHdrs)
// Check for a PAX format sparse file
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
if err != nil {
tr.err = err
return nil, err
}
if sp != nil {
// Current file is a PAX format GNU sparse file.
// Set the current file reader to a sparse file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil, tr.err
}
}
break loop // This is a file, so stop
}
}
return hdr, nil
}
// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
// be treated as a regular file.
func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
var sparseFormat string
// Check for sparse format indicators
major, majorOk := headers[paxGNUSparseMajor]
minor, minorOk := headers[paxGNUSparseMinor]
sparseName, sparseNameOk := headers[paxGNUSparseName]
_, sparseMapOk := headers[paxGNUSparseMap]
sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
// Identify which, if any, sparse format applies from which PAX headers are set
if majorOk && minorOk {
sparseFormat = major + "." + minor
} else if sparseNameOk && sparseMapOk {
sparseFormat = "0.1"
} else if sparseSizeOk {
sparseFormat = "0.0"
} else {
// Not a PAX format GNU sparse file.
return nil, nil
}
// Check for unknown sparse format
if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
return nil, nil
}
// Update hdr from GNU sparse PAX headers
if sparseNameOk {
hdr.Name = sparseName
}
if sparseSizeOk {
realSize, err := strconv.ParseInt(sparseSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
} else if sparseRealSizeOk {
realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
}
// Set up the sparse map, according to the particular sparse format in use
var sp []sparseEntry
var err error
switch sparseFormat {
case "0.0", "0.1":
sp, err = readGNUSparseMap0x1(headers)
case "1.0":
sp, err = readGNUSparseMap1x0(tr.curr)
}
return sp, err
}
// mergePAX merges well-known headers according to the PAX standard.
// Values from PAX records override the corresponding fields in the
// header struct, since PAX records can carry higher precision or
// longer values. This is especially useful for the name and linkname
// fields.
func mergePAX(hdr *Header, headers map[string]string) error {
for k, v := range headers {
switch k {
case paxPath:
hdr.Name = v
case paxLinkpath:
hdr.Linkname = v
case paxGname:
hdr.Gname = v
case paxUname:
hdr.Uname = v
case paxUid:
uid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Uid = int(uid)
case paxGid:
gid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Gid = int(gid)
case paxAtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.AccessTime = t
case paxMtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ModTime = t
case paxCtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ChangeTime = t
case paxSize:
size, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Size = int64(size)
default:
if strings.HasPrefix(k, paxXattr) {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
hdr.Xattrs[k[len(paxXattr):]] = v
} else if strings.HasPrefix(k, paxWindows) {
if hdr.Winheaders == nil {
hdr.Winheaders = make(map[string]string)
}
hdr.Winheaders[k[len(paxWindows):]] = v
}
}
}
return nil
}
// parsePAXTime takes a string of the form %d.%d as described in
// the PAX specification.
func parsePAXTime(t string) (time.Time, error) {
buf := []byte(t)
pos := bytes.IndexByte(buf, '.')
var seconds, nanoseconds int64
var err error
if pos == -1 {
seconds, err = strconv.ParseInt(t, 10, 0)
if err != nil {
return time.Time{}, err
}
} else {
seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
if err != nil {
return time.Time{}, err
}
nano_buf := string(buf[pos+1:])
// Pad as needed before converting to a decimal.
// For example .030 -> .030000000 -> 30000000 nanoseconds
if len(nano_buf) < maxNanoSecondIntSize {
// Right pad
nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
} else if len(nano_buf) > maxNanoSecondIntSize {
// Right truncate
nano_buf = nano_buf[:maxNanoSecondIntSize]
}
nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
if err != nil {
return time.Time{}, err
}
}
ts := time.Unix(seconds, nanoseconds)
return ts, nil
}
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
sbuf := string(buf)
// For GNU PAX sparse format 0.0 support.
// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
var sparseMap bytes.Buffer
headers := make(map[string]string)
// Each record is constructed as
// "%d %s=%s\n", length, keyword, value
for len(sbuf) > 0 {
key, value, residual, err := parsePAXRecord(sbuf)
if err != nil {
return nil, ErrHeader
}
sbuf = residual
keyStr := string(key)
if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
sparseMap.WriteString(value)
sparseMap.Write([]byte{','})
} else {
// Normal key. Set the value in the headers map.
headers[keyStr] = string(value)
}
}
if sparseMap.Len() != 0 {
// Add sparse info to headers, chopping off the extra comma
sparseMap.Truncate(sparseMap.Len() - 1)
headers[paxGNUSparseMap] = sparseMap.String()
}
return headers, nil
}
// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
//
// A PAX record is of the following form:
// "%d %s=%s\n" % (size, key, value)
func parsePAXRecord(s string) (k, v, r string, err error) {
// The size field ends at the first space.
sp := strings.IndexByte(s, ' ')
if sp == -1 {
return "", "", s, ErrHeader
}
// Parse the first token as a decimal integer.
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
if perr != nil || n < 5 || int64(len(s)) < n {
return "", "", s, ErrHeader
}
// Extract everything between the space and the final newline.
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
if nl != "\n" {
return "", "", s, ErrHeader
}
// The first equals separates the key from the value.
eq := strings.IndexByte(rec, '=')
if eq == -1 {
return "", "", s, ErrHeader
}
return rec[:eq], rec[eq+1:], rem, nil
}
// parseString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func (*parser) parseString(b []byte) string {
n := 0
for n < len(b) && b[n] != 0 {
n++
}
return string(b[0:n])
}
// parseNumeric parses the input as being encoded in either base-256 or octal.
// This function may return negative numbers.
// If parsing fails or an integer overflow occurs, err will be set.
func (p *parser) parseNumeric(b []byte) int64 {
// Check for base-256 (binary) format first.
// If the first bit is set, then all following bits constitute a two's
// complement encoded number in big-endian byte order.
if len(b) > 0 && b[0]&0x80 != 0 {
// Handling negative numbers relies on the following identity:
// -a-1 == ^a
//
// If the number is negative, we use an inversion mask to invert the
// data bytes and treat the value as an unsigned number.
var inv byte // 0x00 if positive or zero, 0xff if negative
if b[0]&0x40 != 0 {
inv = 0xff
}
var x uint64
for i, c := range b {
c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
if i == 0 {
c &= 0x7f // Ignore signal bit in first byte
}
if (x >> 56) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
x = x<<8 | uint64(c)
}
if (x >> 63) > 0 {
p.err = ErrHeader // Integer overflow
return 0
}
if inv == 0xff {
return ^int64(x)
}
return int64(x)
}
// Normal case is base-8 (octal) format.
return p.parseOctal(b)
}
func (p *parser) parseOctal(b []byte) int64 {
// Because unused fields are filled with NULs, we need
// to skip leading NULs. Fields may also be padded with
// spaces or NULs.
// So we remove leading and trailing NULs and spaces to
// be sure.
b = bytes.Trim(b, " \x00")
if len(b) == 0 {
return 0
}
x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
if perr != nil {
p.err = ErrHeader
}
return int64(x)
}
// skipUnread skips any unread bytes in the existing file entry, as well as any
// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
// encountered in the data portion; it is okay to hit io.EOF in the padding.
//
// Note that this function still works properly even when sparse files are being
// used since numBytes returns the bytes remaining in the underlying io.Reader.
func (tr *Reader) skipUnread() error {
dataSkip := tr.numBytes() // Number of data bytes to skip
totalSkip := dataSkip + tr.pad // Total number of bytes to skip
tr.curr, tr.pad = nil, 0
// If possible, Seek to the last byte before the end of the data section.
// Do this because Seek is often lazy about reporting errors; this will mask
// the fact that the tar stream may be truncated. We can rely on the
// io.CopyN done shortly afterwards to trigger any IO errors.
var seekSkipped int64 // Number of bytes skipped via Seek
if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
// io.Seeker, but calling Seek always returns an error and performs
// no action. Thus, we try an innocent seek to the current position
// to see if Seek is really supported.
pos1, err := sr.Seek(0, os.SEEK_CUR)
if err == nil {
// Seek seems supported, so perform the real Seek.
pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
if err != nil {
tr.err = err
return tr.err
}
seekSkipped = pos2 - pos1
}
}
var copySkipped int64 // Number of bytes skipped via CopyN
copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
tr.err = io.ErrUnexpectedEOF
}
return tr.err
}
func (tr *Reader) verifyChecksum(header []byte) bool {
if tr.err != nil {
return false
}
var p parser
given := p.parseOctal(header[148:156])
unsigned, signed := checksum(header)
return p.err == nil && (given == unsigned || given == signed)
}
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary.
//
// The err will be set to io.EOF only when one of the following occurs:
// * Exactly 0 bytes are read and EOF is hit.
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() *Header {
header := tr.hdrBuff[:]
copy(header, zeroBlock)
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil // io.EOF is okay here
}
// Two blocks of zero bytes marks the end of the archive.
if bytes.Equal(header, zeroBlock[0:blockSize]) {
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil // io.EOF is okay here
}
if bytes.Equal(header, zeroBlock[0:blockSize]) {
tr.err = io.EOF
} else {
tr.err = ErrHeader // zero block and then non-zero block
}
return nil
}
if !tr.verifyChecksum(header) {
tr.err = ErrHeader
return nil
}
// Unpack
var p parser
hdr := new(Header)
s := slicer(header)
hdr.Name = p.parseString(s.next(100))
hdr.Mode = p.parseNumeric(s.next(8))
hdr.Uid = int(p.parseNumeric(s.next(8)))
hdr.Gid = int(p.parseNumeric(s.next(8)))
hdr.Size = p.parseNumeric(s.next(12))
hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
s.next(8) // chksum
hdr.Typeflag = s.next(1)[0]
hdr.Linkname = p.parseString(s.next(100))
// The remainder of the header depends on the value of magic.
// The original (v7) version of tar had no explicit magic field,
// so its magic bytes, like the rest of the block, are NULs.
magic := string(s.next(8)) // contains version field as well.
var format string
switch {
case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
if string(header[508:512]) == "tar\x00" {
format = "star"
} else {
format = "posix"
}
case magic == "ustar \x00": // old GNU tar
format = "gnu"
}
switch format {
case "posix", "gnu", "star":
hdr.Uname = p.parseString(s.next(32))
hdr.Gname = p.parseString(s.next(32))
devmajor := s.next(8)
devminor := s.next(8)
if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
hdr.Devmajor = p.parseNumeric(devmajor)
hdr.Devminor = p.parseNumeric(devminor)
}
var prefix string
switch format {
case "posix", "gnu":
prefix = p.parseString(s.next(155))
case "star":
prefix = p.parseString(s.next(131))
hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
}
if len(prefix) > 0 {
hdr.Name = prefix + "/" + hdr.Name
}
}
if p.err != nil {
tr.err = p.err
return nil
}
nb := hdr.Size
if isHeaderOnlyType(hdr.Typeflag) {
nb = 0
}
if nb < 0 {
tr.err = ErrHeader
return nil
}
// Set the current file reader.
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
tr.curr = &regFileReader{r: tr.r, nb: nb}
// Check for old GNU sparse format entry.
if hdr.Typeflag == TypeGNUSparse {
// Get the real size of the file.
hdr.Size = p.parseNumeric(header[483:495])
if p.err != nil {
tr.err = p.err
return nil
}
// Read the sparse map.
sp := tr.readOldGNUSparseMap(header)
if tr.err != nil {
return nil
}
// Current file is a GNU sparse file. Update the current file reader.
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
if tr.err != nil {
return nil
}
}
return hdr
}
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
var p parser
isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
spCap := oldGNUSparseMainHeaderNumEntries
if isExtended {
spCap += oldGNUSparseExtendedHeaderNumEntries
}
sp := make([]sparseEntry, 0, spCap)
s := slicer(header[oldGNUSparseMainHeaderOffset:])
// Read the four entries from the main tar header
for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
if p.err != nil {
tr.err = p.err
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
for isExtended {
// There are more entries. Read an extension header and parse its entries.
sparseHeader := make([]byte, blockSize)
if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
return nil
}
isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
s = slicer(sparseHeader)
for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
if p.err != nil {
tr.err = p.err
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
}
return sp
}
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, numBytes). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
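//
// For example, a map describing a single 512-byte fragment at offset 0
// is stored as the decimal tokens "1\n0\n512\n", padded out to the end
// of its 512-byte block.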
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
var cntNewline int64
var buf bytes.Buffer
var blk = make([]byte, blockSize)
// feedTokens copies data in blockSize-sized chunks from r into buf
// until there are at least cnt newlines in buf. It will not read more
// blocks than needed.
var feedTokens = func(cnt int64) error {
for cntNewline < cnt {
if _, err := io.ReadFull(r, blk); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
buf.Write(blk)
for _, c := range blk {
if c == '\n' {
cntNewline++
}
}
}
return nil
}
// nextToken gets the next token delimited by a newline. This assumes that
// at least one newline exists in the buffer.
var nextToken = func() string {
cntNewline--
tok, _ := buf.ReadString('\n')
return tok[:len(tok)-1] // Cut off newline
}
// Parse for the number of entries.
// Use integer overflow resistant math to check this.
if err := feedTokens(1); err != nil {
return nil, err
}
numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// Parse for all member entries.
// numEntries is trusted after this since a potential attacker must have
// committed resources proportional to what this library used.
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(nextToken(), 10, 64)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
// Get number of entries.
// Use integer overflow resistant math to check this.
numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
return nil, ErrHeader
}
// There should be two numbers in sparseMap for each entry.
sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
if int64(len(sparseMap)) != 2*numEntries {
return nil, ErrHeader
}
// Loop through the entries in the sparse map.
// numEntries is trusted now.
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
// numBytes returns the number of bytes left to read in the current file's entry
// in the tar archive, or 0 if there is no current file.
func (tr *Reader) numBytes() int64 {
if tr.curr == nil {
// No current file, so no bytes
return 0
}
return tr.curr.numBytes()
}
// Read reads from the current entry in the tar archive.
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
//
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (n int, err error) {
if tr.err != nil {
return 0, tr.err
}
if tr.curr == nil {
return 0, io.EOF
}
n, err = tr.curr.Read(b)
if err != nil && err != io.EOF {
tr.err = err
}
return
}
func (rfr *regFileReader) Read(b []byte) (n int, err error) {
if rfr.nb == 0 {
// file consumed
return 0, io.EOF
}
if int64(len(b)) > rfr.nb {
b = b[0:rfr.nb]
}
n, err = rfr.r.Read(b)
rfr.nb -= int64(n)
if err == io.EOF && rfr.nb > 0 {
err = io.ErrUnexpectedEOF
}
return
}
// numBytes returns the number of bytes left to read in the file's data in the tar archive.
func (rfr *regFileReader) numBytes() int64 {
return rfr.nb
}
// newSparseFileReader creates a new sparseFileReader, but validates all of the
// sparse entries before doing so.
func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
if total < 0 {
return nil, ErrHeader // Total size cannot be negative
}
// Validate all sparse entries. These are the same checks as performed by
// the BSD tar utility.
for i, s := range sp {
switch {
case s.offset < 0 || s.numBytes < 0:
return nil, ErrHeader // Negative values are never okay
case s.offset > math.MaxInt64-s.numBytes:
return nil, ErrHeader // Integer overflow with large length
case s.offset+s.numBytes > total:
return nil, ErrHeader // Region extends beyond the "real" size
case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
return nil, ErrHeader // Regions can't overlap and must be in order
}
}
return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
}
// readHole reads a sparse hole ending at endOffset.
func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
n64 := endOffset - sfr.pos
if n64 > int64(len(b)) {
n64 = int64(len(b))
}
n := int(n64)
for i := 0; i < n; i++ {
b[i] = 0
}
sfr.pos += n64
return n
}
// Read reads the sparse file data in expanded form.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
// Skip past all empty fragments.
for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
sfr.sp = sfr.sp[1:]
}
// If there are no more fragments, then it is possible that there
// is one last sparse hole.
if len(sfr.sp) == 0 {
// This behavior matches the BSD tar utility.
// However, GNU tar stops returning data even if sfr.total is unmet.
if sfr.pos < sfr.total {
return sfr.readHole(b, sfr.total), nil
}
return 0, io.EOF
}
// In front of a data fragment, so read a hole.
if sfr.pos < sfr.sp[0].offset {
return sfr.readHole(b, sfr.sp[0].offset), nil
}
// In a data fragment, so read from it.
// This math is overflow free since we verify that offset and numBytes can
// be safely added when creating the sparseFileReader.
endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
bytesLeft := endPos - sfr.pos // Bytes left in fragment
if int64(len(b)) > bytesLeft {
b = b[:bytesLeft]
}
n, err = sfr.rfr.Read(b)
sfr.pos += int64(n)
if err == io.EOF {
if sfr.pos < endPos {
err = io.ErrUnexpectedEOF // There was supposed to be more data
} else if sfr.pos < sfr.total {
err = nil // There is still an implicit sparse hole at the end
}
}
if sfr.pos == endPos {
sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
}
return n, err
}
// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
func (sfr *sparseFileReader) numBytes() int64 {
return sfr.rfr.numBytes()
}
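Putting the reader API together: NewReader wraps an io.Reader, Next skips the meta "header files" and returns the next real entry, and Read streams that entry's data. A minimal consumer, as a sketch (archive name illustrative):

package main

import (
    "io"
    "io/ioutil"
    "log"
    "os"

    "github.com/Microsoft/go-winio/archive/tar"
)

func main() {
    f, err := os.Open("archive.tar") // hypothetical input archive
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    tr := tar.NewReader(f)
    for {
        hdr, err := tr.Next() // advances to the next entry
        if err == io.EOF {
            break // end of archive
        }
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("entry: %s (%d bytes)", hdr.Name, hdr.Size)
        // tr now reads this entry's data until the next call to Next;
        // here the contents are simply discarded.
        if _, err := io.Copy(ioutil.Discard, tr); err != nil {
            log.Fatal(err)
        }
    }
}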

vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}

vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}

vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go

@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}

vendor/github.com/Microsoft/go-winio/archive/tar/writer.go

@@ -0,0 +1,419 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// writeHeader writes hdr and prepares to accept the file's contents.
// It calls Flush if it is not the first header, and returns
// ErrWriteAfterClose if called after a Close. The allowPax argument
// exists because this method is called recursively by writePAXHeader,
// which uses it to suppress writing another pax header.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var f formatter
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}
// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
tw.usedBinary = true
f.formatNumeric(b, x)
}
// Keep a reference to the filename so we can overwrite it later if we detect that we can use ustar long names instead of pax.
pathHeaderBytes := s.next(fileNameSize)
formatString(pathHeaderBytes, hdr.Name, paxPath)
// Handle out of range ModTime carefully.
var modTime int64
if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
modTime = hdr.ModTime.Unix()
}
f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatNumeric(s.next(12), modTime, paxNone) // 136:148 --- consider using pax for finer granularity
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
formatString(s.next(100), hdr.Linkname, paxLinkpath)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
// Keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar long names instead of pax.
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '
// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
return tw.err
}
if allowPax {
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
for k, v := range hdr.Winheaders {
paxHeaders[paxWindows+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}
// writePAXHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
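//
// For example, formatPAXRecord("MSWINDOWS.fileattr", "32") produces
// "25 MSWINDOWS.fileattr=32\n": the payload " MSWINDOWS.fileattr=32\n"
// is 23 bytes, and prepending the two-digit length "25" brings the
// record to exactly 25 bytes, so no final adjustment is needed.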
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}
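The write side mirrors the reader: WriteHeader begins an entry, Write accepts at most hdr.Size bytes for it, and Close writes the two-zero-block trailer. A minimal producer, as a sketch (file name and contents illustrative):

package main

import (
    "log"
    "os"

    "github.com/Microsoft/go-winio/archive/tar"
)

func main() {
    f, err := os.Create("out.tar") // hypothetical output file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    tw := tar.NewWriter(f)
    body := []byte("hello world\n")
    hdr := &tar.Header{
        Name:     "hello.txt",
        Mode:     0644,
        Size:     int64(len(body)),
        Typeflag: tar.TypeReg,
    }
    if err := tw.WriteHeader(hdr); err != nil {
        log.Fatal(err)
    }
    if _, err := tw.Write(body); err != nil {
        log.Fatal(err)
    }
    // Close flushes any padding and appends the two-zero-block trailer.
    if err := tw.Close(); err != nil {
        log.Fatal(err)
    }
}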

vendor/github.com/Microsoft/go-winio/backuptar/tar.go generated vendored

@@ -0,0 +1,362 @@
package backuptar
import (
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
const (
hdrFileAttributes = "fileattr"
hdrAccessTime = "accesstime"
hdrChangeTime = "changetime"
hdrCreateTime = "createtime"
hdrWriteTime = "writetime"
hdrSecurityDescriptor = "sd"
hdrMountPoint = "mountpoint"
)
func writeZeroes(w io.Writer, count int64) error {
buf := make([]byte, 8192)
c := len(buf)
for i := int64(0); i < count; i += int64(c) {
if int64(c) > count-i {
c = int(count - i)
}
_, err := w.Write(buf[:c])
if err != nil {
return err
}
}
return nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if bhdr.Id != winio.BackupSparseBlock {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
if bhdr.Size == 0 {
break
}
n, err := io.Copy(t, br)
if err != nil {
return err
}
curOffset = bhdr.Offset + n
}
return nil
}
func win32TimeFromTar(key string, hdrs map[string]string, unixTime time.Time) syscall.Filetime {
if s, ok := hdrs[key]; ok {
n, err := strconv.ParseUint(s, 10, 64)
if err == nil {
return syscall.Filetime{LowDateTime: uint32(n & 0xffffffff), HighDateTime: uint32(n >> 32)}
}
}
return syscall.NsecToFiletime(unixTime.UnixNano())
}
func win32TimeToTar(ft syscall.Filetime) (string, time.Time) {
return fmt.Sprintf("%d", uint64(ft.LowDateTime)+(uint64(ft.HighDateTime)<<32)), time.Unix(0, ft.Nanoseconds())
}
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.accesstime: The last access time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.createtime: The creation time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.changetime: The change time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.writetime: The last write time, as a Filetime expressed as a 64-bit decimal value.
//
// MSWINDOWS.sd: The Win32 security descriptor, in SDDL (string) format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := &tar.Header{
Name: name,
Size: size,
Typeflag: tar.TypeReg,
Winheaders: make(map[string]string),
}
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
hdr.Winheaders[hdrAccessTime], hdr.AccessTime = win32TimeToTar(fileInfo.LastAccessTime)
hdr.Winheaders[hdrChangeTime], hdr.ChangeTime = win32TimeToTar(fileInfo.ChangeTime)
hdr.Winheaders[hdrCreateTime], _ = win32TimeToTar(fileInfo.CreationTime)
hdr.Winheaders[hdrWriteTime], hdr.ModTime = win32TimeToTar(fileInfo.LastWriteTime)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
br := winio.NewBackupStreamReader(r)
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= c_ISREG
dataHdr = bhdr
case winio.BackupSecurity:
sd, err := ioutil.ReadAll(br)
if err != nil {
return err
}
sddl, err := winio.SecurityDescriptorToSddl(sd)
if err != nil {
return err
}
hdr.Winheaders[hdrSecurityDescriptor] = sddl
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
hdr.Typeflag = tar.TypeSymlink
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
}
if rp.IsMountPoint {
hdr.Winheaders[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
}
}
err := t.WriteHeader(hdr)
if err != nil {
return err
}
if dataHdr != nil {
// A data stream was found. Copy the data.
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
err = copySparse(t, br)
if err != nil {
return err
}
}
}
// Look for streams after the data stream. The only ones we handle are alternate data streams.
// Other streams may have metadata that could be serialized, but the tar header has already
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupAlternateData:
altName := bhdr.Name
if strings.HasSuffix(altName, ":$DATA") {
altName = altName[:len(altName)-len(":$DATA")]
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return errors.New("tar of sparse alternate data streams is unsupported")
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
}
}
return nil
}
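
A hedged usage sketch for the writer direction above: archiving one on-disk regular file with its Win32 metadata. The helper name and all paths are hypothetical; winio.OpenForBackup, winio.GetFileBasicInfo, and winio.NewBackupFileReader come from the parent go-winio package, and the caller needs SeBackupPrivilege:

// +build windows

package main

import (
	"os"
	"syscall"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar"
	"github.com/Microsoft/go-winio/backuptar"
)

// tarOneFile archives a single regular file, metadata included, via
// WriteTarFileFromBackupStream. Directory handling is omitted.
func tarOneFile(t *tar.Writer, path, name string) error {
	f, err := winio.OpenForBackup(path, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		return err
	}
	st, err := f.Stat()
	if err != nil {
		return err
	}
	// The backup stream carries file data plus security and reparse metadata.
	r := winio.NewBackupFileReader(f, true)
	defer r.Close()
	return backuptar.WriteTarFileFromBackupStream(t, r, name, st.Size(), fi)
}

func main() {
	out, _ := os.Create(`C:\out.tar`) // hypothetical destination
	defer out.Close()
	t := tar.NewWriter(out)
	defer t.Close()
	_ = tarOneFile(t, `C:\data\hello.txt`, "hello.txt")
}
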
// Retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: win32TimeFromTar(hdrAccessTime, hdr.Winheaders, hdr.AccessTime),
LastWriteTime: win32TimeFromTar(hdrWriteTime, hdr.Winheaders, hdr.ModTime),
ChangeTime: win32TimeFromTar(hdrChangeTime, hdr.Winheaders, hdr.ChangeTime),
CreationTime: win32TimeFromTar(hdrCreateTime, hdr.Winheaders, hdr.ModTime),
}
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uintptr(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
return
}
// Writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file entry that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
sd, err := winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
rp := winio.ReparsePoint{
Target: hdr.Linkname,
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name)+1:] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
}
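
And the reverse direction, as a sketch: replaying tar entries as Win32 backup streams. Directory handling, timestamps, and error cleanup are omitted for brevity; paths and the helper name are hypothetical, and the caller needs SeRestorePrivilege:

// +build windows

package main

import (
	"io"
	"os"
	"path/filepath"
	"syscall"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar"
	"github.com/Microsoft/go-winio/backuptar"
)

// restoreFromTar replays each tar entry as a Win32 backup stream under root.
func restoreFromTar(t *tar.Reader, root string) error {
	hdr, err := t.Next()
	for err == nil {
		name, _, _, ferr := backuptar.FileInfoFromHeader(hdr)
		if ferr != nil {
			return ferr
		}
		f, ferr := winio.OpenForBackup(filepath.Join(root, name), syscall.GENERIC_WRITE, 0, syscall.CREATE_ALWAYS)
		if ferr != nil {
			return ferr
		}
		w := winio.NewBackupFileWriter(f, true)
		// WriteBackupStreamFromTarFile consumes any alternate-data-stream
		// entries for this file and returns the next unrelated header.
		hdr, err = backuptar.WriteBackupStreamFromTarFile(w, t, hdr)
		w.Close()
		f.Close()
	}
	if err == io.EOF {
		return nil
	}
	return err
}

func main() {
	f, _ := os.Open(`C:\layer.tar`) // hypothetical input
	defer f.Close()
	_ = restoreFromTar(tar.NewReader(f), `C:\restore`)
}
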

21
vendor/github.com/Microsoft/hcsshim/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

28
vendor/github.com/Microsoft/hcsshim/activatelayer.go generated vendored Normal file
View file

@@ -0,0 +1,28 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(info DriverInfo, id string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = activateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
return nil
}

172
vendor/github.com/Microsoft/hcsshim/baselayer.go generated vendored Normal file
View file

@@ -0,0 +1,172 @@
package hcsshim
import (
"errors"
"os"
"path/filepath"
"syscall"
"github.com/Microsoft/go-winio"
)
type baseLayerWriter struct {
root string
f *os.File
bw *winio.BackupFileWriter
err error
hasUtilityVM bool
dirInfo []dirInfo
}
type dirInfo struct {
path string
fileInfo winio.FileBasicInfo
}
func (w *baseLayerWriter) closeCurrentFile() error {
if w.f != nil {
err := w.bw.Close()
err2 := w.f.Close()
w.f = nil
w.bw = nil
if err != nil {
return err
}
if err2 != nil {
return err2
}
}
return nil
}
func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
defer func() {
if err != nil {
w.err = err
}
}()
err = w.closeCurrentFile()
if err != nil {
return err
}
if filepath.ToSlash(name) == `UtilityVM/Files` {
w.hasUtilityVM = true
}
path := filepath.Join(w.root, name)
path, err = makeLongAbsPath(path)
if err != nil {
return err
}
var f *os.File
defer func() {
if f != nil {
f.Close()
}
}()
createmode := uint32(syscall.CREATE_NEW)
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
err := os.Mkdir(path, 0)
if err != nil && !os.IsExist(err) {
return err
}
createmode = syscall.OPEN_EXISTING
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo})
}
}
mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode)
if err != nil {
return makeError(err, "Failed to OpenForBackup", path)
}
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", path)
}
w.f = f
w.bw = winio.NewBackupFileWriter(f, true)
f = nil
return nil
}
func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
defer func() {
if err != nil {
w.err = err
}
}()
err = w.closeCurrentFile()
if err != nil {
return err
}
linkpath, err := makeLongAbsPath(filepath.Join(w.root, name))
if err != nil {
return err
}
linktarget, err := makeLongAbsPath(filepath.Join(w.root, target))
if err != nil {
return err
}
return os.Link(linktarget, linkpath)
}
func (w *baseLayerWriter) Remove(name string) error {
return errors.New("base layer cannot have tombstones")
}
func (w *baseLayerWriter) Write(b []byte) (int, error) {
n, err := w.bw.Write(b)
if err != nil {
w.err = err
}
return n, err
}
func (w *baseLayerWriter) Close() error {
err := w.closeCurrentFile()
if err != nil {
return err
}
if w.err == nil {
// Restore the file times of all the directories, since they may have
// been modified by creating child directories.
for i := range w.dirInfo {
di := &w.dirInfo[len(w.dirInfo)-i-1]
f, err := winio.OpenForBackup(di.path, uint32(syscall.GENERIC_READ|syscall.GENERIC_WRITE), syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
if err != nil {
return makeError(err, "Failed to OpenForBackup", di.path)
}
err = winio.SetFileBasicInfo(f, &di.fileInfo)
f.Close()
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", di.path)
}
}
err = ProcessBaseLayer(w.root)
if err != nil {
return err
}
if w.hasUtilityVM {
err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM"))
if err != nil {
return err
}
}
}
return w.err
}

79
vendor/github.com/Microsoft/hcsshim/callback.go generated vendored Normal file
View file

@@ -0,0 +1,79 @@
package hcsshim
import (
"sync"
"syscall"
)
var (
nextCallback uintptr
callbackMap = map[uintptr]*notifcationWatcherContext{}
callbackMapLock = sync.RWMutex{}
notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
// Notifications for HCS_SYSTEM handles
hcsNotificationSystemExited hcsNotification = 0x00000001
hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
// Notifications for HCS_PROCESS handles
hcsNotificationProcessExited hcsNotification = 0x00010000
// Common notifications
hcsNotificationInvalid hcsNotification = 0x00000000
hcsNotificationServiceDisconnect hcsNotification = 0x01000000
)
type hcsNotification uint32
type notificationChannel chan error
type notifcationWatcherContext struct {
channels notificationChannels
handle hcsCallback
}
type notificationChannels map[hcsNotification]notificationChannel
func newChannels() notificationChannels {
channels := make(notificationChannels)
channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
return channels
}
func closeChannels(channels notificationChannels) {
close(channels[hcsNotificationSystemExited])
close(channels[hcsNotificationSystemCreateCompleted])
close(channels[hcsNotificationSystemStartCompleted])
close(channels[hcsNotificationSystemPauseCompleted])
close(channels[hcsNotificationSystemResumeCompleted])
close(channels[hcsNotificationProcessExited])
close(channels[hcsNotificationServiceDisconnect])
}
func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
var result error
if int32(notificationStatus) < 0 {
result = syscall.Errno(win32FromHresult(notificationStatus))
}
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return 0
}
context.channels[notificationType] <- result
return 0
}

7
vendor/github.com/Microsoft/hcsshim/cgo.go generated vendored Normal file
View file

@@ -0,0 +1,7 @@
package hcsshim
import "C"
// This import is needed to make the library compile as CGO because HCSSHIM
// only works with CGO due to callbacks from HCS coming back from a C thread
// which is not supported without CGO. See https://github.com/golang/go/issues/10973

565
vendor/github.com/Microsoft/hcsshim/container.go generated vendored Normal file
View file

@@ -0,0 +1,565 @@
package hcsshim
import (
"encoding/json"
"runtime"
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
var (
defaultTimeout = time.Minute * 4
)
const (
pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
)
type container struct {
handle hcsSystem
id string
callbackNumber uintptr
}
type containerProperties struct {
ID string `json:"Id"`
Name string
SystemType string
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
IsDummy bool `json:",omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
Stopped bool `json:",omitempty"`
ExitType string `json:",omitempty"`
AreUpdatesPending bool `json:",omitempty"`
ObRoot string `json:",omitempty"`
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
}
// MemoryStats holds the memory statistics for a container
type MemoryStats struct {
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}
// ProcessorStats holds the processor statistics for a container
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:",omitempty"`
RuntimeUser100ns uint64 `json:",omitempty"`
RuntimeKernel100ns uint64 `json:",omitempty"`
}
// StorageStats holds the storage statistics for a container
type StorageStats struct {
ReadCountNormalized uint64 `json:",omitempty"`
ReadSizeBytes uint64 `json:",omitempty"`
WriteCountNormalized uint64 `json:",omitempty"`
WriteSizeBytes uint64 `json:",omitempty"`
}
// NetworkStats holds the network statistics for a container
type NetworkStats struct {
BytesReceived uint64 `json:",omitempty"`
BytesSent uint64 `json:",omitempty"`
PacketsReceived uint64 `json:",omitempty"`
PacketsSent uint64 `json:",omitempty"`
DroppedPacketsIncoming uint64 `json:",omitempty"`
DroppedPacketsOutgoing uint64 `json:",omitempty"`
EndpointId string `json:",omitempty"`
InstanceId string `json:",omitempty"`
}
// Statistics is the structure returned by a statistics call on a container
type Statistics struct {
Timestamp time.Time `json:",omitempty"`
ContainerStartTime time.Time `json:",omitempty"`
Uptime100ns uint64 `json:",omitempty"`
Memory MemoryStats `json:",omitempty"`
Processor ProcessorStats `json:",omitempty"`
Storage StorageStats `json:",omitempty"`
Network []NetworkStats `json:",omitempty"`
}
// ProcessList is the structure of an item returned by a ProcessList call on a container
type ProcessListItem struct {
CreateTimestamp time.Time `json:",omitempty"`
ImageName string `json:",omitempty"`
KernelTime100ns uint64 `json:",omitempty"`
MemoryCommitBytes uint64 `json:",omitempty"`
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
ProcessId uint32 `json:",omitempty"`
UserTime100ns uint64 `json:",omitempty"`
}
// CreateContainer creates a new container with the given configuration but does not start it.
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
operation := "CreateContainer"
title := "HCSShim::" + operation
container := &container{
id: id,
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, err
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", id, configuration)
var (
resultp *uint16
createError error
)
if hcsCallbacksSupported {
var identity syscall.Handle
createError = hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
if createError == nil || IsPending(createError) {
if err := container.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
}
} else {
createError = hcsCreateComputeSystemTP5(id, configuration, &container.handle, &resultp)
}
err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
if err != nil {
return nil, makeContainerError(container, operation, configuration, err)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
runtime.SetFinalizer(container, closeContainer)
return container, nil
}
// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
operation := "OpenContainer"
title := "HCSShim::" + operation
logrus.Debugf(title+" id=%s", id)
container := &container{
id: id,
}
var (
handle hcsSystem
resultp *uint16
)
err := hcsOpenComputeSystem(id, &handle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
container.handle = handle
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
runtime.SetFinalizer(container, closeContainer)
return container, nil
}
// Start synchronously starts the container.
func (container *container) Start() error {
operation := "Start"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsStartComputeSystemTP5(container.handle, nil, &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Shutdown requests a container shutdown; if IsPending() on the returned error is true,
// it may not actually be shut down until Wait() succeeds.
func (container *container) Shutdown() error {
operation := "Shutdown"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsShutdownComputeSystemTP5(container.handle, nil, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Terminate requests a container terminate; if IsPending() on the returned error is true,
// it may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
operation := "Terminate"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsTerminateComputeSystemTP5(container.handle, nil, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Wait synchronously waits for the container to shutdown or terminate.
func (container *container) Wait() error {
operation := "Wait"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if hcsCallbacksSupported {
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeContainerError(container, operation, "", err)
}
} else {
_, err := container.waitTimeoutInternal(syscall.INFINITE)
if err != nil {
return makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
func (container *container) waitTimeoutInternal(timeout uint32) (bool, error) {
return waitTimeoutInternalHelper(container, timeout)
}
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (container *container) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if hcsCallbacksSupported {
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
} else {
finished, err := waitTimeoutHelper(container, timeout)
if !finished {
err = ErrTimeout
}
if err != nil {
return makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
func (container *container) hcsWait(timeout uint32) (bool, error) {
var (
resultp *uint16
exitEvent syscall.Handle
)
err := hcsCreateComputeSystemWait(container.handle, &exitEvent, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return false, err
}
defer syscall.CloseHandle(exitEvent)
return waitForSingleObject(exitEvent, timeout)
}
func (container *container) properties(query string) (*containerProperties, error) {
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &containerProperties{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
return properties, nil
}
// HasPendingUpdates returns true if the container has updates pending to install
func (container *container) HasPendingUpdates() (bool, error) {
operation := "HasPendingUpdates"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
properties, err := container.properties(pendingUpdatesQuery)
if err != nil {
return false, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.AreUpdatesPending, nil
}
// Statistics returns statistics for the container
func (container *container) Statistics() (Statistics, error) {
operation := "Statistics"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
properties, err := container.properties(statisticsQuery)
if err != nil {
return Statistics{}, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.Statistics, nil
}
// ProcessList returns an array of ProcessListItems for the container
func (container *container) ProcessList() ([]ProcessListItem, error) {
operation := "ProcessList"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
properties, err := container.properties(processListQuery)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.ProcessList, nil
}
// Pause pauses the execution of the container. This feature is not enabled in TP5.
func (container *container) Pause() error {
operation := "Pause"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var resultp *uint16
err := hcsPauseComputeSystemTP5(container.handle, nil, &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Resume resumes the execution of the container. This feature is not enabled in TP5.
func (container *container) Resume() error {
operation := "Resume"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
var (
resultp *uint16
)
err := hcsResumeComputeSystemTP5(container.handle, nil, &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
operation := "CreateProcess"
title := "HCSShim::Container::" + operation
var (
processInfo hcsProcessInformation
processHandle hcsProcess
resultp *uint16
)
// If we are not emulating a console, ignore any console size passed to us
if !c.EmulateConsole {
c.ConsoleSize[0] = 0
c.ConsoleSize[1] = 0
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, configuration, err)
}
process := &process{
handle: processHandle,
processID: int(processInfo.ProcessId),
container: container,
cachedPipes: &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
},
}
if hcsCallbacksSupported {
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
runtime.SetFinalizer(process, closeProcess)
return process, nil
}
// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
operation := "OpenProcess"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
var (
processHandle hcsProcess
resultp *uint16
)
err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
process := &process{
handle: processHandle,
processID: pid,
container: container,
}
if hcsCallbacksSupported {
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
}
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
runtime.SetFinalizer(process, closeProcess)
return process, nil
}
// Close cleans up any state associated with the container but does not terminate or wait for it.
func (container *container) Close() error {
operation := "Close"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
// Don't double free this
if container.handle == 0 {
return nil
}
if hcsCallbacksSupported {
if err := container.unregisterCallback(); err != nil {
return makeContainerError(container, operation, "", err)
}
}
if err := hcsCloseComputeSystem(container.handle); err != nil {
return makeContainerError(container, operation, "", err)
}
container.handle = 0
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// closeContainer wraps container.Close for use by a finalizer
func closeContainer(container *container) {
container.Close()
}
func (container *container) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
container.callbackNumber = callbackNumber
return nil
}
func (container *container) unregisterCallback() error {
callbackNumber := container.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterComputeSystemCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}
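
A hedged end-to-end sketch of the lifecycle these methods implement (create, start, shut down, wait, close). The ContainerConfig values are hypothetical and far from complete; real callers also supply layer folders, networking, and so on:

// +build windows

package main

import (
	"time"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Minimal configuration; field names are from hcsshim.ContainerConfig,
	// values are hypothetical.
	cfg := &hcsshim.ContainerConfig{
		SystemType: "Container",
		Name:       "example",
	}
	c, err := hcsshim.CreateContainer("example-id", cfg)
	if err != nil {
		panic(err)
	}
	defer c.Close()
	if err := c.Start(); err != nil {
		panic(err)
	}
	// Shutdown may complete asynchronously; IsPending distinguishes that case.
	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
		panic(err)
	}
	if err := c.WaitTimeout(4 * time.Minute); err != nil {
		panic(err)
	}
}
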

27
vendor/github.com/Microsoft/hcsshim/createlayer.go generated vendored Normal file
View file

@@ -0,0 +1,27 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(info DriverInfo, id, parent string) error {
title := "hcsshim::CreateLayer "
logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createLayer(&infop, id, parent)
if err != nil {
err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
return nil
}

35
vendor/github.com/Microsoft/hcsshim/createsandboxlayer.go generated vendored Normal file
View file

@@ -0,0 +1,35 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// CreateSandboxLayer creates and populates a new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
title := "hcsshim::CreateSandboxLayer "
logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createSandboxLayer(&infop, layerId, parentId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
return nil
}

26
vendor/github.com/Microsoft/hcsshim/deactivatelayer.go generated vendored Normal file
View file

@@ -0,0 +1,26 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(info DriverInfo, id string) error {
title := "hcsshim::DeactivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = deactivateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

27
vendor/github.com/Microsoft/hcsshim/destroylayer.go generated vendored Normal file
View file

@@ -0,0 +1,27 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// DestroyLayer will remove the on-disk files representing the layer with the given
// id, including that layer's containing folder, if any.
func DestroyLayer(info DriverInfo, id string) error {
title := "hcsshim::DestroyLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = destroyLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

190
vendor/github.com/Microsoft/hcsshim/errors.go generated vendored Normal file
View file

@@ -0,0 +1,190 @@
package hcsshim
import (
"errors"
"fmt"
"syscall"
)
var (
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
// ErrElementNotFound is an error encountered when the object being referenced does not exist
ErrElementNotFound = syscall.Errno(0x490)
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
// ErrTimeout is an error encountered when waiting on a notification times out
ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
// a different expected notification
ErrUnexpectedContainerExit = errors.New("unexpected container exit")
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
// is lost while waiting for a notification
ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
// ErrProcNotFound is an error encountered when the process cannot be found
ErrProcNotFound = syscall.Errno(0x7f)
)
// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
Process *process
Operation string
ExtraInfo string
Err error
}
// ContainerError is an error encountered in HCS during an operation on a Container object
type ContainerError struct {
Container *container
Operation string
ExtraInfo string
Err error
}
func (e *ContainerError) Error() string {
if e == nil {
return "<nil>"
}
if e.Container == nil {
return "unexpected nil container for error: " + e.Err.Error()
}
s := "container " + e.Container.id
if e.Operation != "" {
s += " encountered an error during " + e.Operation
}
if e.Err != nil {
s += fmt.Sprintf(" failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
}
if e.ExtraInfo != "" {
s += " extra info: " + e.ExtraInfo
}
return s
}
func makeContainerError(container *container, operation string, extraInfo string, err error) error {
// Don't double wrap errors
if _, ok := err.(*ContainerError); ok {
return err
}
containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
return containerError
}
func (e *ProcessError) Error() string {
if e == nil {
return "<nil>"
}
if e.Process == nil {
return "Unexpected nil process for error: " + e.Err.Error()
}
s := fmt.Sprintf("process %d", e.Process.processID)
if e.Process.container != nil {
s += " in container " + e.Process.container.id
}
if e.Operation != "" {
s += " " + e.Operation
}
switch e.Err.(type) {
case nil:
break
case syscall.Errno:
s += fmt.Sprintf(" failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
default:
s += fmt.Sprintf(" failed: %s", e.Err.Error())
}
return s
}
func makeProcessError(process *process, operation string, extraInfo string, err error) error {
// Don't double wrap errors
if _, ok := err.(*ProcessError); ok {
return err
}
processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
return processError
}
// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
err = getInnerError(err)
return err == ErrComputeSystemDoesNotExist ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeOperationPending
}
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
err = getInnerError(err)
return err == ErrTimeout
}
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
func getInnerError(err error) error {
switch pe := err.(type) {
case nil:
return nil
case *ContainerError:
err = pe.Err
case *ProcessError:
err = pe.Err
}
return err
}
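
A short sketch of how callers are expected to branch on these helpers after a stop request; the function name is hypothetical, and the Container would come from CreateContainer or OpenContainer:

// +build windows

package example

import "github.com/Microsoft/hcsshim"

// shutdownQuietly treats background completion and already-stopped states
// as success, and only surfaces real failures.
func shutdownQuietly(c hcsshim.Container) error {
	err := c.Shutdown()
	switch {
	case err == nil:
		return nil // stopped synchronously
	case hcsshim.IsPending(err):
		return c.Wait() // completing in the background; wait for it
	case hcsshim.IsAlreadyStopped(err), hcsshim.IsNotExist(err):
		return nil // nothing left to do
	default:
		return err
	}
}
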

26
vendor/github.com/Microsoft/hcsshim/expandsandboxsize.go generated vendored Normal file
View file

@@ -0,0 +1,26 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// ExpandSandboxSize expands the size of a layer to at least size bytes.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
title := "hcsshim::ExpandSandboxSize "
logrus.Debugf(title+"layerId=%s size=%d", layerId, size)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = expandSandboxSize(&infop, layerId, size)
if err != nil {
err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
return nil
}

158
vendor/github.com/Microsoft/hcsshim/exportlayer.go generated vendored Normal file
View file

@@ -0,0 +1,158 @@
package hcsshim
import (
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Sirupsen/logrus"
)
// ExportLayer will create a folder at exportFolderPath and fill that folder with
// the transport format version of the layer identified by layerId. This transport
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ExportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = exportLayer(&infop, layerId, exportFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath)
return nil
}
type LayerReader interface {
Next() (string, int64, *winio.FileBasicInfo, error)
Read(b []byte) (int, error)
Close() error
}
// FilterLayerReader provides an interface for extracting the contents of an on-disk layer.
type FilterLayerReader struct {
context uintptr
}
// Next reads the next available file from a layer, ensuring that parent directories are always read
// before child files and directories.
//
// Next returns the file's relative path, size, and basic file metadata. Read() should be used to
// extract a Win32 backup stream with the remainder of the metadata and the data.
func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) {
var fileNamep *uint16
fileInfo := &winio.FileBasicInfo{}
var deleted uint32
var fileSize int64
err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted)
if err != nil {
if err == syscall.ERROR_NO_MORE_FILES {
err = io.EOF
} else {
err = makeError(err, "ExportLayerNext", "")
}
return "", 0, nil, err
}
fileName := convertAndFreeCoTaskMemString(fileNamep)
if deleted != 0 {
fileInfo = nil
}
if fileName[0] == '\\' {
fileName = fileName[1:]
}
return fileName, fileSize, fileInfo, nil
}
// Read reads from the current file's Win32 backup stream.
func (r *FilterLayerReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := exportLayerRead(r.context, b, &bytesRead)
if err != nil {
return 0, makeError(err, "ExportLayerRead", "")
}
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}
// Close frees resources associated with the layer reader. It will return an
// error if there was an error while reading the layer or of the layer was not
// completely read.
func (r *FilterLayerReader) Close() (err error) {
if r.context != 0 {
err = exportLayerEnd(r.context)
if err != nil {
err = makeError(err, "ExportLayerEnd", "")
}
r.context = 0
}
return
}
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader.
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
if procExportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
path, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
err = ExportLayer(info, layerID, path, parentLayerPaths)
if err != nil {
os.RemoveAll(path)
return nil, err
}
return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
r := &FilterLayerReader{}
err = exportLayerBegin(&infop, layerID, layers, &r.context)
if err != nil {
return nil, makeError(err, "ExportLayerBegin", "")
}
runtime.SetFinalizer(r, func(r *FilterLayerReader) { r.Close() })
return r, err
}
type legacyLayerReaderWrapper struct {
*legacyLayerReader
}
func (r *legacyLayerReaderWrapper) Close() error {
err := r.legacyLayerReader.Close()
os.RemoveAll(r.root)
return err
}
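
A hedged sketch of consuming the LayerReader interface above: iterate with Next, drain each entry's backup stream, stop at io.EOF. The DriverInfo values and layer id are hypothetical (Flavour 1 is assumed to select the filter driver), and SeBackupPrivilege is required:

// +build windows

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/Microsoft/hcsshim"
)

// dumpLayer lists a layer's files via NewLayerReader.
func dumpLayer(info hcsshim.DriverInfo, id string, parents []string) error {
	r, err := hcsshim.NewLayerReader(info, id, parents)
	if err != nil {
		return err
	}
	defer r.Close()
	for {
		name, size, _, err := r.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", name, size)
		// Drain this entry's backup stream before moving to the next one.
		if _, err := io.Copy(ioutil.Discard, r); err != nil {
			return err
		}
	}
}

func main() {
	info := hcsshim.DriverInfo{HomeDir: `C:\ProgramData\layers`, Flavour: 1}
	_ = dumpLayer(info, "example-layer", nil)
}
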

55
vendor/github.com/Microsoft/hcsshim/getlayermountpath.go generated vendored Normal file
View file

@@ -0,0 +1,55 @@
package hcsshim
import (
"syscall"
"github.com/Sirupsen/logrus"
)
// GetLayerMountPath will look for a mounted layer with the given id and return
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
title := "hcsshim::GetLayerMountPath "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return "", err
}
var mountPathLength uintptr
// Call the procedure itself.
logrus.Debugf("Calling proc (1)")
err = getLayerMountPath(&infop, id, &mountPathLength, nil)
if err != nil {
err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return "", err
}
// Allocate a mount path of the returned length.
if mountPathLength == 0 {
return "", nil
}
mountPathp := make([]uint16, mountPathLength)
mountPathp[0] = 0
// Call the procedure again
logrus.Debugf("Calling proc (2)")
err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0])
if err != nil {
err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return "", err
}
path := syscall.UTF16ToString(mountPathp[0:])
logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path)
return path, nil
}
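
Taken together with ActivateLayer and DeactivateLayer earlier in this commit, a minimal hedged sketch of mounting a layer and reading back its volume path (DriverInfo values and the layer id are hypothetical; Flavour 1 is assumed to select the filter driver):

// +build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// HomeDir is the storage root for layers on this host.
	info := hcsshim.DriverInfo{HomeDir: `C:\ProgramData\layers`, Flavour: 1}
	id := "example-layer"
	if err := hcsshim.ActivateLayer(info, id); err != nil {
		panic(err)
	}
	defer hcsshim.DeactivateLayer(info, id)
	path, err := hcsshim.GetLayerMountPath(info, id)
	if err != nil {
		panic(err)
	}
	fmt.Println("layer mounted at", path)
}
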

22
vendor/github.com/Microsoft/hcsshim/getsharedbaseimages.go generated vendored Normal file
View file

@@ -0,0 +1,22 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// GetSharedBaseImages will enumerate the images stored in the common central
// image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages() (imageData string, err error) {
title := "hcsshim::GetSharedBaseImages "
logrus.Debugf("Calling proc")
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
err = makeError(err, title, "")
logrus.Error(err)
return
}
imageData = convertAndFreeCoTaskMemString(buffer)
logrus.Debugf(title+" - succeeded output=%s", imageData)
return
}

19
vendor/github.com/Microsoft/hcsshim/guid.go generated vendored Normal file
View file

@@ -0,0 +1,19 @@
package hcsshim
import (
"crypto/sha1"
"fmt"
)
type GUID [16]byte
func NewGUID(source string) *GUID {
h := sha1.Sum([]byte(source))
var g GUID
copy(g[0:], h[0:16])
return &g
}
func (g *GUID) ToString() string {
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
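
Because NewGUID derives the GUID from a SHA-1 hash of its input, the mapping is deterministic: equal names always yield equal GUIDs. A tiny sketch:

// +build windows

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Stable output for the same input string ("example-network" is hypothetical).
	fmt.Println(hcsshim.NewGUID("example-network").ToString())
}
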

183
vendor/github.com/Microsoft/hcsshim/hcsshim.go generated vendored Normal file
View file

@@ -0,0 +1,183 @@
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcsshim
import (
"fmt"
"syscall"
"unsafe"
"github.com/Sirupsen/logrus"
)
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
//sys createComputeSystem(id string, configuration string) (hr error) = vmcompute.CreateComputeSystem?
//sys createProcessWithStdHandlesInComputeSystem(id string, paramsJson string, pid *uint32, stdin *syscall.Handle, stdout *syscall.Handle, stderr *syscall.Handle) (hr error) = vmcompute.CreateProcessWithStdHandlesInComputeSystem?
//sys resizeConsoleInComputeSystem(id string, pid uint32, height uint16, width uint16, flags uint32) (hr error) = vmcompute.ResizeConsoleInComputeSystem?
//sys shutdownComputeSystem(id string, timeout uint32) (hr error) = vmcompute.ShutdownComputeSystem?
//sys startComputeSystem(id string) (hr error) = vmcompute.StartComputeSystem?
//sys terminateComputeSystem(id string) (hr error) = vmcompute.TerminateComputeSystem?
//sys terminateProcessInComputeSystem(id string, pid uint32) (hr error) = vmcompute.TerminateProcessInComputeSystem?
//sys waitForProcessInComputeSystem(id string, pid uint32, timeout uint32, exitCode *uint32) (hr error) = vmcompute.WaitForProcessInComputeSystem?
//sys getComputeSystemProperties(id string, flags uint32, properties **uint16) (hr error) = vmcompute.GetComputeSystemProperties?
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsCreateComputeSystemWait(computeSystem hcsSystem, exitEvent *syscall.Handle, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystemWait?
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsCreateProcessWait(process hcsProcess, settings *syscall.Handle, result **uint16) (hr error) = vmcompute.HcsCreateProcessWait?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
//sys hcsCreateComputeSystemTP5(id string, configuration string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsStartComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
const (
// Specific user-visible exit codes
WaitErrExecFailed = 32767
ERROR_GEN_FAILURE = syscall.Errno(31)
ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
WSAEINVAL = syscall.Errno(10022)
// Timeout on wait calls
TimeoutInfinite = 0xFFFFFFFF
)
type HcsError struct {
title string
rest string
Err error
}
type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle
type hcsProcessInformation struct {
ProcessId uint32
Reserved uint32
StdInput syscall.Handle
StdOutput syscall.Handle
StdError syscall.Handle
}
func makeError(err error, title, rest string) error {
// Pass through DLL errors directly since they do not originate from HCS.
if _, ok := err.(*syscall.DLLError); ok {
return err
}
return &HcsError{title, rest, err}
}
func makeErrorf(err error, title, format string, a ...interface{}) error {
return makeError(err, title, fmt.Sprintf(format, a...))
}
func win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return win32FromError(herr.Err)
}
if code, ok := err.(syscall.Errno); ok {
return uint32(code)
}
return uint32(ERROR_GEN_FAILURE)
}
func win32FromHresult(hr uintptr) uintptr {
if hr&0x1fff0000 == 0x00070000 {
return hr & 0xffff
}
return hr
}
func (e *HcsError) Error() string {
s := e.title
if len(s) > 0 && s[len(s)-1] != ' ' {
s += " "
}
s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
if e.rest != "" {
if e.rest[0] != ' ' {
s += " "
}
s += e.rest
}
return s
}
func convertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
coTaskMemFree(unsafe.Pointer(buffer))
return str
}
func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
return []byte(convertAndFreeCoTaskMemString(buffer))
}
func processHcsResult(err error, resultp *uint16) error {
if resultp != nil {
result := convertAndFreeCoTaskMemString(resultp)
logrus.Debugf("Result: %s", result)
}
return err
}
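
As a quick illustration of the error plumbing above: HRESULTs that wrap Win32 errors carry facility FACILITY_WIN32 (7), and win32FromHresult strips that wrapper. A minimal standalone sketch (not part of the vendored package; the values are illustrative):

package main

import "fmt"

// Same logic as win32FromHresult above: a FACILITY_WIN32 HRESULT keeps its
// original Win32 error code in the low 16 bits.
func win32FromHresult(hr uintptr) uintptr {
	if hr&0x1fff0000 == 0x00070000 {
		return hr & 0xffff
	}
	return hr
}

func main() {
	// 0x80070002 == HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)
	fmt.Printf("0x%x\n", win32FromHresult(0x80070002)) // prints 0x2
	// HRESULTs from other facilities pass through unchanged.
	fmt.Printf("0x%x\n", win32FromHresult(0x88990001)) // prints 0x88990001
}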

152
vendor/github.com/Microsoft/hcsshim/hnsfuncs.go generated vendored Normal file

@ -0,0 +1,152 @@
package hcsshim
import (
"encoding/json"
"fmt"
"net"
"github.com/Sirupsen/logrus"
)
type NatPolicy struct {
Type string
Protocol string
InternalPort uint16
ExternalPort uint16
}
type QosPolicy struct {
Type string
MaximumOutgoingBandwidthInBytes uint64
}
type VlanPolicy struct {
Type string
VLAN uint
}
type VsidPolicy struct {
Type string
VSID uint
}
// Subnet is associated with a network and represents one of the
// subnets available to the network
type Subnet struct {
AddressPrefix string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
}
// MacPool is associated with a network and represents a range of
// MAC addresses available to the network
type MacPool struct {
StartMacAddress string `json:",omitempty"`
EndMacAddress string `json:",omitempty"`
}
// HNSNetwork represents a network in HNS
type HNSNetwork struct {
Id string `json:",omitempty"`
Name string `json:",omitempty"`
Type string `json:",omitempty"`
NetworkAdapterName string `json:",omitempty"`
SourceMac string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacPools []MacPool `json:",omitempty"`
Subnets []Subnet `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
DNSServerCompartment uint32 `json:",omitempty"`
}
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:",omitempty"`
Name string `json:",omitempty"`
VirtualNetwork string `json:",omitempty"`
VirtualNetworkName string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
}
type hnsNetworkResponse struct {
Success bool
Error string
Output HNSNetwork
}
type hnsResponse struct {
Success bool
Error string
Output json.RawMessage
}
func hnsCall(method, path, request string, returnResponse interface{}) error {
var responseBuffer *uint16
err := _hnsCall(method, path, request, &responseBuffer)
if err != nil {
return makeError(err, "hnsCall ", "")
}
response := convertAndFreeCoTaskMemString(responseBuffer)
hnsresponse := &hnsResponse{}
if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
return err
}
if !hnsresponse.Success {
return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
}
if len(hnsresponse.Output) == 0 {
return nil
}
logrus.Debugf("Network Response : %s", hnsresponse.Output)
err = json.Unmarshal(hnsresponse.Output, returnResponse)
if err != nil {
return err
}
return nil
}
// HNSNetworkRequest makes a call into HNS to update/query a single network
func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
var network HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return &network, nil
}
// HNSListNetworkRequest makes an HNS call to query the list of available networks
func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
var network []HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return network, nil
}
// HNSEndpointRequest makes an HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{}
err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
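
A hedged usage sketch for the HNS helpers above (Windows-only; assumes the package is imported as github.com/Microsoft/hcsshim, where this vendored copy lives):

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// GET /networks/ with an empty request body enumerates all networks.
	networks, err := hcsshim.HNSListNetworkRequest("GET", "", "")
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range networks {
		fmt.Printf("network %s (%s)\n", n.Name, n.Id)
	}
}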

193
vendor/github.com/Microsoft/hcsshim/importlayer.go generated vendored Normal file

@ -0,0 +1,193 @@
package hcsshim
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/Microsoft/go-winio"
"github.com/Sirupsen/logrus"
)
// ImportLayer will take the contents of the folder at importFolderPath and import
// that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interpret the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ImportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = importLayer(&infop, layerID, importFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
return nil
}
// LayerWriter is an interface that supports writing a new container image layer.
type LayerWriter interface {
// Add adds a file to the layer with given metadata.
Add(name string, fileInfo *winio.FileBasicInfo) error
// AddLink adds a hard link to the layer. The target must already have been added.
AddLink(name string, target string) error
// Remove removes a file that was present in a parent layer from the layer.
Remove(name string) error
// Write writes data to the current file. The data must be in the format of a Win32
// backup stream.
Write(b []byte) (int, error)
// Close finishes the layer writing process and releases any resources.
Close() error
}
// FilterLayerWriter provides an interface to write the contents of a layer to the file system.
type FilterLayerWriter struct {
context uintptr
}
// Add adds a file or directory to the layer. The file's parent directory must have already been added.
//
// name contains the file's relative path. fileInfo contains file times and file attributes; the rest
// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method.
// winio.BackupStreamWriter can be used to facilitate this.
func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, fileInfo)
if err != nil {
return makeError(err, "ImportLayerNext", "")
}
return nil
}
// AddLink adds a hard link to the layer. The target of the link must have already been added.
func (w *FilterLayerWriter) AddLink(name string, target string) error {
return errors.New("hard links not yet supported")
}
// Remove removes a file from the layer. The file must have been present in the parent layer.
//
// name contains the file's relative path.
func (w *FilterLayerWriter) Remove(name string) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, nil)
if err != nil {
return makeError(err, "ImportLayerNext", "")
}
return nil
}
// Write writes more backup stream data to the current file.
func (w *FilterLayerWriter) Write(b []byte) (int, error) {
err := importLayerWrite(w.context, b)
if err != nil {
err = makeError(err, "ImportLayerWrite", "")
return 0, err
}
return len(b), err
}
// Close completes the layer write operation. The error must be checked to ensure that the
// operation was successful.
func (w *FilterLayerWriter) Close() (err error) {
if w.context != 0 {
err = importLayerEnd(w.context)
if err != nil {
err = makeError(err, "ImportLayerEnd", "")
}
w.context = 0
}
return
}
type legacyLayerWriterWrapper struct {
*legacyLayerWriter
info DriverInfo
layerID string
path string
parentLayerPaths []string
}
func (r *legacyLayerWriterWrapper) Close() error {
err := r.legacyLayerWriter.Close()
if err == nil {
var fullPath string
// Use the original path here because ImportLayer does not support long paths for the source in TP5.
// But do use a long path for the destination to work around another bug with directories
// with MAX_PATH - 12 < length < MAX_PATH.
info := r.info
fullPath, err = makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID))
if err == nil {
info.HomeDir = ""
err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths)
}
}
os.RemoveAll(r.root)
return err
}
// NewLayerWriter returns a new layer writer for creating a layer on disk.
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter.
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently.
return &baseLayerWriter{
root: filepath.Join(info.HomeDir, layerID),
}, nil
}
if procImportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
path, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
return &legacyLayerWriterWrapper{
legacyLayerWriter: newLegacyLayerWriter(path),
info: info,
layerID: layerID,
path: path,
parentLayerPaths: parentLayerPaths,
}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
w := &FilterLayerWriter{}
err = importLayerBegin(&infop, layerID, layers, &w.context)
if err != nil {
return nil, makeError(err, "ImportLayerStart", "")
}
runtime.SetFinalizer(w, func(w *FilterLayerWriter) { w.Close() })
return w, nil
}
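
A hypothetical sketch of driving the LayerWriter contract above: Add records a file's times and attributes, and the remaining metadata plus the file data go out as a Win32 backup stream, for which winio's BackupStreamWriter is a natural fit. All inputs here are placeholders:

// writeOneFileLayer is illustrative only, not vendored code.
func writeOneFileLayer(info hcsshim.DriverInfo, parents []string,
	fi *winio.FileBasicInfo, data []byte) error {
	w, err := hcsshim.NewLayerWriter(info, "example-layer", parents)
	if err != nil {
		return err
	}
	if err := w.Add(`Files\hello.txt`, fi); err != nil {
		return err
	}
	// Wrap the layer writer so the file contents are written as a
	// BackupData stream, as the Write contract requires.
	bw := winio.NewBackupStreamWriter(w)
	hdr := &winio.BackupHeader{Id: winio.BackupData, Size: int64(len(data))}
	if err := bw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := bw.Write(data); err != nil {
		return err
	}
	return w.Close()
}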

146
vendor/github.com/Microsoft/hcsshim/interface.go generated vendored Normal file

@ -0,0 +1,146 @@
package hcsshim
import (
"io"
"time"
)
// ProcessConfig is used as both the input of Container.CreateProcess
// and to convert the parameters to JSON for passing onto the HCS
type ProcessConfig struct {
ApplicationName string
CommandLine string
WorkingDirectory string
Environment map[string]string
EmulateConsole bool
CreateStdInPipe bool
CreateStdOutPipe bool
CreateStdErrPipe bool
ConsoleSize [2]uint
}
type Layer struct {
ID string
Path string
}
type MappedDir struct {
HostPath string
ContainerPath string
ReadOnly bool
BandwidthMaximum uint64
IOPSMaximum uint64
}
type HvRuntime struct {
ImagePath string `json:",omitempty"`
SkipTemplate bool `json:",omitempty"`
}
// ContainerConfig is used as both the input of CreateContainer
// and to convert the parameters to JSON for passing onto the HCS
type ContainerConfig struct {
SystemType string // HCS requires this to be hard-coded to "Container"
Name string // Name of the container. We use the docker ID.
Owner string // The management platform that created this container
IsDummy bool // Used for development purposes.
VolumePath string // Windows volume path for scratch space
IgnoreFlushesDuringBoot bool // Optimization hint for container startup in Windows
LayerFolderPath string // Where the layer folders are located
Layers []Layer // List of storage layers
Credentials string `json:",omitempty"` // Credentials information
ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
ProcessorWeight uint64 `json:",omitempty"` // CPU Shares 0..10000 on Windows; where 0 will be omitted and HCS will default.
ProcessorMaximum int64 `json:",omitempty"` // CPU maximum usage percent 1..100
StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
HostName string // Hostname
MappedDirectories []MappedDir // List of mapped directories (volumes/mounts)
SandboxPath string // Location of unmounted sandbox (used for Hyper-V containers)
HvPartition bool // True if it is a Hyper-V Container
EndpointList []string // List of networking endpoints to be attached to container
HvRuntime *HvRuntime // Hyper-V container settings
Servicing bool // True if this container is for servicing
AllowUnqualifiedDNSQuery bool // True to allow unqualified DNS name resolution
}
// Container represents a created (but not necessarily running) container.
type Container interface {
// Start synchronously starts the container.
Start() error
// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
Shutdown() error
// Terminate requests that a container be terminated, but it may not actually be terminated until Wait() succeeds.
Terminate() error
// Wait synchronously waits for the container to shut down or terminate.
Wait() error
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
// returns an error if the timeout elapses.
WaitTimeout(time.Duration) error
// Pause pauses the execution of a container.
Pause() error
// Resume resumes the execution of a container.
Resume() error
// HasPendingUpdates returns true if the container has updates pending to install.
HasPendingUpdates() (bool, error)
// Statistics returns statistics for a container.
Statistics() (Statistics, error)
// ProcessList returns details for the processes in a container.
ProcessList() ([]ProcessListItem, error)
// CreateProcess launches a new process within the container.
CreateProcess(c *ProcessConfig) (Process, error)
// OpenProcess gets an interface to an existing process within the container.
OpenProcess(pid int) (Process, error)
// Close cleans up any state associated with the container but does not terminate or wait for it.
Close() error
}
// Process represents a running or exited process.
type Process interface {
// Pid returns the process ID of the process within the container.
Pid() int
// Kill signals the process to terminate but does not wait for it to finish terminating.
Kill() error
// Wait waits for the process to exit.
Wait() error
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// an error if the timeout elapses.
WaitTimeout(time.Duration) error
// ExitCode returns the exit code of the process. The process must have
// already terminated.
ExitCode() (int, error)
// ResizeConsole resizes the console of the process.
ResizeConsole(width, height uint16) error
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
CloseStdin() error
// Close cleans up any state associated with the process but does not kill
// or wait on it.
Close() error
}
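
A hedged sketch of the Container/Process contract above, assuming a Container was created elsewhere; the command line is illustrative:

func runInContainer(c hcsshim.Container) (int, error) {
	p, err := c.CreateProcess(&hcsshim.ProcessConfig{
		CommandLine:      `cmd /c echo hello`,
		WorkingDirectory: `C:\`,
		CreateStdOutPipe: true,
	})
	if err != nil {
		return 0, err
	}
	defer p.Close()
	if err := p.Wait(); err != nil {
		return 0, err
	}
	// ExitCode is only valid once the process has terminated.
	return p.ExitCode()
}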

30
vendor/github.com/Microsoft/hcsshim/layerexists.go generated vendored Normal file

@ -0,0 +1,30 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
func LayerExists(info DriverInfo, id string) (bool, error) {
title := "hcsshim::LayerExists "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return false, err
}
// Call the procedure itself.
var exists uint32
err = layerExists(&infop, id, &exists)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return false, err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists)
return exists != 0, nil
}
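
A short hypothetical sketch of LayerExists; DriverInfo is defined in layerutils.go below, where the C enum shows FilterDriver == 1 (the flavour used for container filter layers). Paths and IDs are placeholders:

info := hcsshim.DriverInfo{Flavour: 1, HomeDir: `C:\layers`}
if ok, err := hcsshim.LayerExists(info, "layer-id"); err == nil && !ok {
	// The layer is unknown to the system; import or create it here.
}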

111
vendor/github.com/Microsoft/hcsshim/layerutils.go generated vendored Normal file

@ -0,0 +1,111 @@
package hcsshim
// This file contains utility functions to support storage (graph) related
// functionality.
import (
"path/filepath"
"syscall"
"github.com/Sirupsen/logrus"
)
/* To pass into syscall, we need a struct matching the following:
enum GraphDriverType
{
DiffDriver,
FilterDriver
};
struct DriverInfo {
GraphDriverType Flavour;
LPCWSTR HomeDir;
};
*/
type DriverInfo struct {
Flavour int
HomeDir string
}
type driverInfo struct {
Flavour int
HomeDirp *uint16
}
func convertDriverInfo(info DriverInfo) (driverInfo, error) {
homedirp, err := syscall.UTF16PtrFromString(info.HomeDir)
if err != nil {
logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error())
return driverInfo{}, err
}
return driverInfo{
Flavour: info.Flavour,
HomeDirp: homedirp,
}, nil
}
/* To pass into syscall, we need a struct matching the following:
typedef struct _WC_LAYER_DESCRIPTOR {
//
// The ID of the layer
//
GUID LayerId;
//
// Additional flags
//
union {
struct {
ULONG Reserved : 31;
ULONG Dirty : 1; // Created from sandbox as a result of snapshot
};
ULONG Value;
} Flags;
//
// Path to the layer root directory, null-terminated
//
PCWSTR Path;
} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
*/
type WC_LAYER_DESCRIPTOR struct {
LayerId GUID
Flags uint32
Pathp *uint16
}
func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
// Array of descriptors that gets constructed.
var layers []WC_LAYER_DESCRIPTOR
for i := 0; i < len(parentLayerPaths); i++ {
// Create a layer descriptor, using the folder name
// as the source for a GUID LayerId
_, folderName := filepath.Split(parentLayerPaths[i])
g, err := NameToGuid(folderName)
if err != nil {
logrus.Debugf("Failed to convert name to guid %s", err)
return nil, err
}
p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
if err != nil {
logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
return nil, err
}
layers = append(layers, WC_LAYER_DESCRIPTOR{
LayerId: g,
Flags: 0,
Pathp: p,
})
}
return layers, nil
}

441
vendor/github.com/Microsoft/hcsshim/legacy.go generated vendored Normal file

@ -0,0 +1,441 @@
package hcsshim
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/go-winio"
)
var errorIterationCanceled = errors.New("")
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
}
func makeLongAbsPath(path string) (string, error) {
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
return path, nil
}
if !filepath.IsAbs(path) {
absPath, err := filepath.Abs(path)
if err != nil {
return "", err
}
path = absPath
}
if strings.HasPrefix(path, `\\`) {
return `\\?\UNC\` + path[2:], nil
}
return `\\?\` + path, nil
}
type fileEntry struct {
path string
fi os.FileInfo
err error
}
type legacyLayerReader struct {
root string
result chan *fileEntry
proceed chan bool
currentFile *os.File
backupReader *winio.BackupFileReader
isTP4Format bool
}
// newLegacyLayerReader returns a new LayerReader that can read the Windows
// TP4 transport format from disk.
func newLegacyLayerReader(root string) *legacyLayerReader {
r := &legacyLayerReader{
root: root,
result: make(chan *fileEntry),
proceed: make(chan bool),
isTP4Format: IsTP4(),
}
go r.walk()
return r
}
func readTombstones(path string) (map[string]([]string), error) {
tf, err := os.Open(filepath.Join(path, "tombstones.txt"))
if err != nil {
return nil, err
}
defer tf.Close()
s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
return nil, errors.New("Invalid tombstones file")
}
ts := make(map[string]([]string))
for s.Scan() {
t := filepath.Join("Files", s.Text()[1:]) // skip leading `\`
dir := filepath.Dir(t)
ts[dir] = append(ts[dir], t)
}
if err = s.Err(); err != nil {
return nil, err
}
return ts, nil
}
func (r *legacyLayerReader) walkUntilCancelled() error {
root, err := makeLongAbsPath(r.root)
if err != nil {
return err
}
r.root = root
ts, err := readTombstones(r.root)
if err != nil {
return err
}
err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
return nil
}
r.result <- &fileEntry{path, info, nil}
if !<-r.proceed {
return errorIterationCanceled
}
// List all the tombstones.
if info.IsDir() {
relPath, err := filepath.Rel(r.root, path)
if err != nil {
return err
}
if dts, ok := ts[relPath]; ok {
for _, t := range dts {
r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil}
if !<-r.proceed {
return errorIterationCanceled
}
}
}
}
return nil
})
if err == errorIterationCanceled {
return nil
}
if err == nil {
return io.EOF
}
return err
}
func (r *legacyLayerReader) walk() {
defer close(r.result)
if !<-r.proceed {
return
}
err := r.walkUntilCancelled()
if err != nil {
for {
r.result <- &fileEntry{err: err}
if !<-r.proceed {
return
}
}
}
}
func (r *legacyLayerReader) reset() {
if r.backupReader != nil {
r.backupReader.Close()
r.backupReader = nil
}
if r.currentFile != nil {
r.currentFile.Close()
r.currentFile = nil
}
}
func findBackupStreamSize(r io.Reader) (int64, error) {
br := winio.NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err != nil {
if err == io.EOF {
err = nil
}
return 0, err
}
if hdr.Id == winio.BackupData {
return hdr.Size, nil
}
}
}
func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
r.reset()
r.proceed <- true
fe := <-r.result
if fe == nil {
err = errors.New("LegacyLayerReader closed")
return
}
if fe.err != nil {
err = fe.err
return
}
path, err = filepath.Rel(r.root, fe.path)
if err != nil {
return
}
if fe.fi == nil {
// This is a tombstone. Return a nil fileInfo.
return
}
if fe.fi.IsDir() && strings.HasPrefix(path, `Files\`) {
fe.path += ".$wcidirs$"
}
f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
if err != nil {
return
}
defer func() {
if f != nil {
f.Close()
}
}()
fileInfo, err = winio.GetFileBasicInfo(f)
if err != nil {
return
}
if !strings.HasPrefix(path, `Files\`) {
size = fe.fi.Size()
r.backupReader = winio.NewBackupFileReader(f, false)
if path == "Hives" || path == "Files" {
// The Hives directory has a non-deterministic file time because of the
// nature of the import process. Use the times from System_Delta.
var g *os.File
g, err = os.Open(filepath.Join(r.root, `Hives\System_Delta`))
if err != nil {
return
}
attr := fileInfo.FileAttributes
fileInfo, err = winio.GetFileBasicInfo(g)
g.Close()
if err != nil {
return
}
fileInfo.FileAttributes = attr
}
// The creation time and access time get reset for files outside of the Files path.
fileInfo.CreationTime = fileInfo.LastWriteTime
fileInfo.LastAccessTime = fileInfo.LastWriteTime
} else {
beginning := int64(0)
if !r.isTP4Format {
// In TP5, the file attributes were added before the backup stream
var attr uint32
err = binary.Read(f, binary.LittleEndian, &attr)
if err != nil {
return
}
fileInfo.FileAttributes = uintptr(attr)
beginning = 4
}
// Find the accurate file size.
if !fe.fi.IsDir() {
size, err = findBackupStreamSize(f)
if err != nil {
err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
return
}
}
// Return back to the beginning of the backup stream.
_, err = f.Seek(beginning, 0)
if err != nil {
return
}
}
r.currentFile = f
f = nil
return
}
func (r *legacyLayerReader) Read(b []byte) (int, error) {
if r.backupReader == nil {
if r.currentFile == nil {
return 0, io.EOF
}
return r.currentFile.Read(b)
}
return r.backupReader.Read(b)
}
func (r *legacyLayerReader) Close() error {
r.proceed <- false
<-r.result
r.reset()
return nil
}
type legacyLayerWriter struct {
root string
currentFile *os.File
backupWriter *winio.BackupFileWriter
tombstones []string
isTP4Format bool
pathFixed bool
}
// newLegacyLayerWriter returns a LayerWriter that can write the TP4 transport format
// to disk.
func newLegacyLayerWriter(root string) *legacyLayerWriter {
return &legacyLayerWriter{
root: root,
isTP4Format: IsTP4(),
}
}
func (w *legacyLayerWriter) init() error {
if !w.pathFixed {
path, err := makeLongAbsPath(w.root)
if err != nil {
return err
}
w.root = path
w.pathFixed = true
}
return nil
}
func (w *legacyLayerWriter) reset() {
if w.backupWriter != nil {
w.backupWriter.Close()
w.backupWriter = nil
}
if w.currentFile != nil {
w.currentFile.Close()
w.currentFile = nil
}
}
func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
w.reset()
err := w.init()
if err != nil {
return err
}
path := filepath.Join(w.root, name)
createDisposition := uint32(syscall.CREATE_NEW)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
err := os.Mkdir(path, 0)
if err != nil {
return err
}
path += ".$wcidirs$"
}
f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, createDisposition)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
os.Remove(path)
}
}()
strippedFi := *fileInfo
strippedFi.FileAttributes = 0
err = winio.SetFileBasicInfo(f, &strippedFi)
if err != nil {
return err
}
if strings.HasPrefix(name, `Hives\`) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
} else {
if !w.isTP4Format {
// In TP5, the file attributes were added to the header
err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes))
if err != nil {
return err
}
}
}
w.currentFile = f
f = nil
return nil
}
func (w *legacyLayerWriter) AddLink(name string, target string) error {
return errors.New("hard links not supported with legacy writer")
}
func (w *legacyLayerWriter) Remove(name string) error {
if !strings.HasPrefix(name, `Files\`) {
return fmt.Errorf("invalid tombstone %s", name)
}
w.tombstones = append(w.tombstones, name[len(`Files\`):])
return nil
}
func (w *legacyLayerWriter) Write(b []byte) (int, error) {
if w.backupWriter == nil {
if w.currentFile == nil {
return 0, errors.New("closed")
}
return w.currentFile.Write(b)
}
return w.backupWriter.Write(b)
}
func (w *legacyLayerWriter) Close() error {
w.reset()
err := w.init()
if err != nil {
return err
}
tf, err := os.Create(filepath.Join(w.root, "tombstones.txt"))
if err != nil {
return err
}
defer tf.Close()
_, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n"))
if err != nil {
return err
}
for _, t := range w.tombstones {
_, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n"))
if err != nil {
return err
}
}
return nil
}
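
A hypothetical sketch of consuming the (unexported) legacy reader above: Next yields one entry at a time, a nil *winio.FileBasicInfo marks a tombstone, and Read streams the current entry's data until io.EOF:

r := newLegacyLayerReader(`C:\layer-export`)
for {
	path, size, fi, err := r.Next()
	if err == io.EOF {
		break // walk finished
	}
	if err != nil {
		log.Fatal(err)
	}
	if fi == nil {
		fmt.Println("tombstone:", path) // deleted relative to a parent layer
		continue
	}
	fmt.Println("entry:", path, size)
	if _, err := io.Copy(ioutil.Discard, r); err != nil { // drain entry data
		log.Fatal(err)
	}
}
r.Close()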

818
vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go generated vendored Normal file

@ -0,0 +1,818 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
mksyscall_windows generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument. This
includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to be different from its winapi dll name,
the winapi name can be specified at the end, after the "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
* Each function that returns err needs to supply a condition that the
winapi return value will be tested against to detect failure. On failure,
err is set to the windows "last-error"; otherwise it is nil.
The value can be provided at the end of the //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
Usage:
mksyscall_windows [flags] [path ...]
The flags are:
-output
Specify output file name (outputs to console if blank).
-trace
Generate print statement after every syscall.
*/
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/format"
"go/parser"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"text/template"
)
var (
filename = flag.String("output", "", "output file name (standard output if omitted)")
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
)
func trim(s string) string {
return strings.Trim(s, " \t")
}
var packageName string
func packagename() string {
return packageName
}
func syscalldot() string {
if packageName == "syscall" {
return ""
}
return "syscall."
}
// Param is function parameter
type Param struct {
Name string
Type string
fn *Fn
tmpVarIdx int
}
// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
if p.tmpVarIdx < 0 {
p.tmpVarIdx = p.fn.curTmpVarIdx
p.fn.curTmpVarIdx++
}
return fmt.Sprintf("_p%d", p.tmpVarIdx)
}
// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
const code = `var %s uint32
if %s {
%s = 1
} else {
%s = 0
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}
// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
const code = `var %s *%s
if len(%s) > 0 {
%s = &%s[0]
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}
// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
errvar := p.fn.Rets.ErrorVarName()
if errvar == "" {
errvar = "_"
}
tmp := p.tmpVar()
const code = `var %s %s
%s, %s = %s(%s)`
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
if errvar == "-" {
return s
}
const morecode = `
if %s != nil {
return
}`
return s + fmt.Sprintf(morecode, errvar)
}
// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
switch {
case p.Type == "bool":
return p.BoolTmpVarCode()
case strings.HasPrefix(p.Type, "[]"):
return p.SliceTmpVarCode()
default:
return ""
}
}
// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
if p.Type != "string" {
return ""
}
return p.StringTmpVarCode()
}
// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
t := p.HelperType()
var s string
switch {
case t[0] == '*':
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
case t == "bool":
s = p.tmpVar()
case strings.HasPrefix(t, "[]"):
return []string{
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
fmt.Sprintf("uintptr(len(%s))", p.Name),
}
default:
s = p.Name
}
return []string{fmt.Sprintf("uintptr(%s)", s)}
}
// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
return p.Name == "err" && p.Type == "error"
}
// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
if p.Type == "string" {
return p.fn.StrconvType()
}
return p.Type
}
// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into string by applying fn to it
// before conversion.
func join(ps []*Param, fn func(*Param) string, sep string) string {
if len(ps) == 0 {
return ""
}
a := make([]string, 0)
for _, p := range ps {
a = append(a, fn(p))
}
return strings.Join(a, sep)
}
// Rets describes function return parameters.
type Rets struct {
Name string
Type string
ReturnsError bool
FailCond string
}
// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
if r.ReturnsError {
return "err"
}
if r.Type == "error" {
return r.Name
}
return ""
}
// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
ps := make([]*Param, 0)
if len(r.Name) > 0 {
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
}
if r.ReturnsError {
ps = append(ps, &Param{Name: "err", Type: "error"})
}
return ps
}
// List returns source code of syscall return parameters.
func (r *Rets) List() string {
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
if len(s) > 0 {
s = "(" + s + ")"
}
return s
}
// PrintList returns source code of the trace printing part corresponding
// to syscall return values.
func (r *Rets) PrintList() string {
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
if r.Name == "" && !r.ReturnsError {
return ""
}
retvar := "r0"
if r.Name == "" {
retvar = "r1"
}
errvar := "_"
if r.ReturnsError {
errvar = "e1"
}
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}
func (r *Rets) useLongHandleErrorCode(retvar string) string {
const code = `if %s {
if e1 != 0 {
err = error(e1)
} else {
err = %sEINVAL
}
}`
cond := retvar + " == 0"
if r.FailCond != "" {
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
}
return fmt.Sprintf(code, cond, syscalldot())
}
// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
const code = `if r0 != 0 {
%s = %sErrno(r0)
}`
const hrCode = `if int32(r0) < 0 {
%s = %sErrno(win32FromHresult(r0))
}`
if r.Name == "" && !r.ReturnsError {
return ""
}
if r.Name == "" {
return r.useLongHandleErrorCode("r1")
}
if r.Type == "error" {
if r.Name == "hr" {
return fmt.Sprintf(hrCode, r.Name, syscalldot())
} else {
return fmt.Sprintf(code, r.Name, syscalldot())
}
}
s := ""
switch {
case r.Type[0] == '*':
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
case r.Type == "bool":
s = fmt.Sprintf("%s = r0 != 0", r.Name)
default:
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
}
if !r.ReturnsError {
return s
}
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}
// Fn describes syscall function.
type Fn struct {
Name string
Params []*Param
Rets *Rets
PrintTrace bool
confirmproc bool
dllname string
dllfuncname string
src string
// TODO: get rid of this field and just use parameter index instead
curTmpVarIdx int // ensure tmp variables have unique names
}
// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
s = trim(s)
if s == "" {
return nil, nil
}
a := strings.Split(s, ",")
ps := make([]*Param, len(a))
for i := range ps {
s2 := trim(a[i])
b := strings.Split(s2, " ")
if len(b) != 2 {
b = strings.Split(s2, "\t")
if len(b) != 2 {
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
}
}
ps[i] = &Param{
Name: trim(b[0]),
Type: trim(b[1]),
fn: f,
tmpVarIdx: -1,
}
}
return ps, nil
}
// extractSection extracts text out of string s starting after start
// and ending just before end. The found return value indicates success,
// and prefix, body, and suffix contain the corresponding parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
s = trim(s)
if strings.HasPrefix(s, string(start)) {
// no prefix
body = s[1:]
} else {
a := strings.SplitN(s, string(start), 2)
if len(a) != 2 {
return "", "", s, false
}
prefix = a[0]
body = a[1]
}
a := strings.SplitN(body, string(end), 2)
if len(a) != 2 {
return "", "", "", false
}
return prefix, a[0], a[1], true
}
// newFn parses string s and returns the created function Fn.
func newFn(s string) (*Fn, error) {
s = trim(s)
f := &Fn{
Rets: &Rets{},
src: s,
PrintTrace: *printTraceFlag,
}
// function name and args
prefix, body, s, found := extractSection(s, '(', ')')
if !found || prefix == "" {
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
}
f.Name = prefix
var err error
f.Params, err = extractParams(body, f)
if err != nil {
return nil, err
}
// return values
_, body, s, found = extractSection(s, '(', ')')
if found {
r, err := extractParams(body, f)
if err != nil {
return nil, err
}
switch len(r) {
case 0:
case 1:
if r[0].IsError() {
f.Rets.ReturnsError = true
} else {
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
}
case 2:
if !r[1].IsError() {
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
}
f.Rets.ReturnsError = true
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
default:
return nil, errors.New("Too many return values in \"" + f.src + "\"")
}
}
// fail condition
_, body, s, found = extractSection(s, '[', ']')
if found {
f.Rets.FailCond = body
}
// dll and dll function names
s = trim(s)
if s == "" {
return f, nil
}
if !strings.HasPrefix(s, "=") {
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
s = trim(s[1:])
a := strings.Split(s, ".")
switch len(a) {
case 1:
f.dllfuncname = a[0]
case 2:
f.dllname = a[0]
f.dllfuncname = a[1]
default:
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
f.confirmproc = true
f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
}
return f, nil
}
// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
if f.dllname == "" {
return "kernel32"
}
return f.dllname
}
// DLLFuncName returns the DLL function name for function f.
func (f *Fn) DLLFuncName() string {
if f.dllfuncname == "" {
return f.Name
}
return f.dllfuncname
}
func (f *Fn) ConfirmProc() bool {
return f.confirmproc
}
// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}
// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}
// ParamPrintList returns source code of the trace printing part corresponding
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// ParamCount returns the number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
n := 0
for _, p := range f.Params {
n += len(p.SyscallArgList())
}
return n
}
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns the parameter count for the corresponding SyscallX function.
func (f *Fn) SyscallParamCount() int {
n := f.ParamCount()
switch {
case n <= 3:
return 3
case n <= 6:
return 6
case n <= 9:
return 9
case n <= 12:
return 12
case n <= 15:
return 15
default:
panic("too many arguments to system call")
}
}
// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
c := f.SyscallParamCount()
if c == 3 {
return syscalldot() + "Syscall"
}
return syscalldot() + "Syscall" + strconv.Itoa(c)
}
// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
a := make([]string, 0)
for _, p := range f.Params {
a = append(a, p.SyscallArgList()...)
}
for len(a) < f.SyscallParamCount() {
a = append(a, "0")
}
return strings.Join(a, ", ")
}
// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
a := make([]string, 0, len(f.Params))
for _, p := range f.Params {
s := p.Name
if p.Type == "string" {
s = p.tmpVar()
}
a = append(a, s)
}
return strings.Join(a, ", ")
}
// IsUTF16 is true if f is a W (utf16) function; it is false
// for all A (ascii) functions.
func (_ *Fn) IsUTF16() bool {
return true
}
// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
if f.IsUTF16() {
return syscalldot() + "UTF16PtrFromString"
}
return syscalldot() + "BytePtrFromString"
}
// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
if f.IsUTF16() {
return "*uint16"
}
return "*byte"
}
// HasStringParam is true if f has at least one string parameter;
// otherwise it is false.
func (f *Fn) HasStringParam() bool {
for _, p := range f.Params {
if p.Type == "string" {
return true
}
}
return false
}
var uniqDllFuncName = make(map[string]bool)
// IsNotDuplicate is true if f is not a duplicated function
func (f *Fn) IsNotDuplicate() bool {
funcName := f.DLLFuncName()
if !uniqDllFuncName[funcName] {
uniqDllFuncName[funcName] = true
return true
}
return false
}
// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
if !f.HasStringParam() {
return f.Name
}
return "_" + f.Name
}
// Source files and functions.
type Source struct {
Funcs []*Fn
Files []string
}
// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns source files
// and functions collection *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
src := &Source{
Funcs: make([]*Fn, 0),
Files: make([]string, 0),
}
for _, file := range fs {
if err := src.ParseFile(file); err != nil {
return nil, err
}
}
return src, nil
}
// DLLs return dll names for a source set src.
func (src *Source) DLLs() []string {
uniq := make(map[string]bool)
r := make([]string, 0)
for _, f := range src.Funcs {
name := f.DLLName()
if _, found := uniq[name]; !found {
uniq[name] = true
r = append(r, name)
}
}
return r
}
// ParseFile adds additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
s := bufio.NewScanner(file)
for s.Scan() {
t := trim(s.Text())
if len(t) < 7 {
continue
}
if !strings.HasPrefix(t, "//sys") {
continue
}
t = t[5:]
if !(t[0] == ' ' || t[0] == '\t') {
continue
}
f, err := newFn(t[1:])
if err != nil {
return err
}
src.Funcs = append(src.Funcs, f)
}
if err := s.Err(); err != nil {
return err
}
src.Files = append(src.Files, path)
// get package name
fset := token.NewFileSet()
_, err = file.Seek(0, 0)
if err != nil {
return err
}
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
if err != nil {
return err
}
packageName = pkg.Name.Name
return nil
}
// Generate output source file from a source set src.
func (src *Source) Generate(w io.Writer) error {
funcMap := template.FuncMap{
"packagename": packagename,
"syscalldot": syscalldot,
}
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
err := t.Execute(w, src)
if err != nil {
return errors.New("Failed to execute template: " + err.Error())
}
return nil
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(1)
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
src, err := ParseFiles(flag.Args())
if err != nil {
log.Fatal(err)
}
var buf bytes.Buffer
if err := src.Generate(&buf); err != nil {
log.Fatal(err)
}
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
if *filename == "" {
_, err = os.Stdout.Write(data)
} else {
err = ioutil.WriteFile(*filename, data, 0644)
}
if err != nil {
log.Fatal(err)
}
}
// TODO: use println instead to print in the following template
const srcTemplate = `
{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package {{packagename}}
import "github.com/Microsoft/go-winio"
import "unsafe"{{if syscalldot}}
import "syscall"{{end}}
var _ unsafe.Pointer
var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}
{{/* help functions */}}
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{syscalldot}}NewLazyDLL("{{.}}.dll")
{{end}}{{end}}
{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
{{end}}{{end}}
{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}
{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}} return
}
{{end}}
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
{{end}}{{end}}{{end}}
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
return
}
{{end}}{{end}}
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
{{end}}{{end}}
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}
`
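
To make the template above concrete, this is roughly what it expands to for the one-argument //sys hcsCloseProcess declaration at the top of this commit (an approximate sketch, not actual generator output):

// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package hcsshim

import "unsafe"
import "syscall"

var _ unsafe.Pointer

var (
	modvmcompute        = syscall.NewLazyDLL("vmcompute.dll")
	procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess")
)

func hcsCloseProcess(process hcsProcess) (hr error) {
	// The trailing '?' on the //sys line sets ConfirmProc, so the stub first
	// verifies the procedure exists on this Windows build.
	if hr = procHcsCloseProcess.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
	// A return value named "hr" selects the HRESULT error path in SetErrorCode.
	if int32(r0) < 0 {
		hr = syscall.Errno(win32FromHresult(r0))
	}
	return
}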

20
vendor/github.com/Microsoft/hcsshim/nametoguid.go generated vendored Normal file

@ -0,0 +1,20 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id GUID, err error) {
title := "hcsshim::NameToGuid "
logrus.Debugf(title+"Name %s", name)
err = nameToGuid(name, &id)
if err != nil {
err = makeErrorf(err, title, "name=%s", name)
logrus.Error(err)
return
}
return
}
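
A tiny hypothetical usage sketch; this is the same mapping layerPathsToDescriptors (earlier in this commit) relies on to derive a stable LayerId from a layer folder name:

g, err := hcsshim.NameToGuid("example-layer-folder")
if err == nil {
	fmt.Printf("guid: %v\n", g)
}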

36
vendor/github.com/Microsoft/hcsshim/preparelayer.go generated vendored Normal file

@ -0,0 +1,36 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// PrepareLayer finds a mounted read-write layer matching layerId and enables
// the filesystem filter for use on that layer. This requires the paths to all
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
title := "hcsshim::PrepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = prepareLayer(&infop, layerId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
return nil
}

389
vendor/github.com/Microsoft/hcsshim/process.go generated vendored Normal file

@ -0,0 +1,389 @@
package hcsshim
import (
"encoding/json"
"io"
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
// process represents a process running in a container.
type process struct {
handle hcsProcess
processID int
container *container
cachedPipes *cachedPipes
callbackNumber uintptr
}
type cachedPipes struct {
stdIn syscall.Handle
stdOut syscall.Handle
stdErr syscall.Handle
}
type processModifyRequest struct {
Operation string
ConsoleSize *consoleSize `json:",omitempty"`
CloseHandle *closeHandle `json:",omitempty"`
}
type consoleSize struct {
Height uint16
Width uint16
}
type closeHandle struct {
Handle string
}
type processStatus struct {
ProcessID uint32
Exited bool
ExitCode uint32
LastWaitResult int32
}
const (
stdIn string = "StdIn"
stdOut string = "StdOut"
stdErr string = "StdErr"
)
const (
modifyConsoleSize string = "ConsoleSize"
modifyCloseHandle string = "CloseHandle"
)
// Pid returns the process ID of the process within the container.
func (process *process) Pid() int {
return process.processID
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *process) Kill() error {
operation := "Kill"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var resultp *uint16
err := hcsTerminateProcess(process.handle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Wait waits for the process to exit.
func (process *process) Wait() error {
operation := "Wait"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if hcsCallbacksSupported {
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
return makeProcessError(process, operation, "", err)
}
} else {
_, err := process.waitTimeoutInternal(syscall.INFINITE)
if err != nil {
return makeProcessError(process, operation, "", err)
}
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// an error if the timeout elapses.
func (process *process) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if hcsCallbacksSupported {
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
return makeProcessError(process, operation, "", err)
}
} else {
finished, err := waitTimeoutHelper(process, timeout)
if !finished {
err = ErrTimeout
}
if err != nil {
return makeProcessError(process, operation, "", err)
}
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *process) hcsWait(timeout uint32) (bool, error) {
var (
resultp *uint16
exitEvent syscall.Handle
)
err := hcsCreateProcessWait(process.handle, &exitEvent, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return false, err
}
defer syscall.CloseHandle(exitEvent)
return waitForSingleObject(exitEvent, timeout)
}
func (process *process) waitTimeoutInternal(timeout uint32) (bool, error) {
return waitTimeoutInternalHelper(process, timeout)
}
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *process) ExitCode() (int, error) {
operation := "ExitCode"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
properties, err := process.properties()
if err != nil {
return 0, makeProcessError(process, operation, "", err)
}
if !properties.Exited {
return 0, makeProcessError(process, operation, "", ErrInvalidProcessState)
}
logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode)
return int(properties.ExitCode), nil
}
// ResizeConsole resizes the console of the process.
func (process *process) ResizeConsole(width, height uint16) error {
operation := "ResizeConsole"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
modifyRequest := processModifyRequest{
Operation: modifyConsoleSize,
ConsoleSize: &consoleSize{
Height: height,
Width: width,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *process) properties() (*processStatus, error) {
operation := "properties"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &processStatus{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
return properties, nil
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
operation := "Stdio"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var stdIn, stdOut, stdErr syscall.Handle
if process.cachedPipes == nil {
var (
processInfo hcsProcessInformation
resultp *uint16
)
err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, "", err)
}
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
} else {
// Use cached pipes
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
// Invalidate the cache
process.cachedPipes = nil
}
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return pipes[0], pipes[1], pipes[2], nil
}
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *process) CloseStdin() error {
operation := "CloseStdin"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
modifyRequest := processModifyRequest{
Operation: modifyCloseHandle,
CloseHandle: &closeHandle{
Handle: stdIn,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *process) Close() error {
operation := "Close"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
// Don't double free this
if process.handle == 0 {
return nil
}
if hcsCallbacksSupported {
if err := process.unregisterCallback(); err != nil {
return makeProcessError(process, operation, "", err)
}
}
if err := hcsCloseProcess(process.handle); err != nil {
return makeProcessError(process, operation, "", err)
}
process.handle = 0
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// closeProcess wraps process.Close for use by a finalizer
func closeProcess(process *process) {
process.Close()
}
func (process *process) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
process.callbackNumber = callbackNumber
return nil
}
func (process *process) unregisterCallback() error {
callbackNumber := process.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterProcessCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterProcessCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}

23
vendor/github.com/Microsoft/hcsshim/processimage.go generated vendored Normal file

@ -0,0 +1,23 @@
package hcsshim
import "os"
// ProcessBaseLayer post-processes a base layer that has had its files extracted.
// The files should have been extracted to <path>\Files.
func ProcessBaseLayer(path string) error {
err := processBaseImage(path)
if err != nil {
return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err}
}
return nil
}
// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted.
// The files should have been extracted to <path>\Files.
func ProcessUtilityVMImage(path string) error {
err := processUtilityImage(path)
if err != nil {
return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err}
}
return nil
}

27
vendor/github.com/Microsoft/hcsshim/unpreparelayer.go generated vendored Normal file

@ -0,0 +1,27 @@
package hcsshim
import "github.com/Sirupsen/logrus"
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
func UnprepareLayer(info DriverInfo, layerId string) error {
title := "hcsshim::UnprepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = unprepareLayer(&infop, layerId)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
return nil
}

39
vendor/github.com/Microsoft/hcsshim/utils.go generated vendored Normal file

@ -0,0 +1,39 @@
package hcsshim
import (
"io"
"syscall"
"github.com/Microsoft/go-winio"
)
var (
vmcomputedll = syscall.NewLazyDLL("vmcompute.dll")
hcsCallbackAPI = vmcomputedll.NewProc("HcsRegisterComputeSystemCallback")
hcsCallbacksSupported = hcsCallbackAPI.Find() == nil
)
// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles
// if there is an error.
func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
fs := make([]io.ReadWriteCloser, len(hs))
for i, h := range hs {
if h != syscall.Handle(0) {
if err == nil {
fs[i], err = winio.MakeOpenFile(h)
}
if err != nil {
syscall.Close(h)
}
}
}
if err != nil {
for _, f := range fs {
if f != nil {
f.Close()
}
}
return nil, err
}
return fs, nil
}

7
vendor/github.com/Microsoft/hcsshim/version.go generated vendored Normal file

@ -0,0 +1,7 @@
package hcsshim
// IsTP4 returns whether the currently running Windows build is at most TP4.
func IsTP4() bool {
// HNSCall was not present in TP4
return procHNSCall.Find() != nil
}

126
vendor/github.com/Microsoft/hcsshim/waithelper.go generated vendored Normal file

@ -0,0 +1,126 @@
package hcsshim
import (
"github.com/Sirupsen/logrus"
"syscall"
"time"
)
type waitable interface {
waitTimeoutInternal(timeout uint32) (bool, error)
hcsWait(timeout uint32) (bool, error)
}
func waitTimeoutHelper(object waitable, timeout time.Duration) (bool, error) {
var (
millis uint32
)
for totalMillis := uint64(timeout / time.Millisecond); totalMillis > 0; totalMillis = totalMillis - uint64(millis) {
if totalMillis >= syscall.INFINITE {
millis = syscall.INFINITE - 1
} else {
millis = uint32(totalMillis)
}
result, err := object.waitTimeoutInternal(millis)
if err != nil || !result {
return result, err
}
}
return true, nil
}
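The chunking above exists because the Win32 wait APIs take a 32-bit millisecond timeout, where 0xFFFFFFFF (syscall.INFINITE) means "wait forever"; any Duration therefore has to be paid out in slices strictly below that sentinel. A standalone sketch of the same arithmetic (the infinite constant and chunks helper are illustrative, not part of hcsshim):

package main

import (
	"fmt"
	"time"
)

const infinite = 0xFFFFFFFF // value of syscall.INFINITE on Windows

// chunks mirrors waitTimeoutHelper's loop: the timeout is split into
// uint32 millisecond slices, each strictly below the INFINITE sentinel.
func chunks(timeout time.Duration) []uint32 {
	var out []uint32
	var millis uint32
	for totalMillis := uint64(timeout / time.Millisecond); totalMillis > 0; totalMillis -= uint64(millis) {
		if totalMillis >= infinite {
			millis = infinite - 1
		} else {
			millis = uint32(totalMillis)
		}
		out = append(out, millis)
	}
	return out
}

func main() {
	fmt.Println(chunks(time.Minute))               // [60000]: one wait covers it
	fmt.Println(len(chunks(100 * 24 * time.Hour))) // 3: two ~49.7-day slices plus the remainder
}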
func waitTimeoutInternalHelper(object waitable, timeout uint32) (bool, error) {
return object.hcsWait(timeout)
}
func waitForSingleObject(handle syscall.Handle, timeout uint32) (bool, error) {
s, e := syscall.WaitForSingleObject(handle, timeout)
switch s {
case syscall.WAIT_OBJECT_0:
return true, nil
case syscall.WAIT_TIMEOUT:
return false, nil
default:
return false, e
}
}
func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
err = processHcsResult(err, resultp)
if IsPending(err) {
return waitForNotification(callbackNumber, expectedNotification, timeout)
}
return err
}
func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
callbackMapLock.RLock()
channels := callbackMap[callbackNumber].channels
callbackMapLock.RUnlock()
expectedChannel := channels[expectedNotification]
if expectedChannel == nil {
logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification)
return ErrInvalidNotificationType
}
if timeout != nil {
timer := time.NewTimer(*timeout)
defer timer.Stop()
select {
case err, ok := <-expectedChannel:
if !ok {
return ErrHandleClose
}
return err
case err, ok := <-channels[hcsNotificationSystemExited]:
if !ok {
return ErrHandleClose
}
// If the expected notification is hcsNotificationSystemExited, which of the two select cases
// is chosen is random. Return the raw error if hcsNotificationSystemExited is expected
if channels[hcsNotificationSystemExited] == expectedChannel {
return err
}
return ErrUnexpectedContainerExit
case _, ok := <-channels[hcsNotificationServiceDisconnect]:
if !ok {
return ErrHandleClose
}
// hcsNotificationServiceDisconnect should never be an expected notification;
// it does not need the same handling as hcsNotificationSystemExited.
return ErrUnexpectedProcessAbort
case <-timer.C:
return ErrTimeout
}
}
select {
case err, ok := <-expectedChannel:
if !ok {
return ErrHandleClose
}
return err
case err, ok := <-channels[hcsNotificationSystemExited]:
if !ok {
return ErrHandleClose
}
// If the expected notification is hcsNotificationSystemExited, which of the two select cases
// is chosen is random. Return the raw error if hcsNotificationSystemExited is expected
if channels[hcsNotificationSystemExited] == expectedChannel {
return err
}
return ErrUnexpectedContainerExit
case _, ok := <-channels[hcsNotificationServiceDisconnect]:
if !ok {
return ErrHandleClose
}
// hcsNotificationServiceDisconnect should never be an expected notification;
// it does not need the same handling as hcsNotificationSystemExited.
return ErrUnexpectedProcessAbort
}
}
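The duplicated comment about randomness refers to Go's select semantics: when more than one case is ready, one is picked pseudo-randomly, which matters above when expectedChannel is the hcsNotificationSystemExited channel itself. A minimal demonstration of that property (standalone, unrelated to HCS):

package main

import "fmt"

func main() {
	ch := make(chan int, 1)
	ch <- 1
	// Both cases receive from the same ready channel; Go chooses
	// between them pseudo-randomly, so which line prints varies
	// from run to run.
	select {
	case v := <-ch:
		fmt.Println("first case:", v)
	case v := <-ch:
		fmt.Println("second case:", v)
	}
}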

1317
vendor/github.com/Microsoft/hcsshim/zhcsshim.go generated vendored Normal file

File diff suppressed because it is too large


@ -36,24 +36,23 @@ const (
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
ctx *types.SystemContext
registry string
username string
password string
wwwAuthenticate string // Cache of a value set by ping() if scheme is not empty
scheme string // Cache of a value returned by a successful ping() if not empty
client *http.Client
signatureBase signatureStorageBase
}
// newDockerClient returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool) (*dockerClient, error) {
registry := ref.ref.Hostname()
if registry == dockerHostname {
func newDockerClient(ctx *types.SystemContext, refHostname string) (*dockerClient, error) {
var registry string
if refHostname == dockerHostname {
registry = dockerRegistry
} else {
registry = refHostname
}
username, password, err := getAuth(ref.ref.Hostname())
username, password, err := getAuth(refHostname)
if err != nil {
return nil, err
}
@ -79,19 +78,11 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool)
if tr != nil {
client.Transport = tr
}
sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
if err != nil {
return nil, err
}
return &dockerClient{
ctx: ctx,
registry: registry,
username: username,
password: password,
client: client,
signatureBase: sigBase,
registry: registry,
username: username,
password: password,
client: client,
}, nil
}
@ -334,9 +325,14 @@ func (c *dockerClient) ping() (*pingResponse, error) {
}
return pr, nil
}
pr, err := ping("https")
if err != nil && c.ctx.DockerInsecureSkipTLSVerify {
pr, err = ping("http")
scheme := "https"
pr, err := ping(scheme)
if err != nil {
scheme = "http"
pr, err = ping(scheme)
if err == nil {
return pr, nil
}
}
return pr, err
}


@ -8,9 +8,6 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"github.com/Sirupsen/logrus"
@ -21,13 +18,11 @@ import (
type dockerImageDestination struct {
ref dockerReference
c *dockerClient
// State
manifestDigest string // or "" if not yet known.
}
// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
c, err := newDockerClient(ctx, ref, true)
c, err := newDockerClient(ctx, ref.ref.Hostname())
if err != nil {
return nil, err
}
@ -149,7 +144,6 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
if err != nil {
return err
}
d.manifestDigest = digest
url := fmt.Sprintf(manifestURL, d.ref.ref.RemoteName(), digest)
headers := map[string][]string{}
@ -174,91 +168,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
}
func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
// Skip dealing with the manifest digest if not necessary.
if len(signatures) == 0 {
return nil
if len(signatures) != 0 {
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
}
if d.c.signatureBase == nil {
return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
}
// FIXME: This assumption that signatures are stored after the manifest rather breaks the model.
if d.manifestDigest == "" {
return fmt.Errorf("Unknown manifest digest, can't add signatures")
}
for i, signature := range signatures {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
err := d.putOneSignature(url, signature)
if err != nil {
return err
}
}
// Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted
// prematurely, this may not clean up all of them, but one missing signature
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
}
if missing {
break
}
}
return nil
}
// putOneSignature stores one signature to url.
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
switch url.Scheme {
case "file":
logrus.Debugf("Writing to %s", url.Path)
err := os.MkdirAll(filepath.Dir(url.Path), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(url.Path, signature, 0644)
if err != nil {
return err
}
return nil
default:
return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String())
}
}
// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
switch url.Scheme {
case "file":
logrus.Debugf("Deleting %s", url.Path)
err := os.Remove(url.Path)
if err != nil && os.IsNotExist(err) {
return true, nil
}
return false, err
default:
return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String())
}
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called


@ -6,8 +6,6 @@ import (
"io/ioutil"
"mime"
"net/http"
"net/url"
"os"
"strconv"
"github.com/Sirupsen/logrus"
@ -28,9 +26,6 @@ type dockerImageSource struct {
ref dockerReference
requestedManifestMIMETypes []string
c *dockerClient
// State
cachedManifest []byte // nil if not loaded yet
cachedManifestMIMEType string // Only valid if cachedManifest != nil
}
// newImageSource creates a new ImageSource for the specified image reference,
@ -38,7 +33,7 @@ type dockerImageSource struct {
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) {
c, err := newDockerClient(ctx, ref, false)
c, err := newDockerClient(ctx, ref.ref.Hostname())
if err != nil {
return nil, err
}
@ -76,28 +71,9 @@ func simplifyContentType(contentType string) string {
}
func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
err := s.ensureManifestIsLoaded()
if err != nil {
return nil, "", err
}
return s.cachedManifest, s.cachedManifestMIMEType, nil
}
// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
//
// ImageSource implementations are not required or expected to do any caching,
// but because our signatures are “attached” to the manifest digest,
// we need to ensure that the digest of the manifest returned by GetManifest
// and used by GetSignatures are consistent, otherwise we would get spurious
// signature verification failures when pulling while a tag is being updated.
func (s *dockerImageSource) ensureManifestIsLoaded() error {
if s.cachedManifest != nil {
return nil
}
reference, err := s.ref.tagOrDigest()
if err != nil {
return err
return nil, "", err
}
url := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), reference)
// TODO(runcom) set manifest version header! schema1 for now - then schema2 etc etc and v1
@ -106,20 +82,18 @@ func (s *dockerImageSource) ensureManifestIsLoaded() error {
headers["Accept"] = s.requestedManifestMIMETypes
res, err := s.c.makeRequest("GET", url, headers, nil)
if err != nil {
return err
return nil, "", err
}
defer res.Body.Close()
manblob, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
return nil, "", err
}
if res.StatusCode != http.StatusOK {
return errFetchManifest{res.StatusCode, manblob}
return nil, "", errFetchManifest{res.StatusCode, manblob}
}
// We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
s.cachedManifest = manblob
s.cachedManifestMIMEType = simplifyContentType(res.Header.Get("Content-Type"))
return nil
return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
@ -142,77 +116,12 @@ func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error)
}
func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
if s.c.signatureBase == nil { // Skip dealing with the manifest digest if not necessary.
return [][]byte{}, nil
}
if err := s.ensureManifestIsLoaded(); err != nil {
return nil, err
}
manifestDigest, err := manifest.Digest(s.cachedManifest)
if err != nil {
return nil, err
}
signatures := [][]byte{}
for i := 0; ; i++ {
url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
if url == nil {
return nil, fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
signature, missing, err := s.getOneSignature(url)
if err != nil {
return nil, err
}
if missing {
break
}
signatures = append(signatures, signature)
}
return signatures, nil
}
// getOneSignature downloads one signature from url.
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
switch url.Scheme {
case "file":
logrus.Debugf("Reading %s", url.Path)
sig, err := ioutil.ReadFile(url.Path)
if err != nil {
if os.IsNotExist(err) {
return nil, true, nil
}
return nil, false, err
}
return sig, false, nil
case "http", "https":
logrus.Debugf("GET %s", url)
res, err := s.c.client.Get(url.String())
if err != nil {
return nil, false, err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return nil, true, nil
} else if res.StatusCode != http.StatusOK {
return nil, false, fmt.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
}
sig, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, false, err
}
return sig, false, nil
default:
return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.String())
}
return [][]byte{}, nil
}
// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
c, err := newDockerClient(ctx, ref, true)
c, err := newDockerClient(ctx, ref.ref.Hostname())
if err != nil {
return err
}
@ -232,7 +141,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
return err
}
defer get.Body.Close()
manifestBody, err := ioutil.ReadAll(get.Body)
body, err := ioutil.ReadAll(get.Body)
if err != nil {
return err
}
@ -241,7 +150,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
case http.StatusNotFound:
return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry.", ref.ref)
default:
return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, string(body), get.Status)
}
digest := get.Header.Get("Docker-Content-Digest")
@ -255,7 +164,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
}
defer delete.Body.Close()
body, err := ioutil.ReadAll(delete.Body)
body, err = ioutil.ReadAll(delete.Body)
if err != nil {
return err
}
@ -263,26 +172,5 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
return fmt.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
}
if c.signatureBase != nil {
manifestDigest, err := manifest.Digest(manifestBody)
if err != nil {
return err
}
for i := 0; ; i++ {
url := signatureStorageURL(c.signatureBase, manifestDigest, i)
if url == nil {
return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
missing, err := c.deleteOneSignature(url)
if err != nil {
return err
}
if missing {
break
}
}
}
return nil
}


@ -1,198 +0,0 @@
package docker
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"github.com/ghodss/yaml"
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
)
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
// You can override this at build time with
// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path'
var systemRegistriesDirPath = builtinRegistriesDirPath
// builtinRegistriesDirPath is the path to registries.d.
// DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d"
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
DefaultDocker *registryNamespace `json:"default-docker"`
// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
Docker map[string]registryNamespace `json:"docker"`
}
// registryNamespace defines lookaside locations for a single namespace.
type registryNamespace struct {
SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
SigStoreStaging string `json:"sigstore-staging"` // For writing only.
}
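For reference, a registries.d file matching the structure above might look like the following (a sketch with made-up sigstore URLs; the key names come from the JSON tags on registryConfiguration and registryNamespace):

default-docker:
  sigstore: https://sigstore.example.com/signatures
docker:
  docker.io/library/busybox:
    sigstore: https://sigstore.example.com/busybox
    sigstore-staging: file:///var/lib/example-sigstore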
// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below.
type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported.
// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) {
// FIXME? Loading and parsing the config could be cached across calls.
dirPath := registriesDirPath(ctx)
logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
config, err := loadAndMergeConfig(dirPath)
if err != nil {
return nil, err
}
topLevel := config.signatureTopLevel(ref, write)
if topLevel == "" {
return nil, nil
}
url, err := url.Parse(topLevel)
if err != nil {
return nil, fmt.Errorf("Invalid signature storage URL %s: %v", topLevel, err)
}
// FIXME? Restrict to explicitly supported schemes?
repo := ref.ref.FullName() // Note that this is without a tag or digest.
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
}
url.Path = url.Path + "/" + repo
return url, nil
}
// registriesDirPath returns a path to registries.d
func registriesDirPath(ctx *types.SystemContext) string {
if ctx != nil {
if ctx.RegistriesDirPath != "" {
return ctx.RegistriesDirPath
}
if ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
}
}
return systemRegistriesDirPath
}
// loadAndMergeConfig loads configuration files in dirPath
func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
dockerDefaultMergedFrom := ""
nsMergedFrom := map[string]string{}
dir, err := os.Open(dirPath)
if err != nil {
if os.IsNotExist(err) {
return &mergedConfig, nil
}
return nil, err
}
configNames, err := dir.Readdirnames(0)
if err != nil {
return nil, err
}
for _, configName := range configNames {
if !strings.HasSuffix(configName, ".yaml") {
continue
}
configPath := filepath.Join(dirPath, configName)
configBytes, err := ioutil.ReadFile(configPath)
if err != nil {
return nil, err
}
var config registryConfiguration
err = yaml.Unmarshal(configBytes, &config)
if err != nil {
return nil, fmt.Errorf("Error parsing %s: %v", configPath, err)
}
if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
dockerDefaultMergedFrom = configPath
}
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig
nsMergedFrom[nsName] = configPath
}
}
return &mergedConfig, nil
}
// config.signatureTopLevel returns a URL string configured in config for ref, for write access if “write”
// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used.
func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
if config.Docker != nil {
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if ns, ok := config.Docker[identity]; ok {
logrus.Debugf(` Using "docker" namespace %s`, identity)
if url := ns.signatureTopLevel(write); url != "" {
return url
}
}
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if ns, ok := config.Docker[name]; ok {
logrus.Debugf(` Using "docker" namespace %s`, name)
if url := ns.signatureTopLevel(write); url != "" {
return url
}
}
}
}
// Look for a default location
if config.DefaultDocker != nil {
logrus.Debugf(` Using "default-docker" configuration`)
if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
return url
}
}
logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
return ""
}
// ns.signatureTopLevel returns a URL string configured in ns, for write access if “write”,
// or "" if nothing has been configured.
func (ns registryNamespace) signatureTopLevel(write bool) string {
if write && ns.SigStoreStaging != "" {
logrus.Debugf(` Using %s`, ns.SigStoreStaging)
return ns.SigStoreStaging
}
if ns.SigStore != "" {
logrus.Debugf(` Using %s`, ns.SigStore)
return ns.SigStore
}
return ""
}
// signatureStorageURL returns a URL usable for accessing the signature index in base with a known manifestDigest, or nil if not applicable.
// Returns nil iff base == nil.
func signatureStorageURL(base signatureStorageBase, manifestDigest string, index int) *url.URL {
if base == nil {
return nil
}
url := *base
url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest, index+1)
return &url
}
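A worked example with hypothetical values: from within this package, a base of https://sigstore.example.com/sigs/library/busybox and a manifest digest of sha256:e1c1... yield per-signature URLs numbered from 1:

base, _ := url.Parse("https://sigstore.example.com/sigs/library/busybox")
u := signatureStorageURL(base, "sha256:e1c1...", 0)
// u.String() == "https://sigstore.example.com/sigs/library/busybox@sha256:e1c1.../signature-1"
// index 1 would end in .../signature-2, and so on.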


@ -4,7 +4,6 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -107,12 +106,12 @@ func createManifest(m []byte) ([]byte, string, error) {
om := imgspecv1.Manifest{}
mt := manifest.GuessMIMEType(m)
switch mt {
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
case manifest.DockerV2Schema1MediaType:
// There is a simple reason for not implementing this yet.
// The OCI image-spec guarantees backward compatibility with Docker v2s2 but not with v2s1;
// generating a v2s2 manifest is a migration Docker performs when upgrading to 1.10.3,
// and I don't think we should bother with that now (I don't want to have migration code here in skopeo)
return nil, "", errors.New("can't create an OCI manifest from Docker V2 schema 1 manifest")
return nil, "", fmt.Errorf("can't create OCI manifest from Docker V2 schema 1 manifest")
case manifest.DockerV2Schema2MediaType:
if err := json.Unmarshal(m, &om); err != nil {
return nil, "", err
@ -128,13 +127,13 @@ func createManifest(m []byte) ([]byte, string, error) {
}
return b, om.MediaType, nil
case manifest.DockerV2ListMediaType:
return nil, "", errors.New("can't create an OCI manifest from Docker V2 schema 2 manifest list")
return nil, "", fmt.Errorf("can't create OCI manifest from Docker V2 schema 2 manifest list")
case imgspecv1.MediaTypeImageManifestList:
return nil, "", errors.New("can't create an OCI manifest from OCI manifest list")
return nil, "", fmt.Errorf("can't create OCI manifest from OCI manifest list")
case imgspecv1.MediaTypeImageManifest:
return m, mt, nil
}
return nil, "", fmt.Errorf("unrecognized manifest media type %q", mt)
return nil, "", fmt.Errorf("Unrecognized manifest media type")
}
func (d *ociImageDestination) PutManifest(m []byte) error {


@ -950,8 +950,7 @@ func (m *clustersMap) UnmarshalJSON(data []byte) error {
return err
}
for _, e := range a {
cluster := e.Cluster // Allocates a new instance in each iteration
(*m)[e.Name] = &cluster
(*m)[e.Name] = &e.Cluster
}
return nil
}
@ -964,8 +963,7 @@ func (m *authInfosMap) UnmarshalJSON(data []byte) error {
return err
}
for _, e := range a {
authInfo := e.AuthInfo // Allocates a new instance in each iteration
(*m)[e.Name] = &authInfo
(*m)[e.Name] = &e.AuthInfo
}
return nil
}
@ -978,8 +976,7 @@ func (m *contextsMap) UnmarshalJSON(data []byte) error {
return err
}
for _, e := range a {
context := e.Context // Allocates a new instance in each iteration
(*m)[e.Name] = &context
(*m)[e.Name] = &e.Context
}
return nil
}
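The removed "Allocates a new instance in each iteration" lines guard against a classic Go pitfall that the &e.Cluster / &e.AuthInfo / &e.Context forms reintroduce: the range variable e is a single variable reused on every iteration, so taking the address of one of its fields makes every map entry point at the same storage. A minimal standalone illustration:

package main

import "fmt"

type entry struct {
	Name  string
	Value int
}

func main() {
	entries := []entry{{"a", 1}, {"b", 2}}

	m := map[string]*int{}
	for _, e := range entries {
		m[e.Name] = &e.Value // BUG: every entry aliases the same loop variable
	}
	fmt.Println(*m["a"], *m["b"]) // prints "2 2"

	for _, e := range entries {
		v := e.Value // copy into a fresh variable each iteration
		m[e.Name] = &v
	}
	fmt.Println(*m["a"], *m["b"]) // prints "1 2"
}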


@ -9,7 +9,6 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
@ -22,8 +21,7 @@ import (
// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
type openshiftClient struct {
ref openshiftReference
baseURL *url.URL
ref openshiftReference
// Values from Kubernetes configuration
httpClient *http.Client
bearerToken string // "" if not used
@ -53,7 +51,9 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
return nil, err
}
logrus.Debugf("URL: %#v", *baseURL)
if *baseURL != *ref.baseURL {
return nil, fmt.Errorf("Unexpected baseURL mismatch: default %#v, reference %#v", *baseURL, *ref.baseURL)
}
if httpClient == nil {
httpClient = http.DefaultClient
}
@ -61,7 +61,6 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
return &openshiftClient{
ref: ref,
baseURL: baseURL,
httpClient: httpClient,
bearerToken: restConfig.BearerToken,
username: restConfig.Username,
@ -71,7 +70,7 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]byte, error) {
url := *c.baseURL
url := *c.ref.baseURL
url.Path = path
var requestBodyReader io.Reader
if requestBody != nil {
@ -154,7 +153,19 @@ func (c *openshiftClient) convertDockerImageReference(ref string) (string, error
if len(parts) != 2 {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return c.ref.dockerReference.Hostname() + "/" + parts[1], nil
// Sanity check that the reference is at least plausibly similar, i.e. uses the hard-coded port we expect.
if !strings.HasSuffix(parts[0], ":5000") {
return "", fmt.Errorf("Invalid format of docker reference %s: expecting port 5000", ref)
}
return c.dockerRegistryHostPart() + "/" + parts[1], nil
}
// dockerRegistryHostPart returns the host:port of the embedded Docker Registry API endpoint
// FIXME: There seems to be no way to discover the correct host:port using the API, so hard-code our knowledge
// about how the OpenShift Atomic Registry is configured, per examples/atomic-registry/run.sh:
// -p OPENSHIFT_OAUTH_PROVIDER_URL=https://${INSTALL_HOST}:8443,COCKPIT_KUBE_URL=https://${INSTALL_HOST},REGISTRY_HOST=${INSTALL_HOST}:5000
func (c *openshiftClient) dockerRegistryHostPart() string {
return strings.SplitN(c.ref.baseURL.Host, ":", 2)[0] + ":5000"
}
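For example, with a hypothetical ref.baseURL of https://master.example.com:8443, this returns master.example.com:5000.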
type openshiftImageSource struct {
@ -250,7 +261,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
}
var te *tagEvent
for _, tag := range is.Status.Tags {
if tag.Tag != s.client.ref.dockerReference.Tag() {
if tag.Tag != s.client.ref.tag {
continue
}
if len(tag.Items) > 0 {
@ -297,7 +308,7 @@ func newImageDestination(ctx *types.SystemContext, ref openshiftReference) (type
// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
// the manifest digest at this point.
dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", client.ref.dockerReference.Hostname(), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", client.dockerRegistryHostPart(), client.ref.namespace, client.ref.stream, client.ref.tag)
dockerRef, err := docker.ParseReference(dockerRefString)
if err != nil {
return nil, err
@ -359,7 +370,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
}
d.imageStreamImageName = manifestDigest
// FIXME: We can't do what respositorymiddleware.go does because we don't know the internal address. Does any of this matter?
dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.ref.dockerReference.Hostname(), d.client.ref.namespace, d.client.ref.stream, manifestDigest)
dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.dockerRegistryHostPart(), d.client.ref.namespace, d.client.ref.stream, manifestDigest)
ism := imageStreamMapping{
typeMeta: typeMeta{
Kind: "ImageStreamMapping",
@ -376,7 +387,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
DockerImageReference: dockerImageReference,
DockerImageManifest: string(m),
},
Tag: d.client.ref.dockerReference.Tag(),
Tag: d.client.ref.tag,
}
body, err := json.Marshal(ism)
if err != nil {


@ -3,9 +3,10 @@ package openshift
import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/policyconfiguration"
"github.com/containers/image/types"
"github.com/docker/docker/reference"
@ -43,33 +44,67 @@ func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error
// openshiftReference is an ImageReference for OpenShift images.
type openshiftReference struct {
dockerReference reference.NamedTagged
namespace string // Computed from dockerReference in advance.
stream string // Computed from dockerReference in advance.
baseURL *url.URL
namespace string
stream string
tag string
dockerReference reference.Named // Computed from the above in advance, so that later references can not fail.
}
// FIXME: Is imageName like this a good way to refer to OpenShift images?
// Keep this in sync with scopeRegexp!
var imageNameRegexp = regexp.MustCompile("^([^:/]*)/([^:/]*):([^:/]*)$")
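For instance (hypothetical values), "myproject/mystream:latest" splits into namespace "myproject", stream "mystream", and tag "latest":

m := imageNameRegexp.FindStringSubmatch("myproject/mystream:latest")
// m == []string{"myproject/mystream:latest", "myproject", "mystream", "latest"}
// Anything with an extra "/" or ":" (or missing the tag) yields m == nil and is rejected.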
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNamed(ref)
func ParseReference(reference string) (types.ImageReference, error) {
// Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client.
cmdConfig := defaultClientConfig()
logrus.Debugf("cmdConfig: %#v", cmdConfig)
restConfig, err := cmdConfig.ClientConfig()
if err != nil {
return nil, fmt.Errorf("failed to parse image reference %q, %v", ref, err)
return nil, err
}
tagged, ok := r.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("invalid image reference %s, %#v", ref, r)
// REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.)
logrus.Debugf("restConfig: %#v", restConfig)
baseURL, _, err := restClientFor(restConfig)
if err != nil {
return nil, err
}
return NewReference(tagged)
logrus.Debugf("URL: %#v", *baseURL)
m := imageNameRegexp.FindStringSubmatch(reference)
if m == nil || len(m) != 4 {
return nil, fmt.Errorf("Invalid image reference %s, %#v", reference, m)
}
return NewReference(baseURL, m[1], m[2], m[3])
}
// NewReference returns an OpenShift reference for a reference.NamedTagged
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(dockerRef.RemoteName(), "/", 3)
if len(r) != 2 {
return nil, fmt.Errorf("invalid image reference %s", dockerRef.String())
// NewReference returns an OpenShift reference for a base URL, namespace, stream and tag.
func NewReference(baseURL *url.URL, namespace, stream, tag string) (types.ImageReference, error) {
// Precompute also dockerReference so that later references can not fail.
//
// This discards ref.baseURL.Path, which is unexpected for a “base URL”;
// but openshiftClient.doRequest actually completely overrides url.Path
// (and defaultServerURL rejects non-trivial Path values), so it is OK for
// us to ignore it as well.
//
// FIXME: This is, strictly speaking, a namespace conflict with images placed in a Docker registry running on the same host.
// Do we need to do something else, perhaps disambiguate (port number?) or namespace Docker and OpenShift separately?
dockerRef, err := reference.WithName(fmt.Sprintf("%s/%s/%s", baseURL.Host, namespace, stream))
if err != nil {
return nil, err
}
dockerRef, err = reference.WithTag(dockerRef, tag)
if err != nil {
return nil, err
}
return openshiftReference{
namespace: r[0],
stream: r[1],
baseURL: baseURL,
namespace: namespace,
stream: stream,
tag: tag,
dockerReference: dockerRef,
}, nil
}
@ -84,7 +119,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
return ref.dockerReference.String()
return fmt.Sprintf("%s/%s:%s", ref.namespace, ref.stream, ref.tag)
}
// DockerReference returns a Docker reference associated with this reference


@ -0,0 +1,454 @@
package storage
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/image/image"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/storage"
)
var (
// ErrInvalidBlobDigest is returned when PutBlob() is given a blob
// with a digest-based name that can't be used as a map key.
ErrInvalidBlobDigest = errors.New("invalid blob digest")
// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
ErrBlobDigestMismatch = errors.New("blob digest mismatch")
)
type storageImage struct {
store storage.Store
imageRef *storageReference
Tag string `json:"tag,omitempty"`
Created time.Time `json:"created-time,omitempty"`
ID string `json:"id"`
BlobList []string `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
Layers map[string]string `json:"layers,omitempty"` // Map from names of blobs that are layers to layer IDs
BlobData map[string][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary
SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
}
type storageLayerMetadata struct {
ExpectedSize int64 `json:"expected-size,omitempty"`
Digest string `json:"digest,omitempty"`
Size int64 `json:"size,omitempty"`
}
// newImageSource sets us up to read out an image, which we assume exists.
func newImageSource(imageRef *storageReference) *storageImage {
tag := ""
if imageRef.ID() == "" {
logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport())
return nil
}
img, err := imageRef.store.GetImage(imageRef.ID())
if err != nil {
logrus.Errorf("error reading image %q: %v", imageRef.ID(), err)
return nil
}
tag = imageRef.StringWithinTransport()
image := storageImage{
store: imageRef.store,
imageRef: imageRef,
Tag: tag,
Created: time.Now(),
ID: img.ID,
BlobList: []string{},
Layers: make(map[string]string),
BlobData: make(map[string][]byte),
}
if err := image.loadMetadata(); err != nil {
logrus.Errorf("error decoding metadata for source image: %v", err)
return nil
}
return &image
}
// newImageDestination sets us up to write a new image.
func newImageDestination(imageRef *storageReference) *storageImage {
// We set the image's ID if the reference we got looked like one, since
// we take that as an indication that it's going to end up with the
// same contents as an image we already have. If the reference looks
// more like a name, we don't know yet if it'll be exactly the same as,
// or be an updated version of, an image we might already have, so we
// have to err on the side of caution and create a new image which will
// be assigned the name as its tag.
image := storageImage{
store: imageRef.store,
imageRef: imageRef,
Tag: imageRef.StringWithinTransport(),
Created: time.Now(),
ID: imageRef.ID(),
BlobList: []string{},
Layers: make(map[string]string),
BlobData: make(map[string][]byte),
}
return &image
}
func newImage(imageRef *storageReference) *storageImage {
return newImageSource(imageRef)
}
func (s *storageImage) loadMetadata() error {
if s.ID != "" {
image, err := s.store.GetImage(s.ID)
if image != nil && err == nil {
if err := json.Unmarshal([]byte(image.Metadata), s); err != nil {
return err
}
}
}
return nil
}
func (s *storageImage) saveMetadata() error {
if s.ID != "" {
if metadata, err := json.Marshal(s); len(metadata) != 0 && err == nil {
istore, err := s.store.GetImageStore()
if istore != nil && err == nil {
if err = istore.SetMetadata(s.ID, string(metadata)); err != nil {
logrus.Errorf("error setting metadata for image %q: %v", s.ID, err)
}
} else {
logrus.Errorf("error locating image store: %v", err)
}
return err
} else {
logrus.Errorf("error encoding metadata for image %q: %v", s.ID, err)
return err
}
}
return nil
}
func (s *storageImage) Reference() types.ImageReference {
return s.imageRef
}
func (s *storageImage) Close() {
}
// SupportsSignatures returns an error if we can't expect GetSignatures() to
// return data that was previously supplied to PutSignatures().
func (s *storageImage) SupportsSignatures() error {
return nil
}
// PutBlob is used to both store filesystem layers and binary data that is part
// of the image.
func (s *storageImage) PutBlob(stream io.Reader, digest string, expectedSize int64) (actualDigest string, actualSize int64, err error) {
// Try to read an initial snippet of the blob.
header := make([]byte, 10240)
n, err := stream.Read(header)
if err != nil && err != io.EOF {
return "", -1, err
}
// Set up to read the whole blob (the initial snippet, plus the rest)
// while digesting it with sha256.
hasher := sha256.New()
hash := []byte{}
counter := ioutils.NewWriteCounter(hasher)
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream)
multi := io.TeeReader(defragmented, counter)
if (n > 0) && archive.IsArchive(header[:n]) {
// It's a filesystem layer. If it's not the first one in the
// image, we assume that the most recently added layer is its
// parent.
parentLayer := ""
if len(s.BlobList) > 0 {
for _, blob := range s.BlobList {
if layerID, ok := s.Layers[blob]; ok {
parentLayer = layerID
}
}
}
// Now try to figure out if the identifier we have is an ID or
// something else, so that we can do collision detection
// correctly.
names := []string{}
id := digest
if matches := idRegexp.FindStringSubmatch(digest); len(matches) > 1 {
// Looks like a digest. Use the value as a layer ID.
id = matches[len(matches)-1]
} else {
// If it isn't empty, it's a name.
if digest != "" {
names = append(names, digest)
id = ""
}
}
if len(names) == 0 {
names = nil
}
// Attempt to create the identified layer and import its contents.
layer, err := s.store.PutLayer(id, parentLayer, names, "", true, multi)
if err != nil && err != storage.ErrDuplicateID {
logrus.Debugf("error importing layer blob %q: %v", digest, err)
return "", -1, err
}
if err == storage.ErrDuplicateID {
// We specified an ID, and there's already a layer with
// the same ID. Drain the input so that we can look at
// its length and digest.
_, err := io.Copy(ioutil.Discard, multi)
if err != nil && err != io.EOF {
logrus.Debugf("error digesting layer blob %q: %v", digest, err)
return "", -1, err
}
hash = hasher.Sum(nil)
} else {
// Applied the layer, either with a specified ID or a
// new ID. Note the size info and computed digest.
hash = hasher.Sum(nil)
layerdata := storageLayerMetadata{
ExpectedSize: expectedSize,
Digest: "sha256:" + hex.EncodeToString(hash[:]),
Size: counter.Count,
}
if metadata, err := json.Marshal(layerdata); len(metadata) != 0 && err == nil {
s.store.SetMetadata(layer.ID, string(metadata))
}
// Hang on to the new layer's ID.
id = layer.ID
}
// If the ID was a digest, verify that our computed sha256sum
// matches the ID.
if strings.HasPrefix(digest, "sha256:") && digest != "sha256:"+hex.EncodeToString(hash[:]) {
logrus.Debugf("blob %q digests to %q, rejecting", digest, hex.EncodeToString(hash[:]))
if layer != nil {
// Something's wrong; delete the newly-created layer.
s.store.DeleteLayer(layer.ID)
}
return "", -1, ErrBlobDigestMismatch
}
// If we didn't get a name, we might as well assign one using the hash.
if digest == "" {
digest = "sha256:" + hex.EncodeToString(hash[:])
}
// Record that this blob is a layer.
s.Layers[digest] = id
s.BlobList = append(s.BlobList, digest)
if layer != nil {
logrus.Debugf("blob %q imported as a filesystem layer", digest)
} else {
logrus.Debugf("layer blob %q already present", digest)
}
} else {
// It's just data. Finish scanning it in, check that our
// computed sha256sum matches the digest, and store it, but
// leave it out of the blob-to-layer-ID map so that we can tell
// that it's not a layer.
blob, err := ioutil.ReadAll(multi)
if err != nil && err != io.EOF {
return "", -1, err
}
actualSize = int64(len(blob))
hash = hasher.Sum(nil)
// If the ID was a digest, verify that our computed sha256sum
// matches it.
if strings.HasPrefix(digest, "sha256:") && digest != "sha256:"+hex.EncodeToString(hash[:]) {
logrus.Debugf("blob %q digests to %q, rejecting", digest, hex.EncodeToString(hash[:]))
return "", -1, ErrBlobDigestMismatch
}
// If we didn't get a name, we might as well assign one using the hash.
if digest == "" {
digest = "sha256:" + hex.EncodeToString(hash[:])
}
// Save the blob for when we Commit().
s.BlobData[digest] = blob
s.BlobList = append(s.BlobList, digest)
logrus.Debugf("blob %q imported as opaque data", digest)
}
return digest, actualSize, nil
}
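The header-sniffing setup at the top of PutBlob (read a snippet, classify it, then stitch it back in front of the remaining stream while tee-ing everything through a sha256 counter) is a reusable pattern. A standalone sketch using only the standard library (sniffAndDigest is illustrative, not part of this package):

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// sniffAndDigest reads up to 10KiB so the caller can classify the stream,
// then returns a reader that replays the sniffed bytes followed by the
// rest of the stream, hashing everything that passes through it.
func sniffAndDigest(stream io.Reader) (header []byte, body io.Reader, sum func() string, err error) {
	buf := make([]byte, 10240)
	n, err := stream.Read(buf)
	if err != nil && err != io.EOF {
		return nil, nil, nil, err
	}
	hasher := sha256.New()
	defragmented := io.MultiReader(bytes.NewReader(buf[:n]), stream)
	body = io.TeeReader(defragmented, hasher)
	sum = func() string { return "sha256:" + hex.EncodeToString(hasher.Sum(nil)) }
	return buf[:n], body, sum, nil
}

func main() {
	header, body, sum, _ := sniffAndDigest(strings.NewReader("hello blob"))
	io.Copy(ioutil.Discard, body) // drain so the digest covers the whole stream
	fmt.Printf("sniffed %d bytes, digest %s\n", len(header), sum())
}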
func (s *storageImage) Commit() error {
if s.ID != "" {
// We started with an image ID, or we've already registered
// this one and gotten one, so no need to do anything more.
if img, err := s.store.GetImage(s.ID); img != nil && err == nil {
return nil
}
}
lastLayer := ""
if len(s.BlobList) > 0 {
for _, blob := range s.BlobList {
if layerID, ok := s.Layers[blob]; ok {
lastLayer = layerID
}
}
}
img, err := s.store.CreateImage(s.ID, nil, lastLayer, "")
if err != nil {
return err
}
logrus.Debugf("created new image ID %q", img.ID)
if s.Tag != "" {
// We started with an image name rather than an ID, so move the
// name to this image.
if err := s.store.SetNames(img.ID, []string{s.Tag}); err != nil {
return err
}
logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
}
// Save the blob data to disk, and drop the contents from memory.
keys := []string{}
for k, v := range s.BlobData {
if err := s.store.SetImageBigData(img.ID, k, v); err != nil {
return err
}
keys = append(keys, k)
}
for _, key := range keys {
delete(s.BlobData, key)
}
s.ID = img.ID
return nil
}
func (s *storageImage) PutManifest(manifest []byte) error {
if err := s.Commit(); err != nil {
return err
}
defer s.saveMetadata()
return s.store.SetImageBigData(s.ID, "manifest", manifest)
}
func (s *storageImage) PutSignatures(signatures [][]byte) error {
if err := s.Commit(); err != nil {
return err
}
sizes := []int{}
sigblob := []byte{}
for _, sig := range signatures {
sizes = append(sizes, len(sig))
newblob := make([]byte, len(sigblob)+len(sig))
copy(newblob, sigblob)
copy(newblob[len(sigblob):], sig)
sigblob = newblob
}
s.SignatureSizes = sizes
defer s.saveMetadata()
return s.store.SetImageBigData(s.ID, "signatures", sigblob)
}
func (s *storageImage) SupportedManifestMIMETypes() []string {
return nil
}
func (s *storageImage) GetBlob(digest string) (rc io.ReadCloser, n int64, err error) {
if blob, ok := s.BlobData[digest]; ok {
r := bytes.NewReader(blob)
return ioutil.NopCloser(r), r.Size(), nil
}
if _, ok := s.Layers[digest]; !ok {
b, err := s.store.GetImageBigData(s.ID, digest)
if err != nil {
return nil, -1, err
}
r := bytes.NewReader(b)
logrus.Debugf("exporting opaque data as blob %q", digest)
return ioutil.NopCloser(r), r.Size(), nil
}
logrus.Debugf("exporting filesystem layer as blob %q", digest)
return s.diffLayer(s.Layers[digest], true)
}
func (s *storageImage) diffLayer(layerID string, computeSize bool) (rc io.ReadCloser, n int64, err error) {
layer, err := s.store.GetLayer(layerID)
if err != nil {
return nil, -1, err
}
layerMeta := storageLayerMetadata{
ExpectedSize: -1,
}
if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
logrus.Errorf("error decoding metadata for layer %q: %v", layerID, err)
return nil, -1, err
}
}
if computeSize {
if layerMeta.ExpectedSize == -1 {
n, err = s.store.DiffSize("", layer.ID)
if err != nil {
return nil, -1, err
}
} else {
n = layerMeta.ExpectedSize
}
} else {
n = -1
}
diff, err := s.store.Diff("", layer.ID)
if err != nil {
return nil, -1, err
}
return diff, n, nil
}
func (s *storageImage) GetManifest() (manifest []byte, MIMEType string, err error) {
manifest, err = s.store.GetImageBigData(s.ID, "manifest")
return manifest, "", err
}
func (s *storageImage) GetSignatures() (signatures [][]byte, err error) {
var offset int
if err := s.loadMetadata(); err != nil {
logrus.Errorf("error decoding metadata for image: %v", err)
return nil, err
}
signature, err := s.store.GetImageBigData(s.ID, "signatures")
if err != nil {
return nil, err
}
sigslice := [][]byte{}
for _, length := range s.SignatureSizes {
sigslice = append(sigslice, signature[offset:offset+length])
offset += length
}
if offset != len(signature) {
return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
}
return sigslice, nil
}
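PutSignatures and GetSignatures together implement a simple scheme: signatures are concatenated into one "signatures" big-data item, and SignatureSizes records each slice's length so the blob can be split again. A round-trip sketch of the scheme in isolation (join/split are illustrative helpers):

package main

import "fmt"

// join concatenates signatures and records each one's size.
func join(signatures [][]byte) (blob []byte, sizes []int) {
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

// split recovers the original slices from the blob and the size list.
func split(blob []byte, sizes []int) ([][]byte, error) {
	out := [][]byte{}
	offset := 0
	for _, length := range sizes {
		out = append(out, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return out, nil
}

func main() {
	blob, sizes := join([][]byte{[]byte("sig-one"), []byte("sig-two!")})
	sigs, err := split(blob, sizes)
	fmt.Println(len(sigs), err) // 2 <nil>
}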
func (s *storageImage) DeleteImage() error {
if s.ID != "" {
if _, err := s.store.DeleteImage(s.ID, true); err != nil {
return err
}
s.ID = ""
}
return nil
}
func (s *storageImage) Manifest() (manifest []byte, MIMEType string, err error) {
return s.GetManifest()
}
func (s *storageImage) Signatures() (signatures [][]byte, err error) {
return s.GetSignatures()
}
func (s *storageImage) BlobDigests() (digests []string, err error) {
return image.FromSource(s).BlobDigests()
}
func (s *storageImage) Inspect() (info *types.ImageInspectInfo, err error) {
return image.FromSource(s).Inspect()
}


@ -0,0 +1,89 @@
package storage
import (
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
"github.com/containers/storage/storage"
"github.com/docker/docker/reference"
)
// A storageReference holds a name and/or an ID, which is a 32-byte value
// hex-encoded into a 64-character string.
type storageReference struct {
store storage.Store
transport *storageTransport
reference string
id string
}
func newReference(store storage.Store, transport *storageTransport, reference, id string) *storageReference {
return &storageReference{
store: store,
transport: transport,
reference: reference,
id: id,
}
}
// Resolve the reference's name to an image ID in the storage library if
// there's already one present with the same name or ID.
func (s *storageReference) ID() string {
if s.id == "" {
image, err := s.store.GetImage(s.reference)
if image != nil && err == nil {
s.id = image.ID
}
}
return s.id
}
func (s *storageReference) Transport() types.ImageTransport {
return s.transport
}
func (s *storageReference) DockerReference() reference.Named {
return nil
}
func (s *storageReference) StringWithinTransport() string {
if s.reference == "" {
return s.id
}
return s.reference
}
func (s *storageReference) PolicyConfigurationIdentity() string {
if s.reference == "" {
return s.id
}
return s.reference
}
func (s *storageReference) PolicyConfigurationNamespaces() []string {
return nil
}
func (s *storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
return newImage(s), nil
}
func (s *storageReference) DeleteImage(ctx *types.SystemContext) error {
layers, err := s.store.DeleteImage(s.ID(), true)
if err == nil {
logrus.Debugf("deleted image %q", s.ID())
s.id = ""
for _, layer := range layers {
logrus.Debugf("deleted layer %q", layer)
}
}
s.id = ""
return err
}
func (s *storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
return newImageSource(s), nil
}
func (s *storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(s), nil
}


@ -0,0 +1,79 @@
package storage
import (
"errors"
"regexp"
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
"github.com/containers/storage/storage"
)
var (
// Transport is an ImageTransport that uses a default storage.Store or
// one that it's explicitly told to use.
Transport StoreTransport = &storageTransport{}
// ErrInvalidReference is returned when ParseReference() is passed an
// empty reference.
ErrInvalidReference = errors.New("invalid reference")
idRegexp = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$")
)
// StoreTransport is an ImageTransport that uses a storage.Store to parse
// references, either its own or one that it's told to use.
type StoreTransport interface {
types.ImageTransport
SetStore(storage.Store)
ParseStoreReference(store storage.Store, reference string) (types.ImageReference, error)
}
type storageTransport struct {
store storage.Store
}
func (s *storageTransport) Name() string {
return "oci-storage"
}
// SetStore sets the Store object which the Transport will use for parsing
// references when a Store is not directly specified. If one is not set, the
// library will attempt to initialize one with default settings when a
// reference needs to be parsed. Calling SetStore does not affect previously
// parsed references.
func (s *storageTransport) SetStore(store storage.Store) {
s.store = store
}
// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to a given store, and returns it in a reference object.
func (s *storageTransport) ParseStoreReference(store storage.Store, reference string) (types.ImageReference, error) {
if reference == "" {
return nil, ErrInvalidReference
}
id := ""
if matches := idRegexp.FindStringSubmatch(reference); len(matches) > 1 {
id = matches[len(matches)-1]
logrus.Debugf("parsed reference %q into ID %q", reference, id)
reference = ""
} else {
logrus.Debugf("treating reference %q as a name", reference)
}
return newReference(store, s, reference, id), nil
}
// ParseReference initializes the storage library and then takes a name or an
// ID, tries to figure out which it is, and returns it in a reference object.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
if s.store == nil {
store, err := storage.MakeStore("", "", "", []string{}, nil, nil)
if err != nil {
return nil, err
}
s.store = store
}
return s.ParseStoreReference(s.store, reference)
}
func (s *storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return nil
}
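Putting the transport together, a caller might resolve references like this (a sketch; it assumes a working containers/storage environment, since ParseReference lazily calls storage.MakeStore with default settings):

package main

import (
	"fmt"

	"github.com/containers/image/storage"
)

func main() {
	// A plain name stays a name...
	ref, err := storage.Transport.ParseReference("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // "busybox"

	// ...while 64 hex digits (with or without a "sha256:" prefix)
	// are parsed as an image ID.
	id := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	ref, err = storage.Transport.ParseReference("sha256:" + id)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // the bare ID, without the prefix
}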


@ -8,6 +8,7 @@ import (
"github.com/containers/image/docker"
ociLayout "github.com/containers/image/oci/layout"
"github.com/containers/image/openshift"
"github.com/containers/image/storage"
"github.com/containers/image/types"
)
@ -21,6 +22,7 @@ func init() {
docker.Transport,
ociLayout.Transport,
openshift.Transport,
storage.Transport,
} {
name := t.Name()
if _, ok := KnownTransports[name]; ok {


@ -188,18 +188,14 @@ type SystemContext struct {
// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
// Not used for any of the more specific path overrides available in this struct.
// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths,
// and there is no need to worry about the environment.)
// NOTE: This does NOT affect paths starting by $HOME.
RootForImplicitAbsolutePaths string
// === Global configuration overrides ===
// If not "", overrides the system's default path for signature.Policy configuration.
SignaturePolicyPath string
// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
RegistriesDirPath string
// === docker.Transport overrides ===
DockerCertPath string // If not "", a directory containing "cert.pem" and "key.pem" used when talking to a Docker Registry
DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
DockerInsecureSkipTLSVerify bool
}
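
For illustration, a caller-side sketch filling in the overrides described above (all paths and values are hypothetical; each field appears in the struct in this hunk):

package main

import "github.com/containers/image/types"

func main() {
	// Hypothetical configuration: an alternate root for implicit /etc paths,
	// an explicit policy file, and plain-HTTP registry access.
	_ = &types.SystemContext{
		RootForImplicitAbsolutePaths: "/mnt/altroot",
		SignaturePolicyPath:          "/etc/containers/policy.json",
		DockerInsecureSkipTLSVerify:  true,
	}
}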

191
vendor/github.com/containers/storage/LICENSE generated vendored Normal file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/github.com/containers/storage/NOTICE generated vendored Normal file

@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.


@ -0,0 +1,586 @@
// +build linux
/*
aufs driver directory structure
.
layers // Metadata of layers
1
2
3
diff // Content of the layer
1 // Contains layers that need to be mounted for the id
2
3
mnt // Mount points for the rw layers to be mounted
1
2
3
*/
package aufs
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/stringid"
"github.com/opencontainers/runc/libcontainer/label"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
var (
// ErrAufsNotSupported is returned if aufs is not supported by the host.
ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
// ErrAufsNested means aufs cannot be used because we are in a user namespace
ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
backingFs = "<unknown>"
enableDirpermLock sync.Once
enableDirperm bool
)
func init() {
graphdriver.Register("aufs", Init)
}
// Driver contains information about the filesystem mounted.
type Driver struct {
sync.Mutex
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex
pathCache map[string]string
}
// Init returns a new AUFS driver.
// An error is returned if AUFS is not supported.
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
// Try to load the aufs kernel module
if err := supportsAufs(); err != nil {
return nil, graphdriver.ErrNotSupported
}
fsMagic, err := graphdriver.GetFSMagic(root)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("AUFS is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
paths := []string{
"mnt",
"diff",
"layers",
}
a := &Driver{
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the root aufs driver dir and return
// if it already exists
// If not populate the dir structure
if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil {
if os.IsExist(err) {
return a, nil
}
return nil, err
}
if err := mountpk.MakePrivate(root); err != nil {
return nil, err
}
// Populate the dir structure
for _, p := range paths {
if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil {
return nil, err
}
}
return a, nil
}
// Return a nil error if the kernel supports aufs
// We cannot modprobe because inside dind modprobe fails
// to run
func supportsAufs() error {
// We can try to modprobe aufs first before looking at
// proc/filesystems for when aufs is supported
exec.Command("modprobe", "aufs").Run()
if rsystem.RunningInUserNS() {
return ErrAufsNested
}
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), "aufs") {
return nil
}
}
return ErrAufsNotSupported
}
func (a *Driver) rootPath() string {
return a.root
}
func (*Driver) String() string {
return "aufs"
}
// Status returns current information about the filesystem such as root directory, number of directories mounted, etc.
func (a *Driver) Status() [][2]string {
ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
return [][2]string{
{"Root Dir", a.rootPath()},
{"Backing Filesystem", backingFs},
{"Dirs", fmt.Sprintf("%d", len(ids))},
{"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())},
}
}
// GetMetadata not implemented
func (a *Driver) GetMetadata(id string) (map[string]string, error) {
return nil, nil
}
// Exists returns true if the given id is registered with
// this driver
func (a *Driver) Exists(id string) bool {
if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
return false
}
return true
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return a.Create(id, parent, mountLabel, storageOpt)
}
// Create three folders for each id
// mnt, layers, and diff
func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for aufs")
}
if err := a.createDirsFor(id); err != nil {
return err
}
// Write the layers metadata
f, err := os.Create(path.Join(a.rootPath(), "layers", id))
if err != nil {
return err
}
defer f.Close()
if parent != "" {
ids, err := getParentIds(a.rootPath(), parent)
if err != nil {
return err
}
if _, err := fmt.Fprintln(f, parent); err != nil {
return err
}
for _, i := range ids {
if _, err := fmt.Fprintln(f, i); err != nil {
return err
}
}
}
return nil
}
// createDirsFor creates two directories for the given id.
// mnt and diff
func (a *Driver) createDirsFor(id string) error {
paths := []string{
"mnt",
"diff",
}
rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps)
if err != nil {
return err
}
// Directory permission is 0755.
// The paths of the directories are <aufs_root_path>/mnt/<image_id>
// and <aufs_root_path>/diff/<image_id>
for _, p := range paths {
if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil {
return err
}
}
return nil
}
// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
a.pathCacheLock.Lock()
mountpoint, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
mountpoint = a.getMountpoint(id)
}
if err := a.unmount(mountpoint); err != nil {
// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
}
// Atomically remove each directory in turn by first moving it out of the
// way (so that docker doesn't find it anymore) before doing removal of
// the whole tree.
tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpMntPath)
tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
return err
}
defer os.RemoveAll(tmpDiffpath)
// Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
return err
}
a.pathCacheLock.Lock()
delete(a.pathCache, id)
a.pathCacheLock.Unlock()
return nil
}
// Get returns the rootfs path for the id.
// This will mount the dir at its given path
func (a *Driver) Get(id, mountLabel string) (string, error) {
parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) {
return "", err
}
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
m = a.getDiffPath(id)
if len(parents) > 0 {
m = a.getMountpoint(id)
}
}
if count := a.ctr.Increment(m); count > 1 {
return m, nil
}
// If a dir does not have a parent (no layers), do not try to mount;
// just return the diff path to the data
if len(parents) > 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil {
return "", err
}
}
a.pathCacheLock.Lock()
a.pathCache[id] = m
a.pathCacheLock.Unlock()
return m, nil
}
// Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error {
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
if count := a.ctr.Decrement(m); count > 0 {
return nil
}
err := a.unmount(m)
if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
}
return err
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
// AUFS doesn't need the parent layer to produce a diff.
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
Compression: archive.Uncompressed,
ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
UIDMaps: a.uidMaps,
GIDMaps: a.gidMaps,
})
}
type fileGetNilCloser struct {
storage.FileGetter
}
func (f fileGetNilCloser) Close() error {
return nil
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p := path.Join(a.rootPath(), "diff", id)
return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
}
func (a *Driver) applyDiff(id string, diff archive.Reader) error {
return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
UIDMaps: a.uidMaps,
GIDMaps: a.gidMaps,
})
}
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
// AUFS doesn't need the parent layer to calculate the diff size.
return directory.Size(path.Join(a.rootPath(), "diff", id))
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
// AUFS doesn't need the parent id to apply the diff.
if err = a.applyDiff(id, diff); err != nil {
return
}
return a.DiffSize(id, parent)
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
// AUFS doesn't have snapshots, so we need to get changes from all parent
// layers.
layers, err := a.getParentLayerPaths(id)
if err != nil {
return nil, err
}
return archive.Changes(layers, path.Join(a.rootPath(), "diff", id))
}
func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
parentIds, err := getParentIds(a.rootPath(), id)
if err != nil {
return nil, err
}
layers := make([]string, len(parentIds))
// Get the diff paths for all the parent ids
for i, p := range parentIds {
layers[i] = path.Join(a.rootPath(), "diff", p)
}
return layers, nil
}
func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
a.Lock()
defer a.Unlock()
// If the id is mounted or we get an error return
if mounted, err := a.mounted(target); err != nil || mounted {
return err
}
rw := a.getDiffPath(id)
if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
}
return nil
}
func (a *Driver) unmount(mountPath string) error {
a.Lock()
defer a.Unlock()
if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err
}
if err := Unmount(mountPath); err != nil {
return err
}
return nil
}
func (a *Driver) mounted(mountpoint string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
}
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
var dirs []string
if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
}
for _, m := range dirs {
if err := a.unmount(m); err != nil {
logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
}
}
return mountpk.Unmount(a.root)
}
func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
defer func() {
if err != nil {
Unmount(target)
}
}()
// Mount options are clipped to the page size (4096 bytes). If there are more
// layers than fit, the remainder are remounted individually using append.
offset := 54
if useDirperm() {
offset += len("dirperm1")
}
b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
firstMount := true
i := 0
for {
for ; i < len(ro); i++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[i])
if firstMount {
if bp+len(layer) > len(b) {
break
}
bp += copy(b[bp:], layer)
} else {
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil {
return
}
}
}
if firstMount {
opts := "dio,xino=/dev/shm/aufs.xino"
if useDirperm() {
opts += ",dirperm1"
}
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}
firstMount = false
}
if i == len(ro) {
break
}
}
return
}
// useDirperm checks whether the dirperm1 mount option can be used with the current
// version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
base, err := ioutil.TempDir("", "docker-aufs-base")
if err != nil {
logrus.Errorf("error checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
union, err := ioutil.TempDir("", "docker-aufs-union")
if err != nil {
logrus.Errorf("error checking dirperm1: %v", err)
return
}
defer os.RemoveAll(union)
opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
if err := mount("none", union, "aufs", 0, opts); err != nil {
return
}
enableDirperm = true
if err := Unmount(union); err != nil {
logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
}
})
return enableDirperm
}
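
To make the branch-string assembly in aufsMount concrete, here is a standalone sketch (layer paths hypothetical) that builds the same kind of mount data used by the first mount, before any MS_REMOUNT appends:

package main

import "fmt"

func main() {
	rw := "/var/lib/storage/aufs/diff/abc" // writable top layer
	ro := []string{                        // read-only parents, nearest first
		"/var/lib/storage/aufs/diff/p1",
		"/var/lib/storage/aufs/diff/p2",
	}
	opts := fmt.Sprintf("br:%s=rw", rw)
	for _, layer := range ro {
		opts += fmt.Sprintf(":%s=ro+wh", layer)
	}
	opts += ",dio,xino=/dev/shm/aufs.xino"
	fmt.Println(opts)
}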


@ -0,0 +1,64 @@
// +build linux
package aufs
import (
"bufio"
"io/ioutil"
"os"
"path"
)
// Return all the directories
func loadIds(root string) ([]string, error) {
dirs, err := ioutil.ReadDir(root)
if err != nil {
return nil, err
}
out := []string{}
for _, d := range dirs {
if !d.IsDir() {
out = append(out, d.Name())
}
}
return out, nil
}
// Read the layers file for the current id and return all the
// layers represented by new lines in the file
//
// If there are no lines in the file then the id has no parent
// and an empty slice is returned.
func getParentIds(root, id string) ([]string, error) {
f, err := os.Open(path.Join(root, "layers", id))
if err != nil {
return nil, err
}
defer f.Close()
out := []string{}
s := bufio.NewScanner(f)
for s.Scan() {
if t := s.Text(); t != "" {
out = append(out, s.Text())
}
}
return out, s.Err()
}
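// For illustration (hypothetical IDs): the layers file written by Create for
// a layer whose parent chain is "2" then "1" contains one ID per line,
// nearest parent first:
//
//	2
//	1
//
// so getParentIds(root, "3") returns []string{"2", "1"}.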
func (a *Driver) getMountpoint(id string) string {
return path.Join(a.mntPath(), id)
}
func (a *Driver) mntPath() string {
return path.Join(a.rootPath(), "mnt")
}
func (a *Driver) getDiffPath(id string) string {
return path.Join(a.diffPath(), id)
}
func (a *Driver) diffPath() string {
return path.Join(a.rootPath(), "diff")
}


@ -0,0 +1,21 @@
// +build linux
package aufs
import (
"os/exec"
"syscall"
"github.com/Sirupsen/logrus"
)
// Unmount the target specified.
func Unmount(target string) error {
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
}
if err := syscall.Unmount(target, 0); err != nil {
return err
}
return nil
}


@ -0,0 +1,7 @@
package aufs
import "syscall"
func mount(source string, target string, fstype string, flags uintptr, data string) error {
return syscall.Mount(source, target, fstype, flags, data)
}


@ -0,0 +1,12 @@
// +build !linux
package aufs
import "errors"
// MsRemount declared to specify a non-linux system mount.
const MsRemount = 0
func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
return errors.New("mount is not implemented on this platform")
}


@ -0,0 +1,520 @@
// +build linux
package btrfs
/*
#include <stdlib.h>
#include <dirent.h>
#include <btrfs/ioctl.h>
#include <btrfs/ctree.h>
static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
}
*/
import "C"
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"unsafe"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/docker/go-units"
"github.com/opencontainers/runc/libcontainer/label"
)
func init() {
graphdriver.Register("btrfs", Init)
}
var (
quotaEnabled = false
userDiskQuota = false
)
type btrfsOptions struct {
minSpace uint64
size uint64
}
// Init returns a new BTRFS driver.
// An error is returned if BTRFS is not supported.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsMagic != graphdriver.FsMagicBtrfs {
return nil, graphdriver.ErrPrerequisites
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
opt, err := parseOptions(options)
if err != nil {
return nil, err
}
if userDiskQuota {
if err := subvolEnableQuota(home); err != nil {
return nil, err
}
quotaEnabled = true
}
driver := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
options: opt,
}
return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
}
func parseOptions(opt []string) (btrfsOptions, error) {
var options btrfsOptions
for _, option := range opt {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return options, err
}
key = strings.ToLower(key)
switch key {
case "btrfs.min_space":
minSpace, err := units.RAMInBytes(val)
if err != nil {
return options, err
}
userDiskQuota = true
options.minSpace = uint64(minSpace)
default:
return options, fmt.Errorf("Unknown option %s", key)
}
}
return options, nil
}
// Driver contains information about the filesystem mounted.
type Driver struct {
//root of the file system
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
options btrfsOptions
}
// String prints the name of the driver (btrfs).
func (d *Driver) String() string {
return "btrfs"
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
// Version information can be used to check compatibility with your kernel.
func (d *Driver) Status() [][2]string {
status := [][2]string{}
if bv := btrfsBuildVersion(); bv != "-" {
status = append(status, [2]string{"Build Version", bv})
}
if lv := btrfsLibVersion(); lv != -1 {
status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)})
}
return status
}
// GetMetadata returns empty metadata for this driver.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
return nil, nil
}
// Cleanup unmounts the home directory.
func (d *Driver) Cleanup() error {
if quotaEnabled {
if err := subvolDisableQuota(d.home); err != nil {
return err
}
}
return mount.Unmount(d.home)
}
func free(p *C.char) {
C.free(unsafe.Pointer(p))
}
func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)
dir := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("Can't open dir")
}
return dir, nil
}
func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}
func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
func subvolCreate(path, name string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_vol_args
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
}
return nil
}
func subvolSnapshot(src, dest, name string) error {
srcDir, err := openDir(src)
if err != nil {
return err
}
defer closeDir(srcDir)
destDir, err := openDir(dest)
if err != nil {
return err
}
defer closeDir(destDir)
var args C.struct_btrfs_ioctl_vol_args_v2
args.fd = C.__s64(getDirFd(srcDir))
var cs = C.CString(name)
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
}
return nil
}
func isSubvolume(p string) (bool, error) {
var bufStat syscall.Stat_t
if err := syscall.Lstat(p, &bufStat); err != nil {
return false, err
}
// return true if it is a btrfs subvolume
return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil
}
func subvolDelete(dirpath, name string) error {
dir, err := openDir(dirpath)
if err != nil {
return err
}
defer closeDir(dir)
fullPath := path.Join(dirpath, name)
var args C.struct_btrfs_ioctl_vol_args
// walk the btrfs subvolumes
walkSubvolumes := func(p string, f os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) && p != fullPath {
// missing most likely because the path was a subvolume that got removed in the previous iteration
// since it's gone anyway, we don't care
return nil
}
return fmt.Errorf("error walking subvolumes: %v", err)
}
// we want to check children only so skip itself
// it will be removed after the filepath walk anyways
if f.IsDir() && p != fullPath {
sv, err := isSubvolume(p)
if err != nil {
return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
}
if sv {
if err := subvolDelete(path.Dir(p), f.Name()); err != nil {
return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
}
}
}
return nil
}
if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil {
return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
}
// all subvolumes have been removed
// now remove the one originally passed in
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
}
return nil
}
func subvolEnableQuota(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
}
return nil
}
func subvolDisableQuota(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
}
return nil
}
func subvolRescanQuota(path string) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_rescan_args
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
}
return nil
}
func subvolLimitQgroup(path string, size uint64) error {
dir, err := openDir(path)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_qgroup_limit_args
args.lim.max_referenced = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
}
return nil
}
func (d *Driver) subvolumesDir() string {
return path.Join(d.home, "subvolumes")
}
func (d *Driver) subvolumesDirID(id string) string {
return path.Join(d.subvolumesDir(), id)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create the filesystem with given id.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
subvolumes := path.Join(d.home, "subvolumes")
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil {
return err
}
if parent == "" {
if err := subvolCreate(subvolumes, id); err != nil {
return err
}
} else {
parentDir := d.subvolumesDirID(parent)
st, err := os.Stat(parentDir)
if err != nil {
return err
}
if !st.IsDir() {
return fmt.Errorf("%s: not a directory", parentDir)
}
if err := subvolSnapshot(parentDir, subvolumes, id); err != nil {
return err
}
}
if _, ok := storageOpt["size"]; ok {
driver := &Driver{}
if err := d.parseStorageOpt(storageOpt, driver); err != nil {
return err
}
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err
}
}
// if we have a remapped root (user namespaces enabled), change the created snapshot
// dir ownership to match
if rootUID != 0 || rootGID != 0 {
if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
return err
}
}
return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
}
// Parse btrfs storage options
func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
// Read size to change the subvolume disk quota per container
for key, val := range storageOpt {
key := strings.ToLower(key)
switch key {
case "size":
size, err := units.RAMInBytes(val)
if err != nil {
return err
}
driver.options.size = uint64(size)
default:
return fmt.Errorf("Unknown option %s", key)
}
}
return nil
}
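// The sizes accepted above go through units.RAMInBytes, which understands
// human-readable suffixes, so (illustrative values):
//
//	n, err := units.RAMInBytes("20G") // n == 21474836480, err == nil
//
// which is why storageOpt values such as "512m" or "20G" are accepted.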
// Set btrfs storage size
func (d *Driver) setStorageSize(dir string, driver *Driver) error {
if driver.options.size <= 0 {
return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size)))
}
if d.options.minSpace > 0 && driver.options.size < d.options.minSpace {
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
}
if !quotaEnabled {
if err := subvolEnableQuota(d.home); err != nil {
return err
}
quotaEnabled = true
}
if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
return err
}
return nil
}
// Remove the filesystem with given id.
func (d *Driver) Remove(id string) error {
dir := d.subvolumesDirID(id)
if _, err := os.Stat(dir); err != nil {
return err
}
if err := subvolDelete(d.subvolumesDir(), id); err != nil {
return err
}
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
if err := subvolRescanQuota(d.home); err != nil {
return err
}
return nil
}
// Get the requested filesystem id.
func (d *Driver) Get(id, mountLabel string) (string, error) {
dir := d.subvolumesDirID(id)
st, err := os.Stat(dir)
if err != nil {
return "", err
}
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}
return dir, nil
}
// Put is not implemented for BTRFS as there is no cleanup required for the id.
func (d *Driver) Put(id string) error {
// Get() creates no runtime resources (like e.g. mounts)
// so this doesn't need to do anything.
return nil
}
// Exists checks if the id exists in the filesystem.
func (d *Driver) Exists(id string) bool {
dir := d.subvolumesDirID(id)
_, err := os.Stat(dir)
return err == nil
}


@ -0,0 +1,3 @@
// +build !linux !cgo
package btrfs


@ -0,0 +1,26 @@
// +build linux,!btrfs_noversion
package btrfs
/*
#include <btrfs/version.h>
// around version 3.16, they did not define lib version yet
#ifndef BTRFS_LIB_VERSION
#define BTRFS_LIB_VERSION -1
#endif
// upstream had removed it, but now it will be coming back
#ifndef BTRFS_BUILD_VERSION
#define BTRFS_BUILD_VERSION "-"
#endif
*/
import "C"
func btrfsBuildVersion() string {
return string(C.BTRFS_BUILD_VERSION)
}
func btrfsLibVersion() int {
return int(C.BTRFS_LIB_VERSION)
}


@ -0,0 +1,14 @@
// +build linux,btrfs_noversion
package btrfs
// TODO(vbatts) remove this work-around once supported linux distros are on
// btrfs utilities of >= 3.16.1
func btrfsBuildVersion() string {
return "-"
}
func btrfsLibVersion() int {
return -1
}


@ -0,0 +1,67 @@
package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]*minfo
mu sync.Mutex
checker Checker
}
// NewRefCounter returns a new RefCounter
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
}
// Increment increases the ref count for the given id and returns the current count
func (c *RefCounter) Increment(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If we are checking this path for the first time, check whether it is
// already mounted on the system, so that the ref count starts out correct
// when the path is in use.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count++
c.mu.Unlock()
return m.count
}
// Decrement decreases the ref count for the given id and returns the current count
func (c *RefCounter) Decrement(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If we are checking this path for the first time, check whether it is
// already mounted on the system, so that the ref count starts out correct
// when the path is in use.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count--
c.mu.Unlock()
return m.count
}
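
A minimal sketch exercising the counter (the no-op Checker is hypothetical; real callers pass e.g. the FsMagic-based checker the aufs driver constructs with NewFsChecker):

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// nopChecker reports every path as unmounted, so counts start at zero.
type nopChecker struct{}

func (nopChecker) IsMounted(path string) bool { return false }

func main() {
	c := graphdriver.NewRefCounter(nopChecker{})
	fmt.Println(c.Increment("/mnt/layer")) // 1
	fmt.Println(c.Increment("/mnt/layer")) // 2
	fmt.Println(c.Decrement("/mnt/layer")) // 1
}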

File diff suppressed because it is too large


@ -0,0 +1,106 @@
package devmapper
// Definition of struct dm_task and sub structures (from lvm2)
//
// struct dm_ioctl {
// /*
// * The version number is made up of three parts:
// * major - no backward or forward compatibility,
// * minor - only backwards compatible,
// * patch - both backwards and forwards compatible.
// *
// * All clients of the ioctl interface should fill in the
// * version number of the interface that they were
// * compiled with.
// *
// * All recognized ioctl commands (ie. those that don't
// * return -ENOTTY) fill out this field, even if the
// * command failed.
// */
// uint32_t version[3]; /* in/out */
// uint32_t data_size; /* total size of data passed in
// * including this struct */
// uint32_t data_start; /* offset to start of data
// * relative to start of this struct */
// uint32_t target_count; /* in/out */
// int32_t open_count; /* out */
// uint32_t flags; /* in/out */
// /*
// * event_nr holds either the event number (input and output) or the
// * udev cookie value (input only).
// * The DM_DEV_WAIT ioctl takes an event number as input.
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
// * use the field as a cookie to return in the DM_COOKIE
// * variable with the uevents they issue.
// * For output, the ioctls return the event number, not the cookie.
// */
// uint32_t event_nr; /* in/out */
// uint32_t padding;
// uint64_t dev; /* in/out */
// char name[DM_NAME_LEN]; /* device name */
// char uuid[DM_UUID_LEN]; /* unique identifier for
// * the block device */
// char data[7]; /* padding or data */
// };
// struct target {
// uint64_t start;
// uint64_t length;
// char *type;
// char *params;
// struct target *next;
// };
// typedef enum {
// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
// } dm_add_node_t;
// struct dm_task {
// int type;
// char *dev_name;
// char *mangled_dev_name;
// struct target *head, *tail;
// int read_only;
// uint32_t event_nr;
// int major;
// int minor;
// int allow_default_major_fallback;
// uid_t uid;
// gid_t gid;
// mode_t mode;
// uint32_t read_ahead;
// uint32_t read_ahead_flags;
// union {
// struct dm_ioctl *v4;
// } dmi;
// char *newname;
// char *message;
// char *geometry;
// uint64_t sector;
// int no_flush;
// int no_open_count;
// int skip_lockfs;
// int query_inactive_table;
// int suppress_identical_reload;
// dm_add_node_t add_node;
// uint64_t existing_table_size;
// int cookie_set;
// int new_uuid;
// int secure_data;
// int retry_remove;
// int enable_checks;
// int expected_errno;
// char *uuid;
// char *mangled_uuid;
// };
//


@ -0,0 +1,226 @@
// +build linux
package devmapper
import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/devicemapper"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/docker/go-units"
)
func init() {
graphdriver.Register("devicemapper", Init)
}
// Driver contains the device set mounted and the home directory
type Driver struct {
*DeviceSet
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
// Init creates a driver with the given home and the set of options.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
DeviceSet: deviceSet,
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
func (d *Driver) String() string {
return "devicemapper"
}
// Status returns the status about the driver in a printable format.
// Information returned contains Pool Name, Data File, Metadata file, disk usage by
// the data and metadata, etc.
func (d *Driver) Status() [][2]string {
s := d.DeviceSet.Status()
status := [][2]string{
{"Pool Name", s.PoolName},
{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
{"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))},
{"Backing Filesystem", s.BaseDeviceFS},
{"Data file", s.DataFile},
{"Metadata file", s.MetadataFile},
{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))},
{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
{"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))},
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
}
if len(s.DataLoopback) > 0 {
status = append(status, [2]string{"Data loop file", s.DataLoopback})
}
if len(s.MetadataLoopback) > 0 {
status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
}
if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
status = append(status, [2]string{"Library Version", vStr})
}
return status
}
// GetMetadata returns a map of information about the device.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
m, err := d.DeviceSet.exportDeviceMetadata(id)
if err != nil {
return nil, err
}
metadata := make(map[string]string)
metadata["DeviceId"] = strconv.Itoa(m.deviceID)
metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10)
metadata["DeviceName"] = m.deviceName
return metadata, nil
}
// Cleanup unmounts a device.
func (d *Driver) Cleanup() error {
err := d.DeviceSet.Shutdown(d.home)
if err2 := mount.Unmount(d.home); err == nil {
err = err2
}
return err
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create adds a device with a given id and the parent.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
return err
}
return nil
}
// Remove removes a device with a given id, unmounts the filesystem.
func (d *Driver) Remove(id string) error {
if !d.DeviceSet.HasDevice(id) {
// Consider removing a non-existing device a no-op
// This is useful to be able to progress on container removal
// if the underlying device has gone away due to earlier errors
return nil
}
// This assumes the device has been properly Get/Put:ed and thus is unmounted
if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
return err
}
mp := path.Join(d.home, "mnt", id)
if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get mounts a device with given id into the root filesystem
func (d *Driver) Get(id, mountLabel string) (string, error) {
mp := path.Join(d.home, "mnt", id)
rootFs := path.Join(mp, "rootfs")
if count := d.ctr.Increment(mp); count > 1 {
return rootFs, nil
}
uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(mp)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
return "", err
}
// Mount the device
if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
d.ctr.Decrement(mp)
return "", err
}
if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
idFile := path.Join(mp, "id")
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
}
}
return rootFs, nil
}
// Put unmounts a device and removes it.
func (d *Driver) Put(id string) error {
mp := path.Join(d.home, "mnt", id)
if count := d.ctr.Decrement(mp); count > 0 {
return nil
}
err := d.DeviceSet.UnmountDevice(id, mp)
if err != nil {
logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
}
return err
}
// Exists checks to see if the device exists.
func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id)
}


@ -0,0 +1,89 @@
// +build linux
package devmapper
import (
"bytes"
"fmt"
"os"
"path/filepath"
"syscall"
)
// FIXME: this is copy-pasted from the aufs driver.
// It should be moved into the core.
// Mounted returns true if a mount point exists.
func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
if err != nil {
return false, err
}
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
return mntpointSt.Dev != parentSt.Dev, nil
}
type probeData struct {
fsName string
magic string
offset uint64
}
// ProbeFsType returns the filesystem name for the given device id.
func ProbeFsType(device string) (string, error) {
probes := []probeData{
{"btrfs", "_BHRfS_M", 0x10040},
{"ext4", "\123\357", 0x438},
{"xfs", "XFSB", 0},
}
maxLen := uint64(0)
for _, p := range probes {
l := p.offset + uint64(len(p.magic))
if l > maxLen {
maxLen = l
}
}
file, err := os.Open(device)
if err != nil {
return "", err
}
defer file.Close()
buffer := make([]byte, maxLen)
l, err := file.Read(buffer)
if err != nil {
return "", err
}
if uint64(l) != maxLen {
return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
}
for _, p := range probes {
if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
return p.fsName, nil
}
}
return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
}
func joinMountOptions(a, b string) string {
if a == "" {
return b
}
if b == "" {
return a
}
return a + "," + b
}
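
A usage sketch for the probe above (the device path is hypothetical, and reading a block device typically requires root):

func probeExample() {
	fsName, err := ProbeFsType("/dev/dm-3")
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Println("detected filesystem:", fsName) // e.g. "ext4"
}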

243
vendor/github.com/containers/storage/drivers/driver.go generated vendored Normal file

@ -0,0 +1,243 @@
package graphdriver
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
)
// FsMagic unsigned id of the filesystem in use.
type FsMagic uint32
const (
// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
FsMagicUnsupported = FsMagic(0x00000000)
)
var (
// All registered drivers
drivers map[string]InitFunc
// ErrNotSupported returned when driver is not supported.
ErrNotSupported = errors.New("driver not supported")
// ErrPrerequisites returned when driver does not meet prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS returned when file system is not supported.
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
)
// InitFunc initializes the storage driver.
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
// for client code that chooses not to implement the entire Driver
// interface and use the NaiveDiffDriver wrapper constructor.
//
// Use of ProtoDriver directly by client code is not recommended.
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container.
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
// Create creates a new, empty, filesystem layer with the
// specified id and parent and mountLabel. Parent and mountLabel may be "".
Create(id, parent, mountLabel string, storageOpt map[string]string) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Returns the absolute path to the mounted layered filesystem.
Get(id, mountLabel string) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// Returns a set of key-value pairs which give low-level information
// about the image/container the driver is managing.
GetMetadata(id string) (map[string]string, error)
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
}
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id, parent string) (archive.Archive, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The archive.Reader must be an uncompressed stream.
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id, parent string) (size int64, err error)
}
// DiffGetterDriver is the interface for layered file system drivers that
// provide a specialized function for getting file contents for tar-split.
type DiffGetterDriver interface {
Driver
// DiffGetter returns an interface to efficiently retrieve the contents
// of files in a layer.
DiffGetter(id string) (FileGetCloser, error)
}
// FileGetCloser extends the storage.FileGetter interface with a Close method
// for cleaning up.
type FileGetCloser interface {
storage.FileGetter
// Close cleans up any resources associated with the FileGetCloser.
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
// GetDriver initializes and returns the registered driver
func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
if pluginDriver, err := lookupPlugin(name, home, options); err == nil {
return pluginDriver, nil
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// New creates the driver and initializes it at the specified root.
func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if name != "" {
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
return GetDriver(name, root, options, uidMaps, gidMaps)
}
// Guess for prior driver
driversMap := scanPriorDrivers(root)
for _, name := range priority {
if name == "vfs" {
// don't use vfs even if there is state present.
continue
}
if _, prior := driversMap[name]; prior {
// of the state found from prior drivers, check in order of our priority
// which we would prefer
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
// unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so
// something changed and needs attention. Otherwise the daemon's
// images would just "disappear".
logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
return nil, err
}
// abort starting when there are other prior configured drivers
// to ensure the user explicitly selects the driver to load
if len(driversMap)-1 > 0 {
var driversSlice []string
for name := range driversMap {
driversSlice = append(driversSlice, name)
}
return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", "))
}
logrus.Infof("[graphdriver] using prior storage driver %q", name)
return driver, nil
}
}
// Check for priority drivers first
for _, name := range priority {
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
return nil, fmt.Errorf("No supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
}
// scanPriorDrivers returns an unordered scan of directories of prior storage drivers
func scanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil && driver != "vfs" {
driversMap[driver] = true
}
}
return driversMap
}
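To make the registry flow concrete, here is a minimal sketch of a backend registering itself and a caller requesting it. The package, the driver name "mydriver", and its Init function are hypothetical; only the graphdriver API defined above is assumed:

package mydriver

import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/idtools"
)

// Init is a hypothetical InitFunc; a real backend would set up its state under root.
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	return nil, graphdriver.ErrNotSupported
}

func init() {
	// runs at import time, exactly like the built-in drivers/register package
	graphdriver.Register("mydriver", Init)
}

A caller would then select it explicitly with graphdriver.New(root, "mydriver", nil, nil, nil), or pass an empty name and let New probe prior state and the priority list as above.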


@ -0,0 +1,19 @@
package graphdriver
import "syscall"
var (
// Slice of drivers that should be used, in order
priority = []string{
"zfs",
}
)
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}


@ -0,0 +1,133 @@
// +build linux
package graphdriver
import (
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/mount"
)
const (
// FsMagicAufs filesystem id for Aufs
FsMagicAufs = FsMagic(0x61756673)
// FsMagicBtrfs filesystem id for Btrfs
FsMagicBtrfs = FsMagic(0x9123683E)
// FsMagicCramfs filesystem id for Cramfs
FsMagicCramfs = FsMagic(0x28cd3d45)
// FsMagicEcryptfs filesystem id for eCryptfs
FsMagicEcryptfs = FsMagic(0xf15f)
// FsMagicExtfs filesystem id for Extfs
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
FsMagicF2fs = FsMagic(0xF2F52010)
// FsMagicGPFS filesystem id for GPFS
FsMagicGPFS = FsMagic(0x47504653)
// FsMagicJffs2Fs filesystem id for Jffs2Fs
FsMagicJffs2Fs = FsMagic(0x000072b6)
// FsMagicJfs filesystem id for Jfs
FsMagicJfs = FsMagic(0x3153464a)
// FsMagicNfsFs filesystem id for NfsFs
FsMagicNfsFs = FsMagic(0x00006969)
// FsMagicRAMFs filesystem id for RamFs
FsMagicRAMFs = FsMagic(0x858458f6)
// FsMagicReiserFs filesystem id for ReiserFs
FsMagicReiserFs = FsMagic(0x52654973)
// FsMagicSmbFs filesystem id for SmbFs
FsMagicSmbFs = FsMagic(0x0000517B)
// FsMagicSquashFs filesystem id for SquashFs
FsMagicSquashFs = FsMagic(0x73717368)
// FsMagicTmpFs filesystem id for TmpFs
FsMagicTmpFs = FsMagic(0x01021994)
// FsMagicVxFS filesystem id for VxFs
FsMagicVxFS = FsMagic(0xa501fcf5)
// FsMagicXfs filesystem id for Xfs
FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
)
var (
// Slice of drivers that should be used, in order
priority = []string{
"aufs",
"btrfs",
"zfs",
"devicemapper",
"overlay",
"vfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
FsMagicRAMFs: "ramfs",
FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb",
FsMagicSquashFs: "squashfs",
FsMagicTmpFs: "tmpfs",
FsMagicUnsupported: "unsupported",
FsMagicVxFS: "vxfs",
FsMagicXfs: "xfs",
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}
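A short sketch of how GetFSMagic and FsNames combine to report the backing filesystem; the helper name and the path in the usage comment are hypothetical:

func backingFsName(root string) (string, error) {
	magic, err := graphdriver.GetFSMagic(root)
	if err != nil {
		return "", err
	}
	if name, ok := graphdriver.FsNames[magic]; ok {
		return name, nil
	}
	return "unknown", nil
}

// backingFsName("/var/lib/containers/storage") -> "xfs", "extfs", ...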


@ -0,0 +1,65 @@
// +build solaris,cgo
package graphdriver
/*
#include <sys/statvfs.h>
#include <stdlib.h>
static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"unsafe"
log "github.com/Sirupsen/logrus"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used, in order
priority = []string{
"zfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil
}
// Mounted checks if the given path is mounted as the fs type
// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
return false, ErrPrerequisites
}
return true, nil
}
}


@ -0,0 +1,15 @@
// +build !linux,!windows,!freebsd,!solaris
package graphdriver
var (
// Slice of drivers that should be used, in order
priority = []string{
"unsupported",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagicUnsupported, nil
}


@ -0,0 +1,14 @@
package graphdriver
var (
// Slice of drivers that should be used in order
priority = []string{
"windowsfilter",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
// Note it is OK to return FsMagicUnsupported on Windows.
return FsMagicUnsupported, nil
}

157
vendor/github.com/containers/storage/drivers/fsdiff.go generated vendored Normal file

@ -0,0 +1,157 @@
package graphdriver
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
)
var (
// ApplyUncompressedLayer defines the unpack method used by the graph
// driver.
ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
)
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
// support on its own. See the comment on the exported
// NewNaiveDiffDriver function below.
// Notably, the AUFS driver doesn't need to be wrapped like this.
type NaiveDiffDriver struct {
ProtoDriver
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
// Diff(id, parent string) (archive.Archive, error)
// Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
// DiffSize(id, parent string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
gdw := &NaiveDiffDriver{
ProtoDriver: driver,
uidMaps: uidMaps,
gidMaps: gidMaps,
}
return gdw
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
layerFs, err := gdw.Get(id, "")
if err != nil {
return nil, err
}
defer func() {
if err != nil {
gdw.Put(id)
}
}()
if parent == "" {
archive, err := archive.Tar(layerFs, archive.Uncompressed)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
gdw.Put(id)
return err
}), nil
}
parentFs, err := gdw.Get(parent, "")
if err != nil {
return nil, err
}
defer gdw.Put(parent)
changes, err := archive.ChangesDirs(layerFs, parentFs)
if err != nil {
return nil, err
}
archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
gdw.Put(id)
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
layerFs, err := gdw.Get(id, "")
if err != nil {
return nil, err
}
defer gdw.Put(id)
parentFs := ""
if parent != "" {
parentFs, err = gdw.Get(parent, "")
if err != nil {
return nil, err
}
defer gdw.Put(parent)
}
return archive.ChangesDirs(layerFs, parentFs)
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
// Mount the root filesystem so we can apply the diff/layer.
layerFs, err := gdw.Get(id, "")
if err != nil {
return
}
defer gdw.Put(id)
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
GIDMaps: gdw.gidMaps}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
return
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
changes, err := gdw.Changes(id, parent)
if err != nil {
return
}
layerFs, err := gdw.Get(id, "")
if err != nil {
return
}
defer gdw.Put(id)
return archive.ChangesSize(layerFs, changes), nil
}
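A minimal sketch of the wrapper in use, assuming drv already wraps a backend via NewNaiveDiffDriver; the helper and the layer ids are hypothetical. It exports the changes one layer made relative to its parent and replays them onto another layer created from the same parent:

func cloneLayer(drv graphdriver.Driver, child, parent, clone string) error {
	diff, err := drv.Diff(child, parent) // uncompressed tar of the changes
	if err != nil {
		return err
	}
	defer diff.Close()
	// "clone" must already exist with "parent" as its parent
	_, err = drv.ApplyDiff(clone, parent, diff)
	return err
}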


@ -0,0 +1,169 @@
// +build linux
package overlay
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
)
type copyFlags int
const (
copyHardlink copyFlags = 1 << iota
)
func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
if err != nil {
return err
}
defer dstFile.Close()
_, err = pools.Copy(dstFile, srcFile)
return err
}
func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}
func copyDir(srcDir, dstDir string, flags copyFlags) error {
err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(srcDir, srcPath)
if err != nil {
return err
}
dstPath := filepath.Join(dstDir, relPath)
if err != nil {
return err
}
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
}
isHardlink := false
switch f.Mode() & os.ModeType {
case 0: // Regular file
if flags&copyHardlink != 0 {
isHardlink = true
if err := os.Link(srcPath, dstPath); err != nil {
return err
}
} else {
if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
return err
}
}
case os.ModeDir:
if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
return err
}
case os.ModeSymlink:
link, err := os.Readlink(srcPath)
if err != nil {
return err
}
if err := os.Symlink(link, dstPath); err != nil {
return err
}
case os.ModeNamedPipe:
fallthrough
case os.ModeSocket:
if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
return err
}
case os.ModeDevice:
if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}
default:
return fmt.Errorf("Unknown file type for %s\n", srcPath)
}
// Everything below is copying metadata from src to dst. All this metadata
// already shares an inode for hardlinks.
if isHardlink {
return nil
}
if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}
// We need to copy this attribute if it appears in an overlay upper layer, as
// this function is used to copy those. It is set by overlay if a directory
// is removed and then re-created and should not inherit anything from the
// same dir in the lower dir.
if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
return err
}
isSymlink := f.Mode()&os.ModeSymlink != 0
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if !isSymlink {
if err := os.Chmod(dstPath, f.Mode()); err != nil {
return err
}
}
// system.Chtimes doesn't support a NOFOLLOW flag atm
if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
return err
}
} else {
ts := []syscall.Timespec{stat.Atim, stat.Mtim}
if err := system.LUtimesNano(dstPath, ts); err != nil {
return err
}
}
return nil
})
return err
}
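A sketch of the two copy modes above, with a hypothetical helper and paths: hardlink mode shares regular-file data with the source (as this driver's ApplyDiff does), while the default mode makes full copies (as Create does for the parent's "upper" dir):

func copyFromParent(parentDir, childDir string) error {
	// hardlink mode: regular files share data with the source (used by ApplyDiff)
	if err := copyDir(filepath.Join(parentDir, "root"), filepath.Join(childDir, "tmproot"), copyHardlink); err != nil {
		return err
	}
	// default mode: full copies preserving owners, xattrs and timestamps (used by Create)
	return copyDir(filepath.Join(parentDir, "upper"), filepath.Join(childDir, "upper"), 0)
}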


@ -0,0 +1,450 @@
// +build linux
package overlay
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/opencontainers/runc/libcontainer/label"
)
// This is a small wrapper over the NaiveDiffDriver that lets us have a custom
// implementation of ApplyDiff()
var (
// ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer.
ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
backingFs = "<unknown>"
)
// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
type ApplyDiffProtoDriver interface {
graphdriver.ProtoDriver
// ApplyDiff writes the diff to the archive for the given id and parent id.
// It returns the size in bytes written if successful; otherwise the error ErrApplyDiffFallback is returned.
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
}
type naiveDiffDriverWithApply struct {
graphdriver.Driver
applyDiff ApplyDiffProtoDriver
}
// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff.
func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver {
return &naiveDiffDriverWithApply{
Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps),
applyDiff: driver,
}
}
// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback.
func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
b, err := d.applyDiff.ApplyDiff(id, parent, diff)
if err == ErrApplyDiffFallback {
return d.Driver.ApplyDiff(id, parent, diff)
}
return b, err
}
// This backend uses the overlay union filesystem for containers
// plus hard link file sharing for images.
// Each container/image can have a "root" subdirectory which is a plain
// filesystem hierarchy, or they can use overlay.
// If they use overlay there is an "upper" directory and a "lower-id"
// file, as well as "merged" and "work" directories. The "upper"
// directory has the upper layer of the overlay, and "lower-id" contains
// the id of the parent whose "root" directory shall be used as the lower
// layer in the overlay. The overlay itself is mounted in the "merged"
// directory, and the "work" dir is needed for overlay to work.
// When an overlay layer is created there are two cases: either the
// parent has a "root" dir, in which case we start out with an empty "upper"
// directory overlaid on the parent's root. This is typically the
// case with the init layer of a container which is based on an image.
// If there is no "root" in the parent, we inherit the lower-id from
// the parent and start by making a copy in the parent's "upper" dir.
// This is typically the case for a container layer which copies
// its parent -init upper layer.
// Additionally we also have a custom implementation of ApplyLayer
// which makes a recursive copy of the parent "root" layer using
// hardlinks to share file data, and then applies the layer on top
// of that. This means all child images share file (but not directory)
// data with the parent.
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
func init() {
graphdriver.Register("overlay", Init)
}
// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
if err := supportsOverlay(); err != nil {
return nil, graphdriver.ErrNotSupported
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
}
return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
}
func supportsOverlay() error {
// We can try to modprobe overlay first before looking at
// proc/filesystems for when overlay is supported
exec.Command("modprobe", "overlay").Run()
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if s.Text() == "nodev\toverlay" {
return nil
}
}
logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return graphdriver.ErrNotSupported
}
func (d *Driver) String() string {
return "overlay"
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation.
func (d *Driver) Status() [][2]string {
return [][2]string{
{"Backing Filesystem", backingFs},
}
}
// GetMetadata returns metadata about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergedDir used to store data.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
metadata := make(map[string]string)
// If id has a root, it is an image
rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil {
metadata["RootDir"] = rootDir
return metadata, nil
}
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
if err != nil {
return nil, err
}
metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
metadata["UpperDir"] = path.Join(dir, "upper")
metadata["WorkDir"] = path.Join(dir, "work")
metadata["MergedDir"] = path.Join(dir, "merged")
return metadata, nil
}
// Cleanup releases any state created by overlay which should be cleaned up
// when the daemon is shut down. For now, we just have to unmount the bind
// mount we created.
func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
// The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for overlay")
}
dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
return err
}
defer func() {
// Clean up on failure
if retErr != nil {
os.RemoveAll(dir)
}
}()
// Toplevel images are just a "root" dir
if parent == "" {
if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil {
return err
}
return nil
}
parentDir := d.dir(parent)
// Ensure parent exists
if _, err := os.Lstat(parentDir); err != nil {
return err
}
// If parent has a root, just do an overlay to it
parentRoot := path.Join(parentDir, "root")
if s, err := os.Lstat(parentRoot); err == nil {
if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil {
return err
}
return nil
}
// Otherwise, copy the upper and the lower-id from the parent
lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id"))
if err != nil {
return err
}
if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil {
return err
}
parentUpperDir := path.Join(parentDir, "upper")
s, err := os.Lstat(parentUpperDir)
if err != nil {
return err
}
upperDir := path.Join(dir, "upper")
if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
return err
}
return copyDir(parentUpperDir, upperDir, 0)
}
func (d *Driver) dir(id string) string {
return path.Join(d.home, id)
}
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
// If id has a root, just return it
rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil {
return rootDir, nil
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
}
}
}()
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
if err != nil {
return "", err
}
var (
lowerDir = path.Join(d.dir(string(lowerID)), "root")
upperDir = path.Join(dir, "upper")
workDir = path.Join(dir, "work")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
)
if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return "", err
}
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err
}
return mergedDir, nil
}
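For reference, the opts string assembled in Get above expands to absolute paths under the driver home; with placeholder ids it looks like:

lowerdir=<home>/<lower-id>/root,upperdir=<home>/<id>/upper,workdir=<home>/<id>/work

When a mountLabel is set, label.FormatMountLabel additionally appends a context= option to this string.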
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
err := syscall.Unmount(mountpoint, 0)
if err != nil {
rootDir := path.Join(d.dir(id), "root")
if _, err := os.Stat(rootDir); err == nil {
// We weren't mounting a "merged" directory anyway
return nil
}
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return err
}
// ApplyDiff applies the new layer on top of the root; if the parent does not exist it will return an ErrApplyDiffFallback error.
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
dir := d.dir(id)
if parent == "" {
return 0, ErrApplyDiffFallback
}
parentRootDir := path.Join(d.dir(parent), "root")
if _, err := os.Stat(parentRootDir); err != nil {
return 0, ErrApplyDiffFallback
}
// We now know there is a parent, and it has a "root" directory containing
// the full root filesystem. We can just hardlink it and apply the
// layer. This relies on two things:
// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
// These are all currently true and are not expected to break
tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
if err != nil {
return 0, err
}
defer func() {
if err != nil {
os.RemoveAll(tmpRootDir)
} else {
os.RemoveAll(path.Join(dir, "upper"))
os.RemoveAll(path.Join(dir, "work"))
os.RemoveAll(path.Join(dir, "merged"))
os.RemoveAll(path.Join(dir, "lower-id"))
}
}()
if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
return 0, err
}
options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
return 0, err
}
rootDir := path.Join(dir, "root")
if err := os.Rename(tmpRootDir, rootDir); err != nil {
return 0, err
}
return
}
// Exists checks to see if the id is already mounted.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
return err == nil
}


@ -0,0 +1,3 @@
// +build !linux
package overlay


@ -0,0 +1,88 @@
// +build linux
package overlay2
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"runtime"
"syscall"
"github.com/containers/storage/pkg/reexec"
)
func init() {
reexec.Register("docker-mountfrom", mountFromMain)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
type mountOptions struct {
Device string
Target string
Type string
Label string
Flag uint32
}
func mountFrom(dir, device, target, mType, label string) error {
options := &mountOptions{
Device: device,
Target: target,
Type: mType,
Flag: 0,
Label: label,
}
cmd := reexec.Command("docker-mountfrom", dir)
w, err := cmd.StdinPipe()
if err != nil {
return fmt.Errorf("mountfrom error on pipe creation: %v", err)
}
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
}
// write the options to the pipe for the re-exec'd mount process to read
if err := json.NewEncoder(w).Encode(options); err != nil {
return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
}
return nil
}
// mountFromMain is the entry-point for docker-mountfrom on re-exec.
func mountFromMain() {
runtime.LockOSThread()
flag.Parse()
var options *mountOptions
if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
fatal(err)
}
if err := os.Chdir(flag.Arg(0)); err != nil {
fatal(err)
}
if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
fatal(err)
}
os.Exit(0)
}
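The re-exec pattern used here is general: a binary registers named entry points at import time and, in main, lets reexec dispatch to them when the process was started via reexec.Command. A minimal sketch, assuming the pkg/reexec API vendored in this commit; the handler name is hypothetical:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	reexec.Register("my-helper", func() {
		fmt.Println("re-exec'd with args:", os.Args)
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() { // true when this process was started as "my-helper"
		return
	}
	cmd := reexec.Command("my-helper", "/some/dir")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}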


@ -0,0 +1,514 @@
// +build linux
package overlay2
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strconv"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/parsers/kernel"
"github.com/opencontainers/runc/libcontainer/label"
)
var (
// untar defines the untar method
untar = chrootarchive.UntarUncompressed
)
// This backend uses the overlay union filesystem for containers
// with diff directories for each layer.
// This version of the overlay driver requires at least kernel
// 4.0.0 in order to support mounting multiple diff directories.
// Each container/image has at least a "diff" directory and "link" file.
// There is also a "lower" file when there are diff layers
// below, as well as "merged" and "work" directories. The "diff" directory
// has the upper layer of the overlay and is used to capture any
// changes to the layer. The "lower" file contains all the lower layer
// mounts separated by ":" and ordered from uppermost to lowermost
// layers. The overlay itself is mounted in the "merged" directory,
// and the "work" dir is needed for overlay to work.
// The "link" file for each layer contains a unique string for the layer.
// Under the "l" directory at the root there will be a symbolic link
// with that unique string pointing to the "diff" directory for the layer.
// The symbolic links are used to reference lower layers in the "lower"
// file and on mount. The links are used to shorten the total length
// of a layer reference without requiring changes to the layer identifier
// or root directory. Mounts are always done relative to root and
// referencing the symbolic links in order to ensure the number of
// lower directories can fit in a single page for making the mount
// syscall. A hard upper limit of 128 lower layers is enforced to ensure
// that mounts do not fail due to length.
const (
driverName = "overlay2"
linkDir = "l"
lowerFile = "lower"
maxDepth = 128
// idLength represents the number of random characters
// which can be used to create the unique link identifier
// for every layer. If this value is too long then the
// page size limit for the mount command may be exceeded.
// The idLength should be selected such that the following
// inequality holds (512 is a buffer for label metadata).
// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
idLength = 26
)
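Plugging in the constants checks the bound exactly: (26 + len("l") + 1) * 128 = 28 * 128 = 3584, and with a 4096-byte page, 4096 - 512 = 3584, so a full 128-deep chain of lower-layer references just fits in the mount data limit.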
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
var backingFs = "<unknown>"
func init() {
graphdriver.Register(driverName, Init)
}
// Init returns a native diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
opts, err := parseOptions(options)
if err != nil {
return nil, err
}
if err := supportsOverlay(); err != nil {
return nil, graphdriver.ErrNotSupported
}
// require kernel 4.0.0 to ensure multiple lower dirs are supported
v, err := kernel.GetKernelVersion()
if err != nil {
return nil, err
}
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
if !opts.overrideKernelCheck {
return nil, graphdriver.ErrNotSupported
}
logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
}
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFs = fsName
}
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
switch fsMagic {
case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay2' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
d := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
}
return d, nil
}
type overlayOptions struct {
overrideKernelCheck bool
}
func parseOptions(options []string) (*overlayOptions, error) {
o := &overlayOptions{}
for _, option := range options {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "overlay2.override_kernel_check":
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("overlay2: Unknown option %s\n", key)
}
}
return o, nil
}
func supportsOverlay() error {
// We can try to modprobe overlay first before looking at
// proc/filesystems for when overlay is supported
exec.Command("modprobe", "overlay").Run()
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if s.Text() == "nodev\toverlay" {
return nil
}
}
logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return graphdriver.ErrNotSupported
}
func (d *Driver) String() string {
return driverName
}
// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation.
func (d *Driver) Status() [][2]string {
return [][2]string{
{"Backing Filesystem", backingFs},
}
}
// GetMetadata returns metadata about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergedDir used to store data.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return nil, err
}
metadata := map[string]string{
"WorkDir": path.Join(dir, "work"),
"MergedDir": path.Join(dir, "merged"),
"UpperDir": path.Join(dir, "diff"),
}
lowerDirs, err := d.getLowerDirs(id)
if err != nil {
return nil, err
}
if len(lowerDirs) > 0 {
metadata["LowerDir"] = strings.Join(lowerDirs, ":")
}
return metadata, nil
}
// Cleanup releases any state created by overlay which should be cleaned up
// when the daemon is shut down. For now, we just have to unmount the bind
// mount we created.
func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
// The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for overlay")
}
dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
return err
}
defer func() {
// Clean up on failure
if retErr != nil {
os.RemoveAll(dir)
}
}()
if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
return err
}
// Write link id to link file
if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
return err
}
// if no parent directory, done
if parent == "" {
return nil
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
return err
}
lower, err := d.getLower(parent)
if err != nil {
return err
}
if lower != "" {
if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
return err
}
}
return nil
}
func (d *Driver) getLower(parent string) (string, error) {
parentDir := d.dir(parent)
// Ensure parent exists
if _, err := os.Lstat(parentDir); err != nil {
return "", err
}
// Read the parent's link file
parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
if err != nil {
return "", err
}
lowers := []string{path.Join(linkDir, string(parentLink))}
parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
if err == nil {
parentLowers := strings.Split(string(parentLower), ":")
lowers = append(lowers, parentLowers...)
}
if len(lowers) > maxDepth {
return "", errors.New("max depth exceeded")
}
return strings.Join(lowers, ":"), nil
}
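Concretely, the string returned here (and later written to the "lower" file by Create) is a ':'-separated chain of short symlink paths, uppermost layer first; with hypothetical link ids it looks like:

l/6Y5IM2XC7TSNIJZZFLJCS6I4I4:l/B3WWEFKBG3PLLV737KZFIASSW7:l/JEYMODZYFCZFYSDABYXD5MF6YO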
func (d *Driver) dir(id string) string {
return path.Join(d.home, id)
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
var lowersArray []string
lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lp, err := os.Readlink(path.Join(d.home, s))
if err != nil {
return nil, err
}
lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp)))
}
} else if !os.IsNotExist(err) {
return nil, err
}
return lowersArray, nil
}
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
dir := d.dir(id)
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
}
}
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
diffDir := path.Join(dir, "diff")
lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
if err != nil {
// If no lower, just return diff directory
if os.IsNotExist(err) {
return diffDir, nil
}
return "", err
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
return mergedDir, nil
}
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
}
}
}()
workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
mountLabel = label.FormatMountLabel(opts, mountLabel)
if len(mountLabel) > syscall.Getpagesize() {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
}
if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return "", err
}
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err
}
return mergedDir, nil
}
// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
err := syscall.Unmount(mountpoint, 0)
if err != nil {
if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil {
// We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway
return nil
}
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return err
}
// Exists checks to see if the id is already mounted.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
return err == nil
}
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
applyDir := d.getDiffPath(id)
logrus.Debugf("Applying tar in %s", applyDir)
// Overlay doesn't need the parent id to apply the diff
if err := untar(diff, applyDir, &archive.TarOptions{
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
}); err != nil {
return 0, err
}
return d.DiffSize(id, parent)
}
func (d *Driver) getDiffPath(id string) string {
dir := d.dir(id)
return path.Join(dir, "diff")
}
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
return directory.Size(d.getDiffPath(id))
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
diffPath := d.getDiffPath(id)
logrus.Debugf("Tar with options on %s", diffPath)
return archive.TarWithOptions(diffPath, &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
})
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// Overlay doesn't have snapshots, so we need to get changes from all parent
// layers.
diffPath := d.getDiffPath(id)
layers, err := d.getLowerDirs(id)
if err != nil {
return nil, err
}
return archive.OverlayChanges(layers, diffPath)
}


@ -0,0 +1,3 @@
// +build !linux
package overlay2


@ -0,0 +1,80 @@
// +build linux
package overlay2
import (
"crypto/rand"
"encoding/base32"
"fmt"
"io"
"os"
"syscall"
"time"
"github.com/Sirupsen/logrus"
)
// generateID creates a new random string identifier with the given length
func generateID(l int) string {
const (
// ensures we back off for less than 450ms total. Use the following to
// select new value, in units of 10ms:
// n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
maxretries = 9
backoff = time.Millisecond * 10
)
var (
totalBackoff time.Duration
count int
retries int
size = (l*5 + 7) / 8
u = make([]byte, size)
)
// TODO: Include time component, counter component, random component
for {
// This should never block but the read may fail. Because of this,
// we just try to read the random number generator until we get
// something. This is a very rare condition but may happen.
b := time.Duration(retries) * backoff
time.Sleep(b)
totalBackoff += b
n, err := io.ReadFull(rand.Reader, u[count:])
if err != nil {
if retryOnError(err) && retries < maxretries {
count += n
retries++
logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
}
break
}
s := base32.StdEncoding.EncodeToString(u)
return s[:l]
}
// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
switch err := err.(type) {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == syscall.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry.
return true
}
}
return false
}
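Working the byte math for this driver's idLength of 26: size = (26*5 + 7) / 8 = 17 random bytes, which base32-encode to 28 significant characters (padded out to 32), and s[:l] keeps the first 26.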

32
vendor/github.com/containers/storage/drivers/plugin.go generated vendored Normal file

@ -0,0 +1,32 @@
// +build experimental
package graphdriver
import (
"fmt"
"io"
"github.com/containers/storage/pkg/plugins"
)
type pluginClient interface {
// Call calls the specified method with the specified arguments for the plugin.
Call(string, interface{}, interface{}) error
// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
Stream(string, interface{}) (io.ReadCloser, error)
// SendFile calls the specified method, and passes through the IO stream
SendFile(string, io.Reader, interface{}) error
}
func lookupPlugin(name, home string, opts []string) (Driver, error) {
pl, err := plugins.Get(name, "GraphDriver")
if err != nil {
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
}
return newPluginDriver(name, home, opts, pl.Client())
}
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
proxy := &graphDriverProxy{name, c}
return proxy, proxy.Init(home, opts)
}


@ -0,0 +1,7 @@
// +build !experimental
package graphdriver
func lookupPlugin(name, home string, opts []string) (Driver, error) {
return nil, ErrNotSupported
}

225
vendor/github.com/containers/storage/drivers/proxy.go generated vendored Normal file

@ -0,0 +1,225 @@
// +build experimental
package graphdriver
import (
"errors"
"fmt"
"github.com/containers/storage/pkg/archive"
)
type graphDriverProxy struct {
name string
client pluginClient
}
type graphDriverRequest struct {
ID string `json:",omitempty"`
Parent string `json:",omitempty"`
MountLabel string `json:",omitempty"`
}
type graphDriverResponse struct {
Err string `json:",omitempty"`
Dir string `json:",omitempty"`
Exists bool `json:",omitempty"`
Status [][2]string `json:",omitempty"`
Changes []archive.Change `json:",omitempty"`
Size int64 `json:",omitempty"`
Metadata map[string]string `json:",omitempty"`
}
type graphDriverInitRequest struct {
Home string
Opts []string
}
func (d *graphDriverProxy) Init(home string, opts []string) error {
args := &graphDriverInitRequest{
Home: home,
Opts: opts,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) String() string {
return d.name
}
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Remove(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
args := &graphDriverRequest{
ID: id,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
return "", err
}
var err error
if ret.Err != "" {
err = errors.New(ret.Err)
}
return ret.Dir, err
}
func (d *graphDriverProxy) Put(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Exists(id string) bool {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
return false
}
return ret.Exists
}
func (d *graphDriverProxy) Status() [][2]string {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
return nil
}
return ret.Status
}
func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
args := &graphDriverRequest{
ID: id,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Metadata, nil
}
func (d *graphDriverProxy) Cleanup() error {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
return nil
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
body, err := d.client.Stream("GraphDriver.Diff", args)
if err != nil {
return nil, err
}
return archive.Archive(body), nil
}
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Changes, nil
}
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
var ret graphDriverResponse
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
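A sketch of what the proxy exchanges with a plugin for a Get call, assuming the HTTP/JSON transport in pkg/plugins; the id and path are hypothetical:

// POST <plugin socket>/GraphDriver.Get
//   {"ID":"abc123","MountLabel":""}
// response
//   {"Dir":"/var/lib/myplugin/abc123/merged"}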


@ -0,0 +1,8 @@
// +build !exclude_graphdriver_aufs,linux
package register
import (
// register the aufs graphdriver
_ "github.com/containers/storage/drivers/aufs"
)


@ -0,0 +1,8 @@
// +build !exclude_graphdriver_btrfs,linux
package register
import (
// register the btrfs graphdriver
_ "github.com/containers/storage/drivers/btrfs"
)


@ -0,0 +1,8 @@
// +build !exclude_graphdriver_devicemapper,linux
package register
import (
// register the devmapper graphdriver
_ "github.com/containers/storage/drivers/devmapper"
)


@@ -0,0 +1,9 @@
// +build !exclude_graphdriver_overlay,linux

package register
import (
// register the overlay graphdriver
_ "github.com/containers/storage/drivers/overlay"
_ "github.com/containers/storage/drivers/overlay2"
)


@@ -0,0 +1,6 @@
package register
import (
// register vfs
_ "github.com/containers/storage/drivers/vfs"
)


@@ -0,0 +1,6 @@
package register
import (
// register the windows graph driver
_ "github.com/containers/storage/drivers/windows"
)


@@ -0,0 +1,8 @@
// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris

package register
import (
// register the zfs driver
_ "github.com/containers/storage/drivers/zfs"
)
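
All seven register files above rely on Go's blank-import idiom: the driver package is imported solely for its init side effect, which calls graphdriver.Register. A minimal sketch of the driver's side of that handshake, using a hypothetical driver name that is not part of this diff:

package mydriver

import (
	"github.com/containers/storage/drivers" // package graphdriver
	"github.com/containers/storage/pkg/idtools"
)

func init() {
	// Makes the driver selectable by name once this package is linked in.
	graphdriver.Register("mydriver", Init)
}

// Init must satisfy graphdriver.InitFunc, like the vfs and zfs Init functions below.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	return nil, nil // placeholder only
}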


@@ -0,0 +1,145 @@
package vfs
import (
"fmt"
"os"
"path/filepath"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runc/libcontainer/label"
)
var (
// CopyWithTar defines the copy method to use.
CopyWithTar = chrootarchive.CopyWithTar
)
func init() {
graphdriver.Register("vfs", Init)
}
// Init returns a new VFS driver.
// This sets the home directory for the driver and returns NaiveDiffDriver.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
d := &Driver{
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
return nil, err
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
// Driver holds information about the driver, such as the home directory of the driver.
// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations.
// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
func (d *Driver) String() string {
return "vfs"
}
// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information.
func (d *Driver) Status() [][2]string {
return nil
}
// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any metadata.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
return nil, nil
}
// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
func (d *Driver) Cleanup() error {
return nil
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for vfs")
}
dir := d.dir(id)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil {
return err
}
if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil {
return err
}
opts := []string{"level:s0"}
if _, mountLabel, err := label.InitLabels(opts); err == nil {
label.SetFileLabel(dir, mountLabel)
}
if parent == "" {
return nil
}
parentDir, err := d.Get(parent, "")
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
if err := CopyWithTar(parentDir, dir); err != nil {
return err
}
return nil
}
func (d *Driver) dir(id string) string {
return filepath.Join(d.home, "dir", filepath.Base(id))
}
// Remove deletes the content from the directory for a given id.
func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// Get returns the directory for the given id.
func (d *Driver) Get(id, mountLabel string) (string, error) {
dir := d.dir(id)
if st, err := os.Stat(dir); err != nil {
return "", err
} else if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}
return dir, nil
}
// Put is a noop for vfs and returns a nil error, since this driver has no runtime resources to clean up.
func (d *Driver) Put(id string) error {
// The vfs driver has no runtime resources (e.g. mounts)
// to clean up, so we don't need anything here
return nil
}
// Exists checks to see if the directory exists for the given id.
func (d *Driver) Exists(id string) bool {
_, err := os.Stat(d.dir(id))
return err == nil
}
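
The vfs driver is the simplest possible ProtoDriver: Create materializes a layer by copying the whole parent directory, and Get just returns a path, so nothing is ever mounted. A usage sketch against the graphdriver API, with a made-up home path and ids:

// Init wraps the vfs driver in a NaiveDiffDriver, which synthesizes
// Diff/ApplyDiff on top of the plain directory operations above.
driver, err := Init("/var/lib/containers/storage/vfs", nil, nil, nil)
if err != nil {
	panic(err)
}
if err := driver.Create("base", "", "", nil); err != nil { // empty parent: fresh layer
	panic(err)
}
if err := driver.Create("child", "base", "", nil); err != nil { // full copy of "base"
	panic(err)
}
dir, _ := driver.Get("child", "") // .../vfs/dir/child; no mount involved
_ = dir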


@@ -0,0 +1,779 @@
// +build windows

package windows
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"unsafe"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/longpath"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/system"
"github.com/vbatts/tar-split/tar/storage"
)
// filterDriver is an HCSShim driver type for the Windows Filter driver.
const filterDriver = 1
// init registers the windows graph drivers to the register.
func init() {
graphdriver.Register("windowsfilter", InitFilter)
reexec.Register("docker-windows-write-layer", writeLayer)
}
type checker struct {
}
func (c *checker) IsMounted(path string) bool {
return false
}
// Driver represents a windows graph driver.
type Driver struct {
// info stores the shim driver information
info hcsshim.DriverInfo
ctr *graphdriver.RefCounter
// it is safe for windows to use a cache here because it does not support
// restoring containers when the daemon dies.
cacheMu sync.Mutex
cache map[string]string
}
func isTP5OrOlder() bool {
return system.GetOSVersion().Build <= 14300
}
// InitFilter returns a new Windows storage filter driver.
func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
d := &Driver{
info: hcsshim.DriverInfo{
HomeDir: home,
Flavour: filterDriver,
},
cache: make(map[string]string),
ctr: graphdriver.NewRefCounter(&checker{}),
}
return d, nil
}
// String returns the string representation of a driver. This should match
// the name the graph driver has been registered with.
func (d *Driver) String() string {
return "windowsfilter"
}
// Status returns the status of the driver.
func (d *Driver) Status() [][2]string {
return [][2]string{
{"Windows", ""},
}
}
// Exists returns true if the given id is registered with this driver.
func (d *Driver) Exists(id string) bool {
rID, err := d.resolveID(id)
if err != nil {
return false
}
result, err := hcsshim.LayerExists(d.info, rID)
if err != nil {
return false
}
return result
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.create(id, parent, mountLabel, false, storageOpt)
}
// Create creates a new read-only layer with the given id.
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.create(id, parent, mountLabel, true, storageOpt)
}
func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error {
if len(storageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for windows")
}
rPId, err := d.resolveID(parent)
if err != nil {
return err
}
parentChain, err := d.getLayerChain(rPId)
if err != nil {
return err
}
var layerChain []string
if rPId != "" {
parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
if err != nil {
return err
}
if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil {
// This is a legitimate parent layer (not the empty "-init" layer),
// so include it in the layer chain.
layerChain = []string{parentPath}
}
}
layerChain = append(layerChain, parentChain...)
if readOnly {
if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil {
return err
}
} else {
var parentPath string
if len(layerChain) != 0 {
parentPath = layerChain[0]
}
if isTP5OrOlder() {
// Pre-create the layer directory, providing an ACL to give the Hyper-V Virtual Machines
// group access. This is necessary to ensure that Hyper-V containers can access the
// virtual machine data. This is not necessary post-TP5.
path, err := syscall.UTF16FromString(filepath.Join(d.info.HomeDir, id))
if err != nil {
return err
}
// Give system and administrators full control, and VMs read, write, and execute.
// Mark these ACEs as inherited.
sd, err := winio.SddlToSecurityDescriptor("D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)(A;OICI;FRFWFX;;;S-1-5-83-0)")
if err != nil {
return err
}
err = syscall.CreateDirectory(&path[0], &syscall.SecurityAttributes{
Length: uint32(unsafe.Sizeof(syscall.SecurityAttributes{})),
SecurityDescriptor: uintptr(unsafe.Pointer(&sd[0])),
})
if err != nil {
return err
}
}
if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil {
return err
}
}
if _, err := os.Lstat(d.dir(parent)); err != nil {
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
}
if err := d.setLayerChain(id, layerChain); err != nil {
if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
}
return err
}
return nil
}
// dir returns the absolute path to the layer.
func (d *Driver) dir(id string) string {
return filepath.Join(d.info.HomeDir, filepath.Base(id))
}
// Remove unmounts and removes the dir information.
func (d *Driver) Remove(id string) error {
rID, err := d.resolveID(id)
if err != nil {
return err
}
os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail
return hcsshim.DestroyLayer(d.info, rID)
}
// Get returns the rootfs path for the id. This will mount the dir at its given path.
func (d *Driver) Get(id, mountLabel string) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string
rID, err := d.resolveID(id)
if err != nil {
return "", err
}
if count := d.ctr.Increment(rID); count > 1 {
return d.cache[rID], nil
}
// Getting the layer paths must be done outside of the lock.
layerChain, err := d.getLayerChain(rID)
if err != nil {
d.ctr.Decrement(rID)
return "", err
}
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
d.ctr.Decrement(rID)
return "", err
}
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
d.ctr.Decrement(rID)
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
}
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
if err != nil {
d.ctr.Decrement(rID)
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
}
return "", err
}
d.cacheMu.Lock()
d.cache[rID] = mountPath
d.cacheMu.Unlock()
// If the layer has a mount path, use that. Otherwise, use the
// folder path.
if mountPath != "" {
dir = mountPath
} else {
dir = d.dir(id)
}
return dir, nil
}
// Put releases the layer: once its reference count drops to zero it is unprepared and deactivated.
func (d *Driver) Put(id string) error {
logrus.Debugf("WindowsGraphDriver Put() id %s", id)
rID, err := d.resolveID(id)
if err != nil {
return err
}
if count := d.ctr.Decrement(rID); count > 0 {
return nil
}
d.cacheMu.Lock()
delete(d.cache, rID)
d.cacheMu.Unlock()
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err
}
return hcsshim.DeactivateLayer(d.info, rID)
}
// Cleanup ensures the information the driver stores is properly removed.
func (d *Driver) Cleanup() error {
return nil
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function
func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
rID, err := d.resolveID(id)
if err != nil {
return
}
layerChain, err := d.getLayerChain(rID)
if err != nil {
return
}
// this is assuming that the layer is unmounted
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err
}
prepare := func() {
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}
arch, err := d.exportLayer(rID, layerChain)
if err != nil {
prepare()
return
}
return ioutils.NewReadCloserWrapper(arch, func() error {
err := arch.Close()
prepare()
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should be mounted when calling this function
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
rID, err := d.resolveID(id)
if err != nil {
return nil, err
}
parentChain, err := d.getLayerChain(rID)
if err != nil {
return nil, err
}
// this is assuming that the layer is unmounted
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return nil, err
}
defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}()
var changes []archive.Change
err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
if err != nil {
return err
}
defer r.Close()
for {
name, _, fileInfo, err := r.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
name = filepath.ToSlash(name)
if fileInfo == nil {
changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
} else {
// Currently there is no way to tell an add from a modify.
changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
}
}
})
if err != nil {
return nil, err
}
return changes, nil
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
var layerChain []string
if parent != "" {
rPId, err := d.resolveID(parent)
if err != nil {
return 0, err
}
parentChain, err := d.getLayerChain(rPId)
if err != nil {
return 0, err
}
parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
if err != nil {
return 0, err
}
layerChain = append(layerChain, parentPath)
layerChain = append(layerChain, parentChain...)
}
size, err := d.importLayer(id, diff, layerChain)
if err != nil {
return 0, err
}
if err = d.setLayerChain(id, layerChain); err != nil {
return 0, err
}
return size, nil
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
rPId, err := d.resolveID(parent)
if err != nil {
return
}
changes, err := d.Changes(id, rPId)
if err != nil {
return
}
layerFs, err := d.Get(id, "")
if err != nil {
return
}
defer d.Put(id)
return archive.ChangesSize(layerFs, changes), nil
}
// GetMetadata returns custom driver information.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
m := make(map[string]string)
m["dir"] = d.dir(id)
return m, nil
}
func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
t := tar.NewWriter(w)
for {
name, size, fileInfo, err := r.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if fileInfo == nil {
// Write a whiteout file.
hdr := &tar.Header{
Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))),
}
err := t.WriteHeader(hdr)
if err != nil {
return err
}
} else {
err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
if err != nil {
return err
}
}
}
return t.Close()
}
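// Note on the whiteout mapping above: a deletion record from the layer reader
// (fileInfo == nil) becomes an AUFS-style whiteout entry in the tar, which is
// what the rest of the storage stack expects in a diff stream. For example, a
// deleted "dir/file" is emitted as the empty tar entry "dir/.wh.file", since
// archive.WhiteoutPrefix is ".wh.".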
// exportLayer generates an archive from a layer based on the given ID.
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) {
archive, w := io.Pipe()
go func() {
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths)
if err != nil {
return err
}
err = writeTarFromLayer(r, w)
cerr := r.Close()
if err == nil {
err = cerr
}
return err
})
w.CloseWithError(err)
}()
return archive, nil
}
func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) {
t := tar.NewReader(r)
hdr, err := t.Next()
totalSize := int64(0)
buf := bufio.NewWriter(nil)
for err == nil {
base := path.Base(hdr.Name)
if strings.HasPrefix(base, archive.WhiteoutPrefix) {
name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
err = w.Remove(filepath.FromSlash(name))
if err != nil {
return 0, err
}
hdr, err = t.Next()
} else if hdr.Typeflag == tar.TypeLink {
err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
if err != nil {
return 0, err
}
hdr, err = t.Next()
} else {
var (
name string
size int64
fileInfo *winio.FileBasicInfo
)
name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
if err != nil {
return 0, err
}
err = w.Add(filepath.FromSlash(name), fileInfo)
if err != nil {
return 0, err
}
buf.Reset(w)
// Add the Hyper-V Virtual Machine group ACE to the security descriptor
// for TP5 so that Xenons can access all files. This is not necessary
// for post-TP5 builds.
if isTP5OrOlder() {
if sddl, ok := hdr.Winheaders["sd"]; ok {
var ace string
if hdr.Typeflag == tar.TypeDir {
ace = "(A;OICI;0x1200a9;;;S-1-5-83-0)"
} else {
ace = "(A;;0x1200a9;;;S-1-5-83-0)"
}
if hdr.Winheaders["sd"], ok = addAceToSddlDacl(sddl, ace); !ok {
logrus.Debugf("failed to add VM ACE to %s", sddl)
}
}
}
hdr, err = backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
ferr := buf.Flush()
if ferr != nil {
err = ferr
}
totalSize += size
}
}
if err != io.EOF {
return 0, err
}
return totalSize, nil
}
func addAceToSddlDacl(sddl, ace string) (string, bool) {
daclStart := strings.Index(sddl, "D:")
if daclStart < 0 {
return sddl, false
}
dacl := sddl[daclStart:]
daclEnd := strings.Index(dacl, "S:")
if daclEnd < 0 {
daclEnd = len(dacl)
}
dacl = dacl[:daclEnd]
if strings.Contains(dacl, ace) {
return sddl, true
}
i := 2
for i+1 < len(dacl) {
if dacl[i] != '(' {
return sddl, false
}
if dacl[i+1] == 'A' {
break
}
i += 2
for p := 1; i < len(dacl) && p > 0; i++ {
if dacl[i] == '(' {
p++
} else if dacl[i] == ')' {
p--
}
}
}
return sddl[:daclStart+i] + ace + sddl[daclStart+i:], true
}
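// Worked example for addAceToSddlDacl: the new ACE is spliced in ahead of the
// first allow ("A") ACE, so any deny ACEs that precede it keep their priority.
// With the TP5 directory SDDL from create above, trimmed to two ACEs:
//
//	in:  "D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)"  plus ace "(A;;0x1200a9;;;S-1-5-83-0)"
//	out: "D:(A;;0x1200a9;;;S-1-5-83-0)(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)", ok == true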
// importLayer adds a new layer to the tag and graph store based on the given data.
func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) {
cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
output := bytes.NewBuffer(nil)
cmd.Stdin = layerData
cmd.Stdout = output
cmd.Stderr = output
if err = cmd.Start(); err != nil {
return
}
if err = cmd.Wait(); err != nil {
return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
}
return strconv.ParseInt(output.String(), 10, 64)
}
// writeLayer is the re-exec entry point for writing a layer from a tar file
func writeLayer() {
home := os.Args[1]
id := os.Args[2]
parentLayerPaths := os.Args[3:]
err := func() error {
err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
if err != nil {
return err
}
info := hcsshim.DriverInfo{
Flavour: filterDriver,
HomeDir: home,
}
w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
if err != nil {
return err
}
size, err := writeLayerFromTar(os.Stdin, w)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}
fmt.Fprint(os.Stdout, size)
return nil
}()
if err != nil {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
}
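// writeLayer only ever runs in a re-executed copy of the current binary:
// init() registered it under "docker-windows-write-layer", and importLayer
// launches that name via reexec.Command with the layer tar on stdin. For the
// round trip to work, the host binary's main must give reexec a chance to
// intercept first; a sketch of the standard pkg/reexec pattern, not part of
// this file:
//
//	func main() {
//		if reexec.Init() { // dispatches to writeLayer when argv[0] matches
//			return
//		}
//		// ... normal startup ...
//	}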
// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
if os.IsNotExist(err) {
return id, nil
} else if err != nil {
return "", err
}
return string(content), nil
}
// setID stores the layerID on disk.
func (d *Driver) setID(id, altID string) error {
err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerID"), []byte(altID), 0600)
if err != nil {
return err
}
return nil
}
// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
jPath := filepath.Join(d.dir(id), "layerchain.json")
content, err := ioutil.ReadFile(jPath)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
}
var layerChain []string
err = json.Unmarshal(content, &layerChain)
if err != nil {
return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err)
}
return layerChain, nil
}
// setLayerChain stores the layer chain information in disk.
func (d *Driver) setLayerChain(id string, chain []string) error {
content, err := json.Marshal(&chain)
if err != nil {
return fmt.Errorf("Failed to marshall layerchain json - %s", err)
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
err = ioutil.WriteFile(jPath, content, 0600)
if err != nil {
return fmt.Errorf("Unable to write layerchain file - %s", err)
}
return nil
}
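// The sidecar written above is a plain JSON array of absolute parent-layer
// paths, ordered from the immediate parent outwards, e.g. (paths made up):
//
//	["C:\\ProgramData\\docker\\windowsfilter\\parent", "C:\\ProgramData\\docker\\windowsfilter\\grandparent"]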
type fileGetCloserWithBackupPrivileges struct {
path string
}
func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
var f *os.File
// Open the file while holding the Windows backup privilege. This ensures that the
// file can be opened even if the caller does not actually have access to it according
// to the security descriptor.
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
path := longpath.AddPrefix(filepath.Join(fg.path, filename))
p, err := syscall.UTF16FromString(path)
if err != nil {
return err
}
h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if err != nil {
return &os.PathError{Op: "open", Path: path, Err: err}
}
f = os.NewFile(uintptr(h), path)
return nil
})
return f, err
}
func (fg *fileGetCloserWithBackupPrivileges) Close() error {
return nil
}
type fileGetDestroyCloser struct {
storage.FileGetter
path string
}
func (f *fileGetDestroyCloser) Close() error {
// TODO: activate layers and release here?
return os.RemoveAll(f.path)
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
id, err := d.resolveID(id)
if err != nil {
return nil, err
}
return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
}

vendor/github.com/containers/storage/drivers/zfs/zfs.go

@@ -0,0 +1,405 @@
// +build linux freebsd solaris

package zfs
import (
"fmt"
"os"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
zfs "github.com/mistifyio/go-zfs"
"github.com/opencontainers/runc/libcontainer/label"
)
type zfsOptions struct {
fsName string
mountPath string
}
func init() {
graphdriver.Register("zfs", Init)
}
// Logger returns a zfs logger implementation.
type Logger struct{}
// Log wraps log message from ZFS driver with a prefix '[zfs]'.
func (*Logger) Log(cmd []string) {
logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
}
// Init returns a new ZFS driver.
// It takes a base mount path and an array of options which are represented as key value pairs.
// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
var err error
if _, err := exec.LookPath("zfs"); err != nil {
logrus.Debugf("[zfs] zfs command is not available: %v", err)
return nil, graphdriver.ErrPrerequisites
}
file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
return nil, graphdriver.ErrPrerequisites
}
defer file.Close()
options, err := parseOptions(opt)
if err != nil {
return nil, err
}
options.mountPath = base
rootdir := path.Dir(base)
if options.fsName == "" {
err = checkRootdirFs(rootdir)
if err != nil {
return nil, err
}
}
if options.fsName == "" {
options.fsName, err = lookupZfsDataset(rootdir)
if err != nil {
return nil, err
}
}
zfs.SetLogger(new(Logger))
filesystems, err := zfs.Filesystems(options.fsName)
if err != nil {
return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
}
filesystemsCache := make(map[string]bool, len(filesystems))
var rootDataset *zfs.Dataset
for _, fs := range filesystems {
if fs.Name == options.fsName {
rootDataset = fs
}
filesystemsCache[fs.Name] = true
}
if rootDataset == nil {
return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
if err := mount.MakePrivate(base); err != nil {
return nil, err
}
d := &Driver{
dataset: rootDataset,
options: options,
filesystemsCache: filesystemsCache,
uidMaps: uidMaps,
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
func parseOptions(opt []string) (zfsOptions, error) {
var options zfsOptions
options.fsName = ""
for _, option := range opt {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return options, err
}
key = strings.ToLower(key)
switch key {
case "zfs.fsname":
options.fsName = val
default:
return options, fmt.Errorf("Unknown option %s", key)
}
}
return options, nil
}
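// For example, parseOptions accepts only the zfs.fsname key and rejects
// everything else:
//
//	parseOptions([]string{"zfs.fsname=zroot/docker"}) // fsName "zroot/docker", nil error
//	parseOptions([]string{"zfs.compress=on"})         // error: Unknown option zfs.compress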
func lookupZfsDataset(rootdir string) (string, error) {
var stat syscall.Stat_t
if err := syscall.Stat(rootdir, &stat); err != nil {
return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
}
wantedDev := stat.Dev
mounts, err := mount.GetMounts()
if err != nil {
return "", err
}
for _, m := range mounts {
if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
continue // may fail on fuse file systems
}
if stat.Dev == wantedDev && m.Fstype == "zfs" {
return m.Source, nil
}
}
return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
}
// Driver holds information about the driver, such as zfs dataset, options and cache.
type Driver struct {
dataset *zfs.Dataset
options zfsOptions
sync.Mutex // protects filesystem cache against concurrent access
filesystemsCache map[string]bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
func (d *Driver) String() string {
return "zfs"
}
// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
func (d *Driver) Cleanup() error {
return nil
}
// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information
// such as pool name, dataset name, disk usage, parent quota and compression used.
// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
// 'Space Available', 'Parent Quota' and 'Compression'.
func (d *Driver) Status() [][2]string {
parts := strings.Split(d.dataset.Name, "/")
pool, err := zfs.GetZpool(parts[0])
var poolName, poolHealth string
if err == nil {
poolName = pool.Name
poolHealth = pool.Health
} else {
poolName = fmt.Sprintf("error while getting pool information %v", err)
poolHealth = "not available"
}
quota := "no"
if d.dataset.Quota != 0 {
quota = strconv.FormatUint(d.dataset.Quota, 10)
}
return [][2]string{
{"Zpool", poolName},
{"Zpool Health", poolHealth},
{"Parent Dataset", d.dataset.Name},
{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
{"Parent Quota", quota},
{"Compression", d.dataset.Compression},
}
}
// GetMetadata returns image/container metadata related to graph driver
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
return nil, nil
}
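// cloneFilesystem snapshots the parent dataset, clones the snapshot into name
// with mountpoint=legacy, and then destroys the snapshot with deferred
// deletion, so ZFS reclaims it automatically once the clone itself is removed.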
func (d *Driver) cloneFilesystem(name, parentName string) error {
snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
parentDataset := zfs.Dataset{Name: parentName}
snapshot, err := parentDataset.Snapshot(snapshotName, false /* recursive */)
if err != nil {
return err
}
_, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
if err == nil {
d.Lock()
d.filesystemsCache[name] = true
d.Unlock()
}
if err != nil {
snapshot.Destroy(zfs.DestroyDeferDeletion)
return err
}
return snapshot.Destroy(zfs.DestroyDeferDeletion)
}
func (d *Driver) zfsPath(id string) string {
return d.options.fsName + "/" + id
}
func (d *Driver) mountPath(id string) string {
return path.Join(d.options.mountPath, "graph", getMountpoint(id))
}
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
return d.Create(id, parent, mountLabel, storageOpt)
}
// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error {
err := d.create(id, parent, storageOpt)
if err == nil {
return nil
}
if zfsError, ok := err.(*zfs.Error); ok {
if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") {
return err
}
// aborted build -> cleanup
} else {
return err
}
dataset := zfs.Dataset{Name: d.zfsPath(id)}
if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil {
return err
}
// retry
return d.create(id, parent, storageOpt)
}
func (d *Driver) create(id, parent string, storageOpt map[string]string) error {
name := d.zfsPath(id)
quota, err := parseStorageOpt(storageOpt)
if err != nil {
return err
}
if parent == "" {
mountoptions := map[string]string{"mountpoint": "legacy"}
fs, err := zfs.CreateFilesystem(name, mountoptions)
if err == nil {
err = setQuota(name, quota)
if err == nil {
d.Lock()
d.filesystemsCache[fs.Name] = true
d.Unlock()
}
}
return err
}
err = d.cloneFilesystem(name, d.zfsPath(parent))
if err == nil {
err = setQuota(name, quota)
}
return err
}
func parseStorageOpt(storageOpt map[string]string) (string, error) {
// Read size to change the disk quota per container
for k, v := range storageOpt {
key := strings.ToLower(k)
switch key {
case "size":
return v, nil
default:
return "0", fmt.Errorf("Unknown option %s", key)
}
}
return "0", nil
}
func setQuota(name string, quota string) error {
if quota == "0" {
return nil
}
fs, err := zfs.GetDataset(name)
if err != nil {
return err
}
return fs.SetProperty("quota", quota)
}
// Remove deletes the dataset, filesystem and the cache for the given id.
func (d *Driver) Remove(id string) error {
name := d.zfsPath(id)
dataset := zfs.Dataset{Name: name}
err := dataset.Destroy(zfs.DestroyRecursive)
if err == nil {
d.Lock()
delete(d.filesystemsCache, name)
d.Unlock()
}
return err
}
// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id, mountLabel string) (string, error) {
mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
}
filesystem := d.zfsPath(id)
options := label.FormatMountLabel("", mountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(mountpoint)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
d.ctr.Decrement(mountpoint)
return "", err
}
if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
}
// this could be our first mount after creation of the filesystem, and the root dir may still have root
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
mount.Unmount(mountpoint)
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
}
return mountpoint, nil
}
// Put removes the existing mountpoint for the given id if it exists.
func (d *Driver) Put(id string) error {
mountpoint := d.mountPath(id)
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
if err != nil || !mounted {
return err
}
logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
err = mount.Unmount(mountpoint)
if err != nil {
return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
}
return err
}
// Exists checks to see if the cache entry exists for the given id.
func (d *Driver) Exists(id string) bool {
d.Lock()
defer d.Unlock()
return d.filesystemsCache[d.zfsPath(id)]
}


@@ -0,0 +1,38 @@
package zfs
import (
"fmt"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/containers/storage/drivers"
)
func checkRootdirFs(rootdir string) error {
var buf syscall.Statfs_t
if err := syscall.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
}
// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return graphdriver.ErrPrerequisites
}
return nil
}
func getMountpoint(id string) string {
maxlen := 12
// we need to preserve filesystem suffix
suffix := strings.SplitN(id, "-", 2)
if len(suffix) > 1 {
return id[:maxlen] + "-" + suffix[1]
}
return id[:maxlen]
}
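
getMountpoint keeps mount paths short by truncating the id to twelve characters while preserving any "-suffix" (such as the "-init" layer marker). For example, with made-up ids:

getMountpoint("8315c12e5bd7ad53e04a")      // "8315c12e5bd7"
getMountpoint("8315c12e5bd7ad53e04a-init") // "8315c12e5bd7-init"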

Some files were not shown because too many files have changed in this diff.