fix chown and seccomp

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-03-22 09:22:44 -04:00
parent 60f032f6f5
commit 2569457739
8197 changed files with 30742 additions and 1596554 deletions

Gopkg.lock (generated, 88 lines changed)

@@ -26,7 +26,7 @@
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
-revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
+revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
branch = "master"
@@ -34,10 +34,33 @@
packages = ["."]
revision = "cb7008ab3d8359b78c5f464cb7cf160107ad5925"
+[[projects]]
+name = "github.com/containerd/containerd"
+packages = [
+"containers",
+"content",
+"contrib/seccomp",
+"errdefs",
+"fs",
+"images",
+"log",
+"mount",
+"namespaces",
+"oci",
+"platforms",
+"snapshots",
+"sys"
+]
+revision = "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c"
+version = "v1.0.2"
[[projects]]
branch = "master"
name = "github.com/containerd/continuity"
packages = ["pathdriver"]
packages = [
"pathdriver",
"sysx"
]
revision = "d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371"
[[projects]]
@@ -48,7 +71,7 @@
"dbus",
"util"
]
-revision = "54f92f7696b56003327688a2f18514c6c43e98c7"
+revision = "d7b55e18be11bfd0d09b6bd1be308332818b4f46"
[[projects]]
name = "github.com/coreos/pkg"
@@ -70,7 +93,7 @@
"cli/config/credentials",
"opts"
]
-revision = "c0ffb9491cdffb628e18bb491b566255987fd28d"
+revision = "c1b58a90961424e0b97956b93a88303a2790d789"
[[projects]]
branch = "master"
@@ -100,7 +123,7 @@
"registry/storage/driver/filesystem",
"uuid"
]
-revision = "607ae5d128a82f280e8c7f453d5fb30c535bda17"
+revision = "23bef416bdd6dcff90083288c284367cf6af955e"
[[projects]]
branch = "master"
@@ -137,14 +160,14 @@
"registry",
"registry/resumable"
]
-revision = "0c1006f1abc1af7aa6b9847754370d054dfa6c68"
+revision = "00c59ed77e186b6f9056bb8e0a2700767b4be383"
source = "github.com/moby/moby"
[[projects]]
branch = "master"
name = "github.com/docker/docker-ce"
packages = ["components/cli/cli/config"]
revision = "4836f8c6fc8cb4bd4de48f019f772b683b94ed71"
revision = "ea449e9b10cebb259e1a43325587cd9a0e98d0ff"
[[projects]]
name = "github.com/docker/docker-credential-helpers"
@@ -201,13 +224,23 @@
[[projects]]
name = "github.com/gogo/protobuf"
-packages = ["proto"]
+packages = [
+"proto",
+"sortkeys",
+"types"
+]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
@@ -279,7 +312,7 @@
"libcontainer/user",
"libcontainer/utils"
]
-revision = "69663f0bd4b60df09991c08812a60108003fa340"
+revision = "4e3f25b953e3a3eab4a93b576421bad7a1d43af6"
[[projects]]
name = "github.com/opencontainers/runtime-spec"
@@ -342,13 +375,13 @@
"nfs",
"xfs"
]
-revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"
+revision = "780932d4fbbe0e69b84c34c20f5c8d0981e109ea"
[[projects]]
-branch = "master"
name = "github.com/seccomp/libseccomp-golang"
packages = ["."]
-revision = "f6ec81daf48e41bf48b475afc7fe06a26bfb72d1"
+revision = "e3496e3a417d1dc9ecdceca5af2513271fed37a0"
+version = "v0.9.0"
[[projects]]
name = "github.com/sirupsen/logrus"
@@ -391,7 +424,7 @@
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
-revision = "c3a3ad6d03f7a915c0f7e194b7152974bb73d287"
+revision = "80db560fac1fb3e6ac81dbc7f8ae4c061f5257bd"
[[projects]]
branch = "master"
@@ -402,6 +435,12 @@
]
revision = "6078986fec03a1dcc236c34816c71b0e05018fda"
+[[projects]]
+branch = "master"
+name = "golang.org/x/sync"
+packages = ["errgroup"]
+revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
@@ -409,11 +448,28 @@
"unix",
"windows"
]
revision = "d8e400bc7db4870d786864138af681469693d18c"
revision = "c488ab1dd8481ef762f96a79a9577c27825be697"
+[[projects]]
+branch = "master"
+name = "google.golang.org/genproto"
+packages = ["googleapis/rpc/status"]
+revision = "f8c8703595236ae70fdf8789ecb656ea0bcdcf46"
+[[projects]]
+name = "google.golang.org/grpc"
+packages = [
+"codes",
+"grpclog",
+"metadata",
+"status"
+]
+revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
+version = "v1.10.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "3bfe517dda3080944417768fdbeb5f1286fb1370a35c1e6c1bb9c3e2d45961f9"
inputs-digest = "866a453fb14b8289c10259ac6f2382399814fe5f67382e281e4be842ac83616b"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -45,10 +45,6 @@
name = "github.com/coreos/go-systemd"
branch = "master"
-[[constraint]]
-name = "github.com/seccomp/libseccomp-golang"
-branch = "master"
[[constraint]]
name = "github.com/jteeuwen/go-bindata"
branch = "master"


@@ -1,6 +1,7 @@
package container
import (
"github.com/containerd/containerd/contrib/seccomp"
aaprofile "github.com/docker/docker/profiles/apparmor"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runc/libcontainer/specconv"
@@ -42,7 +43,7 @@ func Spec(opts SpecOpts) *specs.Spec {
spec.Hooks = opts.Hooks
// Set the default seccomp profile.
-spec.Linux.Seccomp = DefaultSeccompProfile
+spec.Linux.Seccomp = seccomp.DefaultProfile(spec)
// Install the default apparmor profile.
if apparmor.IsEnabled() {

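For context: the change above drops the vendored DefaultSeccompProfile in favor of containerd's contrib/seccomp package, whose DefaultProfile builds a syscall whitelist from the runtime spec, starting from a base set (which permits the chown family, presumably the chown fix in the commit title) and widening it according to the spec's bounding capabilities. A minimal standalone sketch of the call follows; the main wrapper and the hand-built spec are illustrative only and not part of this commit:

package main

import (
	"fmt"

	"github.com/containerd/containerd/contrib/seccomp"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Hand-built minimal spec for illustration. DefaultProfile consults
	// Process.Capabilities, so the capability set must be populated
	// before the call.
	spec := &specs.Spec{
		Process: &specs.Process{
			Capabilities: &specs.LinuxCapabilities{
				Bounding: []string{"CAP_CHOWN"},
			},
		},
		Linux: &specs.Linux{},
	}
	// Same call as in the diff: generate the default seccomp profile
	// from the spec and attach it.
	spec.Linux.Seccomp = seccomp.DefaultProfile(spec)
	fmt.Printf("default action: %v, %d syscall rules\n",
		spec.Linux.Seccomp.DefaultAction, len(spec.Linux.Seccomp.Syscalls))
}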

@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,344 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
CreationTime time.Time // creation time
Xattrs map[string]string
Winheaders map[string]string
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxCreationTime = "LIBARCHIVE.creationtime"
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxWindows = "MSWINDOWS."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
switch flag {
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
return true
default:
return false
}
}


@@ -1,80 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar_test
import (
"archive/tar"
"bytes"
"fmt"
"io"
"log"
"os"
)
func Example() {
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)
// Create a new tar archive.
tw := tar.NewWriter(buf)
// Add some files to the archive.
var files = []struct {
Name, Body string
}{
{"readme.txt", "This archive contains some text files."},
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
{"todo.txt", "Get animal handling license."},
}
for _, file := range files {
hdr := &tar.Header{
Name: file.Name,
Mode: 0600,
Size: int64(len(file.Body)),
}
if err := tw.WriteHeader(hdr); err != nil {
log.Fatalln(err)
}
if _, err := tw.Write([]byte(file.Body)); err != nil {
log.Fatalln(err)
}
}
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
log.Fatalln(err)
}
// Open the tar archive for reading.
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
log.Fatalln(err)
}
fmt.Printf("Contents of %s:\n", hdr.Name)
if _, err := io.Copy(os.Stdout, tr); err != nil {
log.Fatalln(err)
}
fmt.Println()
}
// Output:
// Contents of readme.txt:
// This archive contains some text files.
// Contents of gopher.txt:
// Gopher names:
// George
// Geoffrey
// Gonzo
// Contents of todo.txt:
// Get animal handling license.
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}


@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}


@@ -1,32 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}


@@ -1,325 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
)
func TestFileInfoHeader(t *testing.T) {
fi, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "small.txt"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(5); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
// FileInfoHeader should error when passing nil FileInfo
if _, err := FileInfoHeader(nil, ""); err == nil {
t.Fatalf("Expected error when passing nil to FileInfoHeader")
}
}
func TestFileInfoHeaderDir(t *testing.T) {
fi, err := os.Stat("testdata")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "testdata/"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
// Ignoring c_ISGID for golang.org/issue/4867
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(0); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
}
func TestFileInfoHeaderSymlink(t *testing.T) {
h, err := FileInfoHeader(symlink{}, "some-target")
if err != nil {
t.Fatal(err)
}
if g, e := h.Name, "some-symlink"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Linkname, "some-target"; g != e {
t.Errorf("Linkname = %q; want %q", g, e)
}
}
type symlink struct{}
func (symlink) Name() string { return "some-symlink" }
func (symlink) Size() int64 { return 0 }
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool { return false }
func (symlink) Sys() interface{} { return nil }
func TestRoundTrip(t *testing.T) {
data := []byte("some file contents")
var b bytes.Buffer
tw := NewWriter(&b)
hdr := &Header{
Name: "file.txt",
Uid: 1 << 21, // too big for 8 octal digits
Size: int64(len(data)),
ModTime: time.Now(),
}
// tar only supports second precision.
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("tw.WriteHeader: %v", err)
}
if _, err := tw.Write(data); err != nil {
t.Fatalf("tw.Write: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("tw.Close: %v", err)
}
// Read it back.
tr := NewReader(&b)
rHdr, err := tr.Next()
if err != nil {
t.Fatalf("tr.Next: %v", err)
}
if !reflect.DeepEqual(rHdr, hdr) {
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
}
rData, err := ioutil.ReadAll(tr)
if err != nil {
t.Fatalf("Read: %v", err)
}
if !bytes.Equal(rData, data) {
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
}
}
type headerRoundTripTest struct {
h *Header
fm os.FileMode
}
func TestHeaderRoundTrip(t *testing.T) {
golden := []headerRoundTripTest{
// regular file.
{
h: &Header{
Name: "test.txt",
Mode: 0644 | c_ISREG,
Size: 12,
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeReg,
},
fm: 0644,
},
// symbolic link.
{
h: &Header{
Name: "link.txt",
Mode: 0777 | c_ISLNK,
Size: 0,
ModTime: time.Unix(1360600852, 0),
Typeflag: TypeSymlink,
},
fm: 0777 | os.ModeSymlink,
},
// character device node.
{
h: &Header{
Name: "dev/null",
Mode: 0666 | c_ISCHR,
Size: 0,
ModTime: time.Unix(1360578951, 0),
Typeflag: TypeChar,
},
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
},
// block device node.
{
h: &Header{
Name: "dev/sda",
Mode: 0660 | c_ISBLK,
Size: 0,
ModTime: time.Unix(1360578954, 0),
Typeflag: TypeBlock,
},
fm: 0660 | os.ModeDevice,
},
// directory.
{
h: &Header{
Name: "dir/",
Mode: 0755 | c_ISDIR,
Size: 0,
ModTime: time.Unix(1360601116, 0),
Typeflag: TypeDir,
},
fm: 0755 | os.ModeDir,
},
// fifo node.
{
h: &Header{
Name: "dev/initctl",
Mode: 0600 | c_ISFIFO,
Size: 0,
ModTime: time.Unix(1360578949, 0),
Typeflag: TypeFifo,
},
fm: 0600 | os.ModeNamedPipe,
},
// setuid.
{
h: &Header{
Name: "bin/su",
Mode: 0755 | c_ISREG | c_ISUID,
Size: 23232,
ModTime: time.Unix(1355405093, 0),
Typeflag: TypeReg,
},
fm: 0755 | os.ModeSetuid,
},
// setguid.
{
h: &Header{
Name: "group.txt",
Mode: 0750 | c_ISREG | c_ISGID,
Size: 0,
ModTime: time.Unix(1360602346, 0),
Typeflag: TypeReg,
},
fm: 0750 | os.ModeSetgid,
},
// sticky.
{
h: &Header{
Name: "sticky.txt",
Mode: 0600 | c_ISREG | c_ISVTX,
Size: 7,
ModTime: time.Unix(1360602540, 0),
Typeflag: TypeReg,
},
fm: 0600 | os.ModeSticky,
},
// hard link.
{
h: &Header{
Name: "hard.txt",
Mode: 0644 | c_ISREG,
Size: 0,
Linkname: "file.txt",
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeLink,
},
fm: 0644,
},
// More information.
{
h: &Header{
Name: "info.txt",
Mode: 0600 | c_ISREG,
Size: 0,
Uid: 1000,
Gid: 1000,
ModTime: time.Unix(1360602540, 0),
Uname: "slartibartfast",
Gname: "users",
Typeflag: TypeReg,
},
fm: 0600,
},
}
for i, g := range golden {
fi := g.h.FileInfo()
h2, err := FileInfoHeader(fi, "")
if err != nil {
t.Error(err)
continue
}
if strings.Contains(fi.Name(), "/") {
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
}
name := path.Base(g.h.Name)
if fi.IsDir() {
name += "/"
}
if got, want := h2.Name, name; got != want {
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
}
if got, want := h2.Size, g.h.Size; got != want {
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
}
if got, want := h2.Uid, g.h.Uid; got != want {
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
}
if got, want := h2.Gid, g.h.Gid; got != want {
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
}
if got, want := h2.Uname, g.h.Uname; got != want {
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
}
if got, want := h2.Gname, g.h.Gname; got != want {
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
}
if got, want := h2.Linkname, g.h.Linkname; got != want {
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
}
if got, want := h2.Typeflag, g.h.Typeflag; got != want {
t.Logf("%#v %#v", g.h, fi.Sys())
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
}
if got, want := h2.Mode, g.h.Mode; got != want {
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
}
if got, want := fi.Mode(), g.fm; got != want {
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
}
if got, want := h2.AccessTime, g.h.AccessTime; got != want {
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
}
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
}
if got, want := h2.ModTime, g.h.ModTime; got != want {
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
}
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
t.Errorf("i=%d: Sys didn't return original *Header", i)
}
}
}


@@ -1 +0,0 @@
Kilts


@@ -1 +0,0 @@
Google.com


@@ -1,444 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"path"
"sort"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
type formatter struct {
err error // Last error seen
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
if len(s) > len(b) {
f.err = ErrFieldTooLong
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}
// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
return
}
f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
}
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// As this method is called internally by writePax header to allow it to
// suppress writing the pax header.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var f formatter
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}
// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
tw.usedBinary = true
f.formatNumeric(b, x)
}
var formatTime = func(b []byte, t time.Time, paxKeyword string) {
var unixTime int64
if !t.Before(minTime) && !t.After(maxTime) {
unixTime = t.Unix()
}
formatNumeric(b, unixTime, paxNone)
// Write a PAX header if the time didn't fit precisely.
if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
paxHeaders[paxKeyword] = formatPAXTime(t)
}
}
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
pathHeaderBytes := s.next(fileNameSize)
formatString(pathHeaderBytes, hdr.Name, paxPath)
f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
formatString(s.next(100), hdr.Linkname, paxLinkpath)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
header[155] = ' '
// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
return tw.err
}
if allowPax {
if !hdr.AccessTime.IsZero() {
paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
}
if !hdr.ChangeTime.IsZero() {
paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
}
if !hdr.CreationTime.IsZero() {
paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
}
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
for k, v := range hdr.Winheaders {
paxHeaders[paxWindows+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
func formatPAXTime(t time.Time) string {
sec := t.Unix()
usec := t.Nanosecond()
s := strconv.FormatInt(sec, 10)
if usec != 0 {
s = fmt.Sprintf("%s.%09d", s, usec)
}
return s
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
}
return name[:i], name[i+1:], true
}
// writePaxHeader writes an extended pax header to the
// archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
// Final adjustment if adding size field increased the record size.
if len(record) != size {
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
}
return record
}
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}


@@ -1,739 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"reflect"
"sort"
"strings"
"testing"
"testing/iotest"
"time"
)
type writerTestEntry struct {
header *Header
contents string
}
type writerTest struct {
file string // filename of expected output
entries []*writerTestEntry
}
var writerTests = []*writerTest{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
{
file: "testdata/writer.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
ModTime: time.Unix(1246508266, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
},
{
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
ModTime: time.Unix(1245217492, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
},
{
header: &Header{
Name: "link.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
Size: 0,
ModTime: time.Unix(1314603082, 0),
Typeflag: '2',
Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
},
// no contents
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
{
file: "testdata/writer-big.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
ModTime: time.Unix(1254699560, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
{
file: "testdata/writer-big-long.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "16gig.txt",
Mode: 0644,
Uid: 1000,
Gid: 1000,
Size: 16 << 30,
ModTime: time.Unix(1399583047, 0),
Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
},
},
},
// This file was produced using gnu tar 1.17
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
{
file: "testdata/ustar.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: strings.Repeat("longname/", 15) + "file.txt",
Mode: 0644,
Uid: 0765,
Gid: 024,
Size: 06,
ModTime: time.Unix(1360135598, 0),
Typeflag: '0',
Uname: "shane",
Gname: "staff",
},
contents: "hello\n",
},
},
},
// This file was produced using gnu tar 1.26
// echo "Slartibartfast" > file.txt
// ln file.txt hard.txt
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
{
file: "testdata/hardlink.tar",
entries: []*writerTestEntry{
{
header: &Header{
Name: "file.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 15,
ModTime: time.Unix(1425484303, 0),
Typeflag: '0',
Uname: "vbatts",
Gname: "users",
},
contents: "Slartibartfast\n",
},
{
header: &Header{
Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 0,
ModTime: time.Unix(1425484303, 0),
Typeflag: '1',
Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
},
// no contents
},
},
},
}
// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
func bytestr(offset int, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("%04x ", offset)
for _, ch := range b {
switch {
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
s += fmt.Sprintf(" %c", ch)
default:
s += fmt.Sprintf(" %02x", ch)
}
}
return s
}
// Render a pseudo-diff between two blocks of bytes.
func bytediff(a []byte, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
na, nb := rowLen, rowLen
if na > len(a) {
na = len(a)
}
if nb > len(b) {
nb = len(b)
}
sa := bytestr(offset, a[0:na])
sb := bytestr(offset, b[0:nb])
if sa != sb {
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
}
a = a[na:]
b = b[nb:]
}
return s
}
func TestWriter(t *testing.T) {
testLoop:
for i, test := range writerTests {
expected, err := ioutil.ReadFile(test.file)
if err != nil {
t.Errorf("test %d: Unexpected error: %v", i, err)
continue
}
buf := new(bytes.Buffer)
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
big := false
for j, entry := range test.entries {
big = big || entry.header.Size > 1<<10
if err := tw.WriteHeader(entry.header); err != nil {
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
continue testLoop
}
if _, err := io.WriteString(tw, entry.contents); err != nil {
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
continue testLoop
}
}
// Only interested in Close failures for the small tests.
if err := tw.Close(); err != nil && !big {
t.Errorf("test %d: Failed closing archive: %v", i, err)
continue testLoop
}
actual := buf.Bytes()
if !bytes.Equal(expected, actual) {
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
i, bytediff(expected, actual))
}
if testing.Short() { // The second test is expensive.
break
}
}
}
func TestPax(t *testing.T) {
// Create an archive with a large name
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
// Force a PAX long name to be written
longName := strings.Repeat("ab", 100)
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Name = longName
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long file name")
}
}
func TestPaxSymlink(t *testing.T) {
// Create an archive with a large linkname
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeSymlink
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long linkname to be written
longLinkname := strings.Repeat("1234567890/1234567890", 10)
hdr.Linkname = longLinkname
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Linkname != longLinkname {
t.Fatal("Couldn't recover long link name")
}
}
func TestPaxNonAscii(t *testing.T) {
// Create an archive with non ascii. These should trigger a pax header
// because pax headers have a defined utf-8 encoding.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// some sample data
chineseFilename := "文件名"
chineseGroupname := "組"
chineseUsername := "用戶名"
hdr.Name = chineseFilename
hdr.Gname = chineseGroupname
hdr.Uname = chineseUsername
contents := strings.Repeat(" ", int(hdr.Size))
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != chineseFilename {
t.Fatal("Couldn't recover unicode name")
}
if hdr.Gname != chineseGroupname {
t.Fatal("Couldn't recover unicode group")
}
if hdr.Uname != chineseUsername {
t.Fatal("Couldn't recover unicode user")
}
}
func TestPaxXattrs(t *testing.T) {
xattrs := map[string]string{
"user.key": "value",
}
// Create an archive with an xattr
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := "Kilts"
hdr.Xattrs = xattrs
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get the xattrs back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
hdr.Xattrs, xattrs)
}
}
func TestPaxHeadersSorted(t *testing.T) {
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Xattrs = map[string]string{
"foo": "foo",
"bar": "bar",
"baz": "baz",
"qux": "qux",
}
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// xattr bar should always appear before others
indices := []int{
bytes.Index(buf.Bytes(), []byte("bar=bar")),
bytes.Index(buf.Bytes(), []byte("baz=baz")),
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
func TestUSTARLongName(t *testing.T) {
// Create an archive with a path that failed to split with USTAR extension in previous versions.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeDir
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long name to be written. The name was taken from a practical example
// that fails and replaced ever char through numbers to anonymize the sample.
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
hdr.Name = longName
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long name")
}
}
func TestValidTypeflagWithPAXHeader(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
fileName := strings.Repeat("ab", 100)
hdr := &Header{
Name: fileName,
Size: 4,
Typeflag: 0,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
if _, err := tw.Write([]byte("fooo")); err != nil {
t.Fatalf("Failed to write the file's data: %s", err)
}
tw.Close()
tr := NewReader(&buffer)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("Failed to read header: %s", err)
}
if header.Typeflag != 0 {
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
}
}
}
func TestWriteAfterClose(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
hdr := &Header{
Name: "small.txt",
Size: 5,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
tw.Close()
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
}
}
func TestSplitUSTARPath(t *testing.T) {
var sr = strings.Repeat
var vectors = []struct {
input string // Input path
prefix string // Expected output prefix
suffix string // Expected output suffix
ok bool // Split success?
}{
{"", "", "", false},
{"abc", "", "", false},
{"用戶名", "", "", false},
{sr("a", fileNameSize), "", "", false},
{sr("a", fileNameSize) + "/", "", "", false},
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
{sr("a", fileNamePrefixSize) + "/", "", "", false},
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
{sr("a", fileNameSize+1), "", "", false},
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
}
for _, v := range vectors {
prefix, suffix, ok := splitUSTARPath(v.input)
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
}
}
}
func TestFormatPAXRecord(t *testing.T) {
var medName = strings.Repeat("CD", 50)
var longName = strings.Repeat("AB", 100)
var vectors = []struct {
inputKey string
inputVal string
output string
}{
{"k", "v", "6 k=v\n"},
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
{"path", longName, "210 path=" + longName + "\n"},
{"path", medName, "110 path=" + medName + "\n"},
{"foo", "ba", "9 foo=ba\n"},
{"foo", "bar", "11 foo=bar\n"},
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
}
for _, v := range vectors {
output := formatPAXRecord(v.inputKey, v.inputVal)
if output != v.output {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
v.inputKey, v.inputVal, output, v.output)
}
}
}
func TestFitsInBase256(t *testing.T) {
var vectors = []struct {
input int64
width int
ok bool
}{
{+1, 8, true},
{0, 8, true},
{-1, 8, true},
{1 << 56, 8, false},
{(1 << 56) - 1, 8, true},
{-1 << 56, 8, true},
{(-1 << 56) - 1, 8, false},
{121654, 8, true},
{-9849849, 8, true},
{math.MaxInt64, 9, true},
{0, 9, true},
{math.MinInt64, 9, true},
{math.MaxInt64, 12, true},
{0, 12, true},
{math.MinInt64, 12, true},
}
for _, v := range vectors {
ok := fitsInBase256(v.width, v.input)
if ok != v.ok {
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
}
}
}
func TestFormatNumeric(t *testing.T) {
var vectors = []struct {
input int64
output string
ok bool
}{
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
{-1, "\xff\xff\xff", true},
{(1 << 0), "0", false},
{(1 << 8) - 1, "\x80\xff", true},
{(1 << 8), "0\x00", false},
{(1 << 16) - 1, "\x80\xff\xff", true},
{(1 << 16), "00\x00", false},
{-1 * (1 << 0), "\xff", true},
{-1*(1<<0) - 1, "0", false},
{-1 * (1 << 8), "\xff\x00", true},
{-1*(1<<8) - 1, "0\x00", false},
{-1 * (1 << 16), "\xff\x00\x00", true},
{-1*(1<<16) - 1, "00\x00", false},
{537795476381659745, "0000000\x00", false},
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
{-615126028225187231, "0000000\x00", false},
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
{math.MaxInt64, "0000000\x00", false},
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "0000000\x00", false},
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
}
for _, v := range vectors {
var f formatter
output := make([]byte, len(v.output))
f.formatNumeric(output, v.input)
ok := (f.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
} else {
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
}
}
if string(output) != v.output {
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
}
}
}
func TestFormatPAXTime(t *testing.T) {
t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
verify := func(time time.Time, s string) {
p := formatPAXTime(time)
if p != s {
t.Errorf("for %v, expected %s, got %s", time, s, p)
}
}
verify(t1, "946724400")
verify(t2, "946724400.000000100")
verify(t3, "-315579600")
verify(t4, "0")
}


@@ -1,4 +0,0 @@
// +build !windows
// This file only exists to allow go get on non-Windows platforms.
package backuptar


@@ -1,439 +0,0 @@
// +build windows
package backuptar
import (
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
const (
hdrFileAttributes = "fileattr"
hdrSecurityDescriptor = "sd"
hdrRawSecurityDescriptor = "rawsd"
hdrMountPoint = "mountpoint"
hdrEaPrefix = "xattr."
)
func writeZeroes(w io.Writer, count int64) error {
buf := make([]byte, 8192)
c := len(buf)
for i := int64(0); i < count; i += int64(c) {
if int64(c) > count-i {
c = int(count - i)
}
_, err := w.Write(buf[:c])
if err != nil {
return err
}
}
return nil
}
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
curOffset := int64(0)
for {
bhdr, err := br.Next()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
return err
}
if bhdr.Id != winio.BackupSparseBlock {
return fmt.Errorf("unexpected stream %d", bhdr.Id)
}
// archive/tar does not support writing sparse files
// so just write zeroes to catch up to the current offset.
		err = writeZeroes(t, bhdr.Offset-curOffset)
		if err != nil {
			return err
		}
		if bhdr.Size == 0 {
			break
		}
n, err := io.Copy(t, br)
if err != nil {
return err
}
curOffset = bhdr.Offset + n
}
return nil
}
// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
hdr := &tar.Header{
Name: filepath.ToSlash(name),
Size: size,
Typeflag: tar.TypeReg,
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
Winheaders: make(map[string]string),
}
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
hdr.Size = 0
hdr.Typeflag = tar.TypeDir
}
return hdr
}
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
//
// The additional Win32 metadata is:
//
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
//
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
//
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
name = filepath.ToSlash(name)
hdr := BasicInfoHeader(name, size, fileInfo)
	// If r is seekable, then this function is two-pass: pass 1 collects the
	// tar header data, and pass 2 copies the data stream. If r is not
	// seekable, then some header data (in particular EAs) will be silently lost.
var (
restartPos int64
err error
)
sr, readTwice := r.(io.Seeker)
if readTwice {
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
readTwice = false
}
}
br := winio.NewBackupStreamReader(r)
var dataHdr *winio.BackupHeader
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupData:
hdr.Mode |= c_ISREG
if !readTwice {
dataHdr = bhdr
}
case winio.BackupSecurity:
sd, err := ioutil.ReadAll(br)
if err != nil {
return err
}
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
hdr.Typeflag = tar.TypeSymlink
			reparseBuffer, err := ioutil.ReadAll(br)
			if err != nil {
				return err
			}
			rp, err := winio.DecodeReparsePoint(reparseBuffer)
if err != nil {
return err
}
if rp.IsMountPoint {
hdr.Winheaders[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
case winio.BackupEaData:
eab, err := ioutil.ReadAll(br)
if err != nil {
return err
}
eas, err := winio.DecodeExtendedAttributes(eab)
if err != nil {
return err
}
for _, ea := range eas {
// Use base64 encoding for the binary value. Note that there
// is no way to encode the EA's flags, since their use doesn't
// make any sense for persisted EAs.
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
}
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
}
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
if readTwice {
// Get back to the data stream.
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
return err
}
for dataHdr == nil {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if bhdr.Id == winio.BackupData {
dataHdr = bhdr
}
}
}
if dataHdr != nil {
// A data stream was found. Copy the data.
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
if size != dataHdr.Size {
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
err = copySparse(t, br)
if err != nil {
return err
}
}
}
// Look for streams after the data stream. The only ones we handle are alternate data streams.
// Other streams may have metadata that could be serialized, but the tar header has already
// been written. In practice, this means that we don't get EA or TXF metadata.
for {
bhdr, err := br.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupAlternateData:
altName := bhdr.Name
if strings.HasSuffix(altName, ":$DATA") {
altName = altName[:len(altName)-len(":$DATA")]
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
Size: bhdr.Size,
ModTime: hdr.ModTime,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
err = t.WriteHeader(hdr)
if err != nil {
return err
}
_, err = io.Copy(t, br)
if err != nil {
return err
}
} else {
// Unsupported for now, since the size of the alternate stream is not present
// in the backup stream until after the data has been read.
return errors.New("tar of sparse alternate data streams is unsupported")
}
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
// ignore these streams
default:
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
}
}
return nil
}
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
}
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uintptr(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
return
}
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file that was not processed, or io.EOF if there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
var sd []byte
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
}
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
var eas []winio.ExtendedAttribute
for k, v := range hdr.Winheaders {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, err
}
eas = append(eas, winio.ExtendedAttribute{
Name: k[len(hdrEaPrefix):],
Value: data,
})
}
if len(eas) != 0 {
eadata, err := winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(eadata)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.Copy(bw, t)
if err != nil {
return nil, err
}
}
}
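// Because alternate data streams appear as separate tar entries immediately
// after their file, WriteBackupStreamFromTarFile hands back the next
// unprocessed entry instead of letting the caller call t.Next() itself. A
// hedged restore-loop sketch built on that contract; openForBackupWrite is a
// hypothetical helper, and winio.NewBackupFileWriter is assumed from go-winio:
func restoreAll(t *tar.Reader) error {
	hdr, err := t.Next()
	for err == nil {
		f, ferr := openForBackupWrite(hdr) // hypothetical: create the target with backup semantics
		if ferr != nil {
			return ferr
		}
		w := winio.NewBackupFileWriter(f, true) // true: include security descriptors
		hdr, err = WriteBackupStreamFromTarFile(w, t, hdr)
		w.Close()
		f.Close()
	}
	if err == io.EOF {
		return nil // clean end of archive
	}
	return err
}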

View file

@@ -1,84 +0,0 @@
package backuptar
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
)
func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
for _, k := range keys {
if _, ok := m[k]; !ok {
t.Error(k, "not present in tar header")
}
}
}
func TestRoundTrip(t *testing.T) {
f, err := ioutil.TempFile("", "tst")
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
t.Fatal(err)
}
if _, err = f.Seek(0, 0); err != nil {
t.Fatal(err)
}
fi, err := f.Stat()
if err != nil {
t.Fatal(err)
}
bi, err := winio.GetFileBasicInfo(f)
if err != nil {
t.Fatal(err)
}
br := winio.NewBackupFileReader(f, true)
defer br.Close()
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
if err != nil {
t.Fatal(err)
}
tr := tar.NewReader(&buf)
hdr, err := tr.Next()
if err != nil {
t.Fatal(err)
}
name, size, bi2, err := FileInfoFromHeader(hdr)
if err != nil {
t.Fatal(err)
}
if name != filepath.ToSlash(f.Name()) {
t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
}
if size != fi.Size() {
t.Errorf("got size %d, expected %d", size, fi.Size())
}
if !reflect.DeepEqual(*bi, *bi2) {
t.Errorf("got %#v, expected %#v", *bi, *bi2)
}
ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd")
}

View file

@@ -1,901 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hard-coding unicode mode for VHD library.
// +build ignore
/*
mksyscall_windows generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument. This
includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to be different from its winapi dll name,
the winapi name could be specified at the end, after "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
* Each function that returns err needs to supply a condition that the
winapi return value will be tested against to detect failure. On failure,
err is set to the Windows "last-error"; otherwise it is nil.
The value can be provided at end of //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
Usage:
mksyscall_windows [flags] [path ...]
The flags are:
-output
Specify output file name (outputs to console if blank).
-trace
Generate print statement after every syscall.
*/
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/format"
"go/parser"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"text/template"
)
var (
filename = flag.String("output", "", "output file name (standard output if omitted)")
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
)
func trim(s string) string {
return strings.Trim(s, " \t")
}
var packageName string
func packagename() string {
return packageName
}
func syscalldot() string {
if packageName == "syscall" {
return ""
}
return "syscall."
}
// Param is function parameter
type Param struct {
Name string
Type string
fn *Fn
tmpVarIdx int
}
// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
if p.tmpVarIdx < 0 {
p.tmpVarIdx = p.fn.curTmpVarIdx
p.fn.curTmpVarIdx++
}
return fmt.Sprintf("_p%d", p.tmpVarIdx)
}
// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
const code = `var %s uint32
if %s {
%s = 1
} else {
%s = 0
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}
// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
const code = `var %s *%s
if len(%s) > 0 {
%s = &%s[0]
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}
// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
errvar := p.fn.Rets.ErrorVarName()
if errvar == "" {
errvar = "_"
}
tmp := p.tmpVar()
const code = `var %s %s
%s, %s = %s(%s)`
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
if errvar == "-" {
return s
}
const morecode = `
if %s != nil {
return
}`
return s + fmt.Sprintf(morecode, errvar)
}
// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
switch {
case p.Type == "bool":
return p.BoolTmpVarCode()
case strings.HasPrefix(p.Type, "[]"):
return p.SliceTmpVarCode()
default:
return ""
}
}
// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
if p.Type != "string" {
return ""
}
return p.StringTmpVarCode()
}
// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
t := p.HelperType()
var s string
switch {
case t[0] == '*':
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
case t == "bool":
s = p.tmpVar()
case strings.HasPrefix(t, "[]"):
return []string{
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
fmt.Sprintf("uintptr(len(%s))", p.Name),
}
default:
s = p.Name
}
return []string{fmt.Sprintf("uintptr(%s)", s)}
}
// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
return p.Name == "err" && p.Type == "error"
}
// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
if p.Type == "string" {
return p.fn.StrconvType()
}
return p.Type
}
// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into string by applying fn to it
// before conversion.
func join(ps []*Param, fn func(*Param) string, sep string) string {
if len(ps) == 0 {
return ""
}
a := make([]string, 0)
for _, p := range ps {
a = append(a, fn(p))
}
return strings.Join(a, sep)
}
// Rets describes function return parameters.
type Rets struct {
Name string
Type string
ReturnsError bool
FailCond string
}
// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
if r.ReturnsError {
return "err"
}
if r.Type == "error" {
return r.Name
}
return ""
}
// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
ps := make([]*Param, 0)
if len(r.Name) > 0 {
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
}
if r.ReturnsError {
ps = append(ps, &Param{Name: "err", Type: "error"})
}
return ps
}
// List returns source code of syscall return parameters.
func (r *Rets) List() string {
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
if len(s) > 0 {
s = "(" + s + ")"
}
return s
}
// PrintList returns source code of the trace printing part corresponding
// to syscall return values.
func (r *Rets) PrintList() string {
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
if r.Name == "" && !r.ReturnsError {
return ""
}
retvar := "r0"
if r.Name == "" {
retvar = "r1"
}
errvar := "_"
if r.ReturnsError {
errvar = "e1"
}
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}
func (r *Rets) useLongHandleErrorCode(retvar string) string {
const code = `if %s {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = %sEINVAL
}
}`
cond := retvar + " == 0"
if r.FailCond != "" {
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
}
return fmt.Sprintf(code, cond, syscalldot())
}
// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
const code = `if r0 != 0 {
%s = %sErrno(r0)
}`
if r.Name == "" && !r.ReturnsError {
return ""
}
if r.Name == "" {
return r.useLongHandleErrorCode("r1")
}
if r.Type == "error" {
return fmt.Sprintf(code, r.Name, syscalldot())
}
s := ""
switch {
case r.Type[0] == '*':
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
case r.Type == "bool":
s = fmt.Sprintf("%s = r0 != 0", r.Name)
default:
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
}
if !r.ReturnsError {
return s
}
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}
// Fn describes syscall function.
type Fn struct {
Name string
Params []*Param
Rets *Rets
PrintTrace bool
dllname string
dllfuncname string
src string
// TODO: get rid of this field and just use parameter index instead
	curTmpVarIdx int // ensure tmp variables have unique names
}
// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
s = trim(s)
if s == "" {
return nil, nil
}
a := strings.Split(s, ",")
ps := make([]*Param, len(a))
for i := range ps {
s2 := trim(a[i])
b := strings.Split(s2, " ")
if len(b) != 2 {
b = strings.Split(s2, "\t")
if len(b) != 2 {
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
}
}
ps[i] = &Param{
Name: trim(b[0]),
Type: trim(b[1]),
fn: f,
tmpVarIdx: -1,
}
}
return ps, nil
}
// extractSection extracts text out of string s starting after start
// and ending just before end. The found return value indicates success,
// and prefix, body and suffix contain the corresponding parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
s = trim(s)
if strings.HasPrefix(s, string(start)) {
// no prefix
body = s[1:]
} else {
a := strings.SplitN(s, string(start), 2)
if len(a) != 2 {
return "", "", s, false
}
prefix = a[0]
body = a[1]
}
a := strings.SplitN(body, string(end), 2)
if len(a) != 2 {
return "", "", "", false
}
return prefix, a[0], a[1], true
}
// newFn parses string s and returns the created function Fn.
func newFn(s string) (*Fn, error) {
s = trim(s)
f := &Fn{
Rets: &Rets{},
src: s,
PrintTrace: *printTraceFlag,
}
// function name and args
prefix, body, s, found := extractSection(s, '(', ')')
if !found || prefix == "" {
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
}
f.Name = prefix
var err error
f.Params, err = extractParams(body, f)
if err != nil {
return nil, err
}
// return values
_, body, s, found = extractSection(s, '(', ')')
if found {
r, err := extractParams(body, f)
if err != nil {
return nil, err
}
switch len(r) {
case 0:
case 1:
if r[0].IsError() {
f.Rets.ReturnsError = true
} else {
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
}
case 2:
if !r[1].IsError() {
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
}
f.Rets.ReturnsError = true
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
default:
return nil, errors.New("Too many return values in \"" + f.src + "\"")
}
}
// fail condition
_, body, s, found = extractSection(s, '[', ']')
if found {
f.Rets.FailCond = body
}
// dll and dll function names
s = trim(s)
if s == "" {
return f, nil
}
if !strings.HasPrefix(s, "=") {
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
s = trim(s[1:])
a := strings.Split(s, ".")
switch len(a) {
case 1:
f.dllfuncname = a[0]
case 2:
f.dllname = a[0]
f.dllfuncname = a[1]
default:
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
return f, nil
}
// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
if f.dllname == "" {
return "kernel32"
}
return f.dllname
}
// DLLFuncName returns the DLL function name for function f.
func (f *Fn) DLLFuncName() string {
if f.dllfuncname == "" {
return f.Name
}
return f.dllfuncname
}
// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}
// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}
// ParamPrintList returns source code of the trace printing part corresponding
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// ParamCount returns the number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
n := 0
for _, p := range f.Params {
n += len(p.SyscallArgList())
}
return n
}
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns the parameter count for the corresponding SyscallX function.
func (f *Fn) SyscallParamCount() int {
n := f.ParamCount()
switch {
case n <= 3:
return 3
case n <= 6:
return 6
case n <= 9:
return 9
case n <= 12:
return 12
case n <= 15:
return 15
default:
panic("too many arguments to system call")
}
}
// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
c := f.SyscallParamCount()
if c == 3 {
return syscalldot() + "Syscall"
}
return syscalldot() + "Syscall" + strconv.Itoa(c)
}
// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
a := make([]string, 0)
for _, p := range f.Params {
a = append(a, p.SyscallArgList()...)
}
for len(a) < f.SyscallParamCount() {
a = append(a, "0")
}
return strings.Join(a, ", ")
}
// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
a := make([]string, 0, len(f.Params))
for _, p := range f.Params {
s := p.Name
if p.Type == "string" {
s = p.tmpVar()
}
a = append(a, s)
}
return strings.Join(a, ", ")
}
// IsUTF16 is true if f is a W (utf16) function. It is false
// for all A (ascii) functions.
func (f *Fn) IsUTF16() bool {
return true
}
// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
if f.IsUTF16() {
return syscalldot() + "UTF16PtrFromString"
}
return syscalldot() + "BytePtrFromString"
}
// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
if f.IsUTF16() {
return "*uint16"
}
return "*byte"
}
// HasStringParam is true if f has at least one string parameter.
// Otherwise it is false.
func (f *Fn) HasStringParam() bool {
for _, p := range f.Params {
if p.Type == "string" {
return true
}
}
return false
}
// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
if !f.HasStringParam() {
return f.Name
}
return "_" + f.Name
}
// Source files and functions.
type Source struct {
Funcs []*Fn
Files []string
StdLibImports []string
ExternalImports []string
}
func (src *Source) Import(pkg string) {
src.StdLibImports = append(src.StdLibImports, pkg)
sort.Strings(src.StdLibImports)
}
func (src *Source) ExternalImport(pkg string) {
src.ExternalImports = append(src.ExternalImports, pkg)
sort.Strings(src.ExternalImports)
}
// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns source files
// and functions collection *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
src := &Source{
Funcs: make([]*Fn, 0),
Files: make([]string, 0),
StdLibImports: []string{
"unsafe",
},
ExternalImports: make([]string, 0),
}
for _, file := range fs {
if err := src.ParseFile(file); err != nil {
return nil, err
}
}
return src, nil
}
// DLLs returns the dll names for a source set src.
func (src *Source) DLLs() []string {
uniq := make(map[string]bool)
r := make([]string, 0)
for _, f := range src.Funcs {
name := f.DLLName()
if _, found := uniq[name]; !found {
uniq[name] = true
r = append(r, name)
}
}
return r
}
// ParseFile adds additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
s := bufio.NewScanner(file)
for s.Scan() {
t := trim(s.Text())
if len(t) < 7 {
continue
}
if !strings.HasPrefix(t, "//sys") {
continue
}
t = t[5:]
if !(t[0] == ' ' || t[0] == '\t') {
continue
}
f, err := newFn(t[1:])
if err != nil {
return err
}
src.Funcs = append(src.Funcs, f)
}
if err := s.Err(); err != nil {
return err
}
src.Files = append(src.Files, path)
// get package name
fset := token.NewFileSet()
_, err = file.Seek(0, 0)
if err != nil {
return err
}
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
if err != nil {
return err
}
packageName = pkg.Name.Name
return nil
}
// IsStdRepo returns true if src is part of standard library.
func (src *Source) IsStdRepo() (bool, error) {
if len(src.Files) == 0 {
return false, errors.New("no input files provided")
}
abspath, err := filepath.Abs(src.Files[0])
if err != nil {
return false, err
}
goroot := runtime.GOROOT()
if runtime.GOOS == "windows" {
abspath = strings.ToLower(abspath)
goroot = strings.ToLower(goroot)
}
sep := string(os.PathSeparator)
if !strings.HasSuffix(goroot, sep) {
goroot += sep
}
return strings.HasPrefix(abspath, goroot), nil
}
// Generate writes the output source file for a source set src to w.
func (src *Source) Generate(w io.Writer) error {
const (
pkgStd = iota // any package in std library
pkgXSysWindows // x/sys/windows package
pkgOther
)
isStdRepo, err := src.IsStdRepo()
if err != nil {
return err
}
var pkgtype int
switch {
case isStdRepo:
pkgtype = pkgStd
case packageName == "windows":
// TODO: this needs better logic than just using package name
pkgtype = pkgXSysWindows
default:
pkgtype = pkgOther
}
if *systemDLL {
switch pkgtype {
case pkgStd:
src.Import("internal/syscall/windows/sysdll")
case pkgXSysWindows:
default:
src.ExternalImport("golang.org/x/sys/windows")
}
}
if packageName != "syscall" {
src.Import("syscall")
}
funcMap := template.FuncMap{
"packagename": packagename,
"syscalldot": syscalldot,
"newlazydll": func(dll string) string {
arg := "\"" + dll + ".dll\""
if !*systemDLL {
return syscalldot() + "NewLazyDLL(" + arg + ")"
}
switch pkgtype {
case pkgStd:
return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
case pkgXSysWindows:
return "NewLazySystemDLL(" + arg + ")"
default:
return "windows.NewLazySystemDLL(" + arg + ")"
}
},
}
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
err = t.Execute(w, src)
if err != nil {
return errors.New("Failed to execute template: " + err.Error())
}
return nil
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(1)
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
src, err := ParseFiles(flag.Args())
if err != nil {
log.Fatal(err)
}
var buf bytes.Buffer
if err := src.Generate(&buf); err != nil {
log.Fatal(err)
}
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
if *filename == "" {
_, err = os.Stdout.Write(data)
} else {
err = ioutil.WriteFile(*filename, data, 0644)
}
if err != nil {
log.Fatal(err)
}
}
// TODO: use println instead to print in the following template
const srcTemplate = `
{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package {{packagename}}
import (
{{range .StdLibImports}}"{{.}}"
{{end}}
{{range .ExternalImports}}"{{.}}"
{{end}}
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e {{syscalldot}}Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}
{{/* help functions */}}
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
{{end}}{{end}}
{{define "funcnames"}}{{range .Funcs}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}")
{{end}}{{end}}
{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}
{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}} {{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}} return
}
{{end}}
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
{{end}}{{end}}{{end}}
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
{{end}}{{end}}
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}
`
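// End-to-end illustration of the template above (hedged, hand-derived from
// the template rather than actual generator output): a parameterless
// declaration such as
//
//	//sys	GetTickCount() (count uint32) = kernel32.GetTickCount
//
// produces a proc variable and a body of roughly this shape:
//
//	procGetTickCount = modkernel32.NewProc("GetTickCount")
//
//	func GetTickCount() (count uint32) {
//		r0, _, _ := syscall.Syscall(procGetTickCount.Addr(), 0, 0, 0, 0)
//		count = uint32(r0)
//		return
//	}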

View file

@@ -1,82 +0,0 @@
// +build windows
package vhd
import "syscall"
//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go
//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk
type virtualStorageType struct {
DeviceID uint32
VendorID [16]byte
}
const virtualDiskAccessNONE uint32 = 0
const virtualDiskAccessATTACHRO uint32 = 65536
const virtualDiskAccessATTACHRW uint32 = 131072
const virtualDiskAccessDETACH uint32 = 262144
const virtualDiskAccessGETINFO uint32 = 524288
const virtualDiskAccessCREATE uint32 = 1048576
const virtualDiskAccessMETAOPS uint32 = 2097152
const virtualDiskAccessREAD uint32 = 851968
const virtualDiskAccessALL uint32 = 4128768
const virtualDiskAccessWRITABLE uint32 = 3276800
const createVirtualDiskFlagNone uint32 = 0
const createVirtualDiskFlagFullPhysicalAllocation uint32 = 1
const createVirtualDiskFlagPreventWritesToSourceDisk uint32 = 2
const createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4
type version2 struct {
UniqueID [16]byte // GUID
MaximumSize uint64
BlockSizeInBytes uint32
SectorSizeInBytes uint32
ParentPath *uint16 // string
SourcePath *uint16 // string
OpenFlags uint32
ParentVirtualStorageType virtualStorageType
SourceVirtualStorageType virtualStorageType
ResiliencyGUID [16]byte // GUID
}
type createVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version2 version2
}
// CreateVhdx will create a simple vhdx file at the given path using default values.
func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
var defaultType virtualStorageType
parameters := createVirtualDiskParameters{
Version: 2,
Version2: version2{
MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024,
BlockSizeInBytes: blockSizeInMb * 1024 * 1024,
},
}
var handle syscall.Handle
if err := createVirtualDisk(
&defaultType,
path,
virtualDiskAccessNONE,
nil,
createVirtualDiskFlagNone,
0,
&parameters,
nil,
&handle); err != nil {
return err
}
if err := syscall.CloseHandle(handle); err != nil {
return err
}
return nil
}
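// Illustrative use of the helper above (the path is hypothetical): create a
// 20 GB VHDX with 1 MB blocks and let Windows pick the remaining defaults.
//
//	if err := CreateVhdx(`C:\disks\scratch.vhdx`, 20, 1); err != nil {
//		log.Fatal(err)
//	}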

View file

@@ -1,64 +0,0 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package vhd
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll")
procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk")
)
func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(path)
if err != nil {
return
}
return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle)
}
func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) {
r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle)))
if r1 != 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View file

@@ -1,138 +0,0 @@
package wim
import (
"encoding/binary"
"io"
"io/ioutil"
"github.com/Microsoft/go-winio/wim/lzx"
)
const chunkSize = 32768 // Compressed resource chunk size
type compressedReader struct {
r *io.SectionReader
d io.ReadCloser
chunks []int64
curChunk int
originalSize int64
}
func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) {
nchunks := (originalSize + chunkSize - 1) / chunkSize
var base int64
chunks := make([]int64, nchunks)
if originalSize <= 0xffffffff {
// 32-bit chunk offsets
base = (nchunks - 1) * 4
chunks32 := make([]uint32, nchunks-1)
err := binary.Read(r, binary.LittleEndian, chunks32)
if err != nil {
return nil, err
}
for i, n := range chunks32 {
chunks[i+1] = int64(n)
}
} else {
// 64-bit chunk offsets
base = (nchunks - 1) * 8
err := binary.Read(r, binary.LittleEndian, chunks[1:])
if err != nil {
return nil, err
}
}
for i, c := range chunks {
chunks[i] = c + base
}
cr := &compressedReader{
r: r,
chunks: chunks,
originalSize: originalSize,
}
err := cr.reset(int(offset / chunkSize))
if err != nil {
return nil, err
}
suboff := offset % chunkSize
if suboff != 0 {
_, err := io.CopyN(ioutil.Discard, cr.d, suboff)
if err != nil {
return nil, err
}
}
return cr, nil
}
func (r *compressedReader) chunkOffset(n int) int64 {
if n == len(r.chunks) {
return r.r.Size()
}
return r.chunks[n]
}
func (r *compressedReader) chunkSize(n int) int {
return int(r.chunkOffset(n+1) - r.chunkOffset(n))
}
func (r *compressedReader) uncompressedSize(n int) int {
if n < len(r.chunks)-1 {
return chunkSize
}
size := int(r.originalSize % chunkSize)
if size == 0 {
size = chunkSize
}
return size
}
func (r *compressedReader) reset(n int) error {
if n >= len(r.chunks) {
return io.EOF
}
if r.d != nil {
r.d.Close()
}
r.curChunk = n
size := r.chunkSize(n)
uncompressedSize := r.uncompressedSize(n)
section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size))
if size != uncompressedSize {
d, err := lzx.NewReader(section, uncompressedSize)
if err != nil {
return err
}
r.d = d
} else {
r.d = ioutil.NopCloser(section)
}
return nil
}
func (r *compressedReader) Read(b []byte) (int, error) {
for {
n, err := r.d.Read(b)
if err != io.EOF {
return n, err
}
err = r.reset(r.curChunk + 1)
if err != nil {
return n, err
}
}
}
func (r *compressedReader) Close() error {
var err error
if r.d != nil {
err = r.d.Close()
r.d = nil
}
return err
}
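// Worked example of the chunk table fixup in newCompressedReader (numbers
// are illustrative): for originalSize = 100000, nchunks = 4 and the resource
// starts with three stored 32-bit offsets, so base = 12 bytes. Chunk 0
// implicitly begins right after the table, and the stored offsets are
// relative to that point, so after "chunks[i] = c + base":
//
//	stored: [1200, 2500, 3800]
//	chunks: [12, 1212, 2512, 3812] // absolute offsets into the section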

View file

@@ -1,606 +0,0 @@
// Package lzx implements a decompressor for the WIM variant of the
// LZX compression algorithm.
//
// The LZX algorithm is an earlier variant of LZX DELTA, which is documented
// at https://msdn.microsoft.com/en-us/library/cc483133(v=exchg.80).aspx.
package lzx
import (
"bytes"
"encoding/binary"
"errors"
"io"
)
const (
maincodecount = 496
maincodesplit = 256
lencodecount = 249
lenshift = 9
codemask = 0x1ff
tablebits = 9
tablesize = 1 << tablebits
maxBlockSize = 32768
windowSize = 32768
maxTreePathLen = 16
e8filesize = 12000000
maxe8offset = 0x3fffffff
verbatimBlock = 1
alignedOffsetBlock = 2
uncompressedBlock = 3
)
var footerBits = [...]byte{
0, 0, 0, 0, 1, 1, 2, 2,
3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14,
}
var basePosition = [...]uint16{
0, 1, 2, 3, 4, 6, 8, 12,
16, 24, 32, 48, 64, 96, 128, 192,
256, 384, 512, 768, 1024, 1536, 2048, 3072,
4096, 6144, 8192, 12288, 16384, 24576, 32768,
}
var (
errCorrupt = errors.New("LZX data corrupt")
)
// Reader is an interface used by the decompressor to access
// the input stream. If the provided io.Reader does not implement
// Reader, then a bufio.Reader is used.
type Reader interface {
io.Reader
io.ByteReader
}
type decompressor struct {
r io.Reader
err error
unaligned bool
nbits byte
c uint32
lru [3]uint16
uncompressed int
windowReader *bytes.Reader
mainlens [maincodecount]byte
lenlens [lencodecount]byte
window [windowSize]byte
b []byte
bv int
bo int
}
//go:noinline
func (f *decompressor) fail(err error) {
if f.err == nil {
f.err = err
}
f.bo = 0
f.bv = 0
}
func (f *decompressor) ensureAtLeast(n int) error {
if f.bv-f.bo >= n {
return nil
}
if f.err != nil {
return f.err
}
if f.bv != f.bo {
copy(f.b[:f.bv-f.bo], f.b[f.bo:f.bv])
}
n, err := io.ReadAtLeast(f.r, f.b[f.bv-f.bo:], n)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else {
f.fail(err)
}
return err
}
f.bv = f.bv - f.bo + n
f.bo = 0
return nil
}
// feed retrieves another 16-bit word from the stream and consumes
// it into f.c. It returns false if there are no more bytes available.
// Otherwise, on error, it sets f.err.
func (f *decompressor) feed() bool {
err := f.ensureAtLeast(2)
if err != nil {
if err == io.ErrUnexpectedEOF {
return false
}
}
f.c |= (uint32(f.b[f.bo+1])<<8 | uint32(f.b[f.bo])) << (16 - f.nbits)
f.nbits += 16
f.bo += 2
return true
}
// getBits retrieves the next n bits from the byte stream. n
// must be <= 16. It sets f.err on error.
func (f *decompressor) getBits(n byte) uint16 {
if f.nbits < n {
if !f.feed() {
f.fail(io.ErrUnexpectedEOF)
}
}
c := uint16(f.c >> (32 - n))
f.c <<= n
f.nbits -= n
return c
}
type huffman struct {
extra [][]uint16
maxbits byte
table [tablesize]uint16
}
// buildTable builds a huffman decoding table from a slice of code lengths,
// one per code, in order. Each code length must be <= maxTreePathLen.
// See https://en.wikipedia.org/wiki/Canonical_Huffman_code.
func buildTable(codelens []byte) *huffman {
// Determine the number of codes of each length, and the
// maximum length.
var count [maxTreePathLen + 1]uint
var max byte
for _, cl := range codelens {
count[cl]++
if max < cl {
max = cl
}
}
if max == 0 {
return &huffman{}
}
// Determine the first code of each length.
var first [maxTreePathLen + 1]uint
code := uint(0)
for i := byte(1); i <= max; i++ {
code <<= 1
first[i] = code
code += count[i]
}
if code != 1<<max {
return nil
}
// Build a table for code lookup. For code sizes < max,
// put all possible suffixes for the code into the table, too.
// For max > tablebits, split long codes into additional tables
// of suffixes of max-tablebits length.
h := &huffman{maxbits: max}
if max > tablebits {
core := first[tablebits+1] / 2 // Number of codes that fit without extra tables
nextra := 1<<tablebits - core // Number of extra entries
h.extra = make([][]uint16, nextra)
for code := core; code < 1<<tablebits; code++ {
h.table[code] = uint16(code - core)
h.extra[code-core] = make([]uint16, 1<<(max-tablebits))
}
}
for i, cl := range codelens {
if cl != 0 {
code := first[cl]
first[cl]++
v := uint16(cl)<<lenshift | uint16(i)
if cl <= tablebits {
extendedCode := code << (tablebits - cl)
for j := uint(0); j < 1<<(tablebits-cl); j++ {
h.table[extendedCode+j] = v
}
} else {
prefix := code >> (cl - tablebits)
suffix := code & (1<<(cl-tablebits) - 1)
extendedCode := suffix << (max - cl)
for j := uint(0); j < 1<<(max-cl); j++ {
h.extra[h.table[prefix]][extendedCode+j] = v
}
}
}
}
return h
}
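// A small worked example of the canonical assignment above (illustrative):
// code lengths {a:2, b:1, c:3, d:3} give count[1]=1, count[2]=1, count[3]=2,
// so first[1]=0, first[2]=2, first[3]=6, and the symbols receive, in input
// order, a=10, b=0, c=110, d=111. The completeness check holds because the
// final code value 6+2 = 8 equals 1<<max = 1<<3.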
// getCode retrieves the next code using the provided
// huffman tree. It sets f.err on error.
func (f *decompressor) getCode(h *huffman) uint16 {
if h.maxbits > 0 {
if f.nbits < maxTreePathLen {
f.feed()
}
// For codes with length < tablebits, it doesn't matter
// what the remainder of the bits used for table lookup
// are, since entries with all possible suffixes were
// added to the table.
c := h.table[f.c>>(32-tablebits)]
if c >= 1<<lenshift {
// The code is already in c.
} else {
c = h.extra[c][f.c<<tablebits>>(32-(h.maxbits-tablebits))]
}
n := byte(c >> lenshift)
if f.nbits >= n {
// Only consume the length of the code, not the maximum
// code length.
f.c <<= n
f.nbits -= n
return c & codemask
}
f.fail(io.ErrUnexpectedEOF)
return 0
}
// This is an empty tree. It should not be used.
f.fail(errCorrupt)
return 0
}
// readTree updates the huffman tree path lengths in lens by
// reading and decoding lengths from the byte stream. lens
// should be prepopulated with the previous block's tree's path
// lengths. For the first block, lens should be zero.
func (f *decompressor) readTree(lens []byte) error {
// Get the pre-tree for the main tree.
var pretreeLen [20]byte
for i := range pretreeLen {
pretreeLen[i] = byte(f.getBits(4))
}
if f.err != nil {
return f.err
}
h := buildTable(pretreeLen[:])
// The lengths are encoded as a series of huffman codes
// encoded by the pre-tree.
for i := 0; i < len(lens); {
c := byte(f.getCode(h))
if f.err != nil {
return f.err
}
switch {
case c <= 16: // length is delta from previous length
lens[i] = (lens[i] + 17 - c) % 17
i++
case c == 17: // next n + 4 lengths are zero
zeroes := int(f.getBits(4)) + 4
if i+zeroes > len(lens) {
return errCorrupt
}
for j := 0; j < zeroes; j++ {
lens[i+j] = 0
}
i += zeroes
case c == 18: // next n + 20 lengths are zero
zeroes := int(f.getBits(5)) + 20
if i+zeroes > len(lens) {
return errCorrupt
}
for j := 0; j < zeroes; j++ {
lens[i+j] = 0
}
i += zeroes
case c == 19: // next n + 4 lengths all have the same value
same := int(f.getBits(1)) + 4
if i+same > len(lens) {
return errCorrupt
}
c = byte(f.getCode(h))
if c > 16 {
return errCorrupt
}
l := (lens[i] + 17 - c) % 17
for j := 0; j < same; j++ {
lens[i+j] = l
}
i += same
default:
return errCorrupt
}
}
if f.err != nil {
return f.err
}
return nil
}
func (f *decompressor) readBlockHeader() (byte, uint16, error) {
// If the previous block was an unaligned uncompressed block, restore
// 2-byte alignment.
if f.unaligned {
err := f.ensureAtLeast(1)
if err != nil {
return 0, 0, err
}
f.bo++
f.unaligned = false
}
blockType := f.getBits(3)
full := f.getBits(1)
var blockSize uint16
if full != 0 {
blockSize = maxBlockSize
} else {
blockSize = f.getBits(16)
if blockSize > maxBlockSize {
return 0, 0, errCorrupt
}
}
if f.err != nil {
return 0, 0, f.err
}
switch blockType {
case verbatimBlock, alignedOffsetBlock:
// The caller will read the huffman trees.
case uncompressedBlock:
if f.nbits > 16 {
panic("impossible: more than one 16-bit word remains")
}
// Drop the remaining bits in the current 16-bit word
// If there are no bits left, discard a full 16-bit word.
n := f.nbits
if n == 0 {
n = 16
}
f.getBits(n)
// Read the LRU values for the next block.
err := f.ensureAtLeast(12)
if err != nil {
return 0, 0, err
}
f.lru[0] = uint16(binary.LittleEndian.Uint32(f.b[f.bo : f.bo+4]))
f.lru[1] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+4 : f.bo+8]))
f.lru[2] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+8 : f.bo+12]))
f.bo += 12
default:
return 0, 0, errCorrupt
}
return byte(blockType), blockSize, nil
}
// readTrees reads the two or three huffman trees for the current block.
// readAligned specifies whether to read the aligned offset tree.
func (f *decompressor) readTrees(readAligned bool) (main *huffman, length *huffman, aligned *huffman, err error) {
// Aligned offset blocks start with a small aligned offset tree.
if readAligned {
var alignedLen [8]byte
for i := range alignedLen {
alignedLen[i] = byte(f.getBits(3))
}
aligned = buildTable(alignedLen[:])
if aligned == nil {
err = errors.New("corrupt")
return
}
}
// The main tree is encoded in two parts.
err = f.readTree(f.mainlens[:maincodesplit])
if err != nil {
return
}
err = f.readTree(f.mainlens[maincodesplit:])
if err != nil {
return
}
main = buildTable(f.mainlens[:])
if main == nil {
err = errors.New("corrupt")
return
}
	// The length tree is encoded in a single part.
err = f.readTree(f.lenlens[:])
if err != nil {
return
}
length = buildTable(f.lenlens[:])
if length == nil {
err = errors.New("corrupt")
return
}
err = f.err
return
}
// readCompressedBlock decodes a compressed block, writing into the window
// starting at start and ending at end, and using the provided huffman trees.
func (f *decompressor) readCompressedBlock(start, end uint16, hmain, hlength, haligned *huffman) (int, error) {
i := start
for i < end {
main := f.getCode(hmain)
if f.err != nil {
break
}
if main < 256 {
// Literal byte.
f.window[i] = byte(main)
i++
continue
}
// This is a match backward in the window. Determine
		// the offset and length.
matchlen := (main - 256) % 8
slot := (main - 256) / 8
// The length is either the low bits of the code,
// or if this is 7, is encoded with the length tree.
if matchlen == 7 {
matchlen += f.getCode(hlength)
}
matchlen += 2
var matchoffset uint16
if slot < 3 {
// The offset is one of the LRU values.
matchoffset = f.lru[slot]
f.lru[slot] = f.lru[0]
f.lru[0] = matchoffset
} else {
// The offset is encoded as a combination of the
// slot and more bits from the bit stream.
offsetbits := footerBits[slot]
var verbatimbits, alignedbits uint16
if offsetbits > 0 {
if haligned != nil && offsetbits >= 3 {
// This is an aligned offset block. Combine
// the bits written verbatim with the aligned
// offset tree code.
verbatimbits = f.getBits(offsetbits-3) * 8
alignedbits = f.getCode(haligned)
} else {
// There are no aligned offset bits to read,
// only verbatim bits.
verbatimbits = f.getBits(offsetbits)
alignedbits = 0
}
}
matchoffset = basePosition[slot] + verbatimbits + alignedbits - 2
// Update the LRU cache.
f.lru[2] = f.lru[1]
f.lru[1] = f.lru[0]
f.lru[0] = matchoffset
}
if matchoffset <= i && matchlen <= end-i {
copyend := i + matchlen
for ; i < copyend; i++ {
f.window[i] = f.window[i-matchoffset]
}
} else {
f.fail(errCorrupt)
break
}
}
return int(i - start), f.err
}
// readBlock decodes the current block and returns the number of uncompressed bytes.
func (f *decompressor) readBlock(start uint16) (int, error) {
blockType, size, err := f.readBlockHeader()
if err != nil {
return 0, err
}
if blockType == uncompressedBlock {
if size%2 == 1 {
// Remember to realign the byte stream at the next block.
f.unaligned = true
}
copied := 0
if f.bo < f.bv {
copied = int(size)
s := int(start)
if copied > f.bv-f.bo {
copied = f.bv - f.bo
}
copy(f.window[s:s+copied], f.b[f.bo:f.bo+copied])
f.bo += copied
}
n, err := io.ReadFull(f.r, f.window[start+uint16(copied):start+size])
return copied + n, err
}
hmain, hlength, haligned, err := f.readTrees(blockType == alignedOffsetBlock)
if err != nil {
return 0, err
}
return f.readCompressedBlock(start, start+size, hmain, hlength, haligned)
}
// decodeE8 reverses the 0xe8 x86 instruction encoding that was applied
// to the uncompressed data before it was compressed.
func decodeE8(b []byte, off int64) {
if off > maxe8offset || len(b) < 10 {
return
}
for i := 0; i < len(b)-10; i++ {
if b[i] == 0xe8 {
currentPtr := int32(off) + int32(i)
abs := int32(binary.LittleEndian.Uint32(b[i+1 : i+5]))
if abs >= -currentPtr && abs < e8filesize {
var rel int32
if abs >= 0 {
rel = abs - currentPtr
} else {
rel = abs + e8filesize
}
binary.LittleEndian.PutUint32(b[i+1:i+5], uint32(rel))
}
i += 4
}
}
}
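// Worked example of the transform above (illustrative): if the 0xe8 byte
// sits at stream offset 0x100 and its operand is the absolute target
// 0x00000200, then currentPtr = 0x100 and the operand is rewritten to
// rel = 0x200 - 0x100 = 0x100; targets encoded as negative values get
// e8filesize added back instead.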
func (f *decompressor) Read(b []byte) (int, error) {
// Read and uncompress everything.
if f.windowReader == nil {
n := 0
for n < f.uncompressed {
k, err := f.readBlock(uint16(n))
if err != nil {
return 0, err
}
n += k
}
decodeE8(f.window[:f.uncompressed], 0)
f.windowReader = bytes.NewReader(f.window[:f.uncompressed])
}
// Just read directly from the window.
return f.windowReader.Read(b)
}
func (f *decompressor) Close() error {
return nil
}
// NewReader returns a new io.ReadCloser that decompresses a
// WIM LZX stream until uncompressedSize bytes have been returned.
func NewReader(r io.Reader, uncompressedSize int) (io.ReadCloser, error) {
if uncompressedSize > windowSize {
return nil, errors.New("uncompressed size is limited to 32KB")
}
f := &decompressor{
lru: [3]uint16{1, 1, 1},
uncompressed: uncompressedSize,
b: make([]byte, 4096),
r: r,
}
return f, nil
}
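// A minimal use of this package (hedged sketch inside a helper returning
// ([]byte, error); chunk is assumed to be one compressed WIM resource chunk
// and uncompressedSize its original length, at most 32768):
//
//	r, err := lzx.NewReader(bytes.NewReader(chunk), uncompressedSize)
//	if err != nil {
//		return nil, err
//	}
//	defer r.Close()
//	return ioutil.ReadAll(r)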

View file

@@ -1,51 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"github.com/Microsoft/go-winio/wim"
)
func main() {
flag.Parse()
f, err := os.Open(flag.Arg(0))
if err != nil {
panic(err)
}
w, err := wim.NewReader(f)
if err != nil {
panic(err)
}
fmt.Printf("%#v\n%#v\n", w.Image[0], w.Image[0].Windows)
dir, err := w.Image[0].Open()
if err != nil {
panic(err)
}
err = recur(dir)
if err != nil {
panic(err)
}
}
func recur(d *wim.File) error {
files, err := d.Readdir()
if err != nil {
return fmt.Errorf("%s: %s", d.Name, err)
}
for _, f := range files {
if f.IsDir() {
err = recur(f)
if err != nil {
return fmt.Errorf("%s: %s", f.Name, err)
}
}
}
return nil
}

View file

@@ -1,866 +0,0 @@
// Package wim implements a WIM file parser.
//
// WIM files are used to distribute Windows file system and container images.
// They are documented at https://msdn.microsoft.com/en-us/library/windows/desktop/dd861280.aspx.
package wim
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"strconv"
"sync"
"time"
"unicode/utf16"
)
// File attribute constants from Windows.
const (
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
FILE_ATTRIBUTE_SYSTEM = 0x00000004
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
FILE_ATTRIBUTE_DEVICE = 0x00000040
FILE_ATTRIBUTE_NORMAL = 0x00000080
FILE_ATTRIBUTE_TEMPORARY = 0x00000100
FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
FILE_ATTRIBUTE_COMPRESSED = 0x00000800
FILE_ATTRIBUTE_OFFLINE = 0x00001000
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000
FILE_ATTRIBUTE_ENCRYPTED = 0x00004000
FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000
FILE_ATTRIBUTE_VIRTUAL = 0x00010000
FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000
FILE_ATTRIBUTE_EA = 0x00040000
)
// Windows processor architectures.
const (
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SHX = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_IA64 = 6
PROCESSOR_ARCHITECTURE_ALPHA64 = 7
PROCESSOR_ARCHITECTURE_MSIL = 8
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10
PROCESSOR_ARCHITECTURE_NEUTRAL = 11
PROCESSOR_ARCHITECTURE_ARM64 = 12
)
var wimImageTag = [...]byte{'M', 'S', 'W', 'I', 'M', 0, 0, 0}
type guid struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}
func (g guid) String() string {
return fmt.Sprintf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7])
}
type resourceDescriptor struct {
FlagsAndCompressedSize uint64
Offset int64
OriginalSize int64
}
type resFlag byte
const (
resFlagFree resFlag = 1 << iota
resFlagMetadata
resFlagCompressed
resFlagSpanned
)
const validate = false
const supportedResFlags = resFlagMetadata | resFlagCompressed
func (r *resourceDescriptor) Flags() resFlag {
return resFlag(r.FlagsAndCompressedSize >> 56)
}
func (r *resourceDescriptor) CompressedSize() int64 {
return int64(r.FlagsAndCompressedSize & 0xffffffffffffff)
}
func (r *resourceDescriptor) String() string {
s := fmt.Sprintf("%d bytes at %d", r.CompressedSize(), r.Offset)
if r.Flags()&4 != 0 {
s += fmt.Sprintf(" (uncompresses to %d)", r.OriginalSize)
}
return s
}
// SHA1Hash contains the SHA1 hash of a file or stream.
type SHA1Hash [20]byte
type streamDescriptor struct {
resourceDescriptor
PartNumber uint16
RefCount uint32
Hash SHA1Hash
}
type hdrFlag uint32
const (
hdrFlagReserved hdrFlag = 1 << iota
hdrFlagCompressed
hdrFlagReadOnly
hdrFlagSpanned
hdrFlagResourceOnly
hdrFlagMetadataOnly
hdrFlagWriteInProgress
hdrFlagRpFix
)
const (
hdrFlagCompressReserved hdrFlag = 1 << (iota + 16)
hdrFlagCompressXpress
hdrFlagCompressLzx
)
const supportedHdrFlags = hdrFlagRpFix | hdrFlagReadOnly | hdrFlagCompressed | hdrFlagCompressLzx
type wimHeader struct {
ImageTag [8]byte
Size uint32
Version uint32
Flags hdrFlag
CompressionSize uint32
WIMGuid guid
PartNumber uint16
TotalParts uint16
ImageCount uint32
OffsetTable resourceDescriptor
XMLData resourceDescriptor
BootMetadata resourceDescriptor
BootIndex uint32
Padding uint32
Integrity resourceDescriptor
Unused [60]byte
}
type securityblockDisk struct {
TotalLength uint32
NumEntries uint32
}
const securityblockDiskSize = 8
type direntry struct {
Attributes uint32
SecurityID uint32
SubdirOffset int64
Unused1, Unused2 int64
CreationTime Filetime
LastAccessTime Filetime
LastWriteTime Filetime
Hash SHA1Hash
Padding uint32
ReparseHardLink int64
StreamCount uint16
ShortNameLength uint16
FileNameLength uint16
}
var direntrySize = int64(binary.Size(direntry{}) + 8) // includes an 8-byte length prefix
type streamentry struct {
Unused int64
Hash SHA1Hash
NameLength int16
}
var streamentrySize = int64(binary.Size(streamentry{}) + 8) // includes an 8-byte length prefix
// Filetime represents a Windows time.
type Filetime struct {
LowDateTime uint32
HighDateTime uint32
}
// Time returns the time as time.Time.
func (ft *Filetime) Time() time.Time {
// 100-nanosecond intervals since January 1, 1601
nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
nsec -= 116444736000000000
// convert into nanoseconds
nsec *= 100
return time.Unix(0, nsec)
}
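// Worked example: the FILETIME tick count 116444736000000000 (100ns units
// since 1601-01-01) is exactly the Unix epoch, so Time returns
// 1970-01-01 00:00:00 UTC; adding 10000000 ticks (one second) yields
// 1970-01-01 00:00:01 UTC.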
// UnmarshalXML unmarshals the time from a WIM XML blob.
func (ft *Filetime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type time struct {
Low string `xml:"LOWPART"`
High string `xml:"HIGHPART"`
}
var t time
err := d.DecodeElement(&t, &start)
if err != nil {
return err
}
low, err := strconv.ParseUint(t.Low, 0, 32)
if err != nil {
return err
}
high, err := strconv.ParseUint(t.High, 0, 32)
if err != nil {
return err
}
ft.LowDateTime = uint32(low)
ft.HighDateTime = uint32(high)
return nil
}
type info struct {
Image []ImageInfo `xml:"IMAGE"`
}
// ImageInfo contains information about the image.
type ImageInfo struct {
Name string `xml:"NAME"`
Index int `xml:"INDEX,attr"`
CreationTime Filetime `xml:"CREATIONTIME"`
ModTime Filetime `xml:"LASTMODIFICATIONTIME"`
Windows *WindowsInfo `xml:"WINDOWS"`
}
// WindowsInfo contains information about the Windows installation in the image.
type WindowsInfo struct {
Arch byte `xml:"ARCH"`
ProductName string `xml:"PRODUCTNAME"`
EditionID string `xml:"EDITIONID"`
InstallationType string `xml:"INSTALLATIONTYPE"`
ProductType string `xml:"PRODUCTTYPE"`
Languages []string `xml:"LANGUAGES>LANGUAGE"`
DefaultLanguage string `xml:"LANGUAGES>DEFAULT"`
Version Version `xml:"VERSION"`
SystemRoot string `xml:"SYSTEMROOT"`
}
// Version represents a Windows build version.
type Version struct {
Major int `xml:"MAJOR"`
Minor int `xml:"MINOR"`
Build int `xml:"BUILD"`
SPBuild int `xml:"SPBUILD"`
SPLevel int `xml:"SPLEVEL"`
}
// ParseError is returned when the WIM cannot be parsed.
type ParseError struct {
Oper string
Path string
Err error
}
func (e *ParseError) Error() string {
if e.Path == "" {
return "WIM parse error at " + e.Oper + ": " + e.Err.Error()
}
return fmt.Sprintf("WIM parse error: %s %s: %s", e.Oper, e.Path, e.Err.Error())
}
// Reader provides functions to read a WIM file.
type Reader struct {
hdr wimHeader
r io.ReaderAt
fileData map[SHA1Hash]resourceDescriptor
XMLInfo string // The XML information about the WIM.
Image []*Image // The WIM's images.
}
// Image represents an image within a WIM file.
type Image struct {
wim *Reader
offset resourceDescriptor
sds [][]byte
rootOffset int64
r io.ReadCloser
curOffset int64
m sync.Mutex
ImageInfo
}
// StreamHeader contains alternate data stream metadata.
type StreamHeader struct {
Name string
Hash SHA1Hash
Size int64
}
// Stream represents an alternate data stream or reparse point data stream.
type Stream struct {
StreamHeader
wim *Reader
offset resourceDescriptor
}
// FileHeader contains file metadata.
type FileHeader struct {
Name string
ShortName string
Attributes uint32
SecurityDescriptor []byte
CreationTime Filetime
LastAccessTime Filetime
LastWriteTime Filetime
Hash SHA1Hash
Size int64
LinkID int64
ReparseTag uint32
ReparseReserved uint32
}
// File represents a file or directory in a WIM image.
type File struct {
FileHeader
Streams []*Stream
offset resourceDescriptor
img *Image
subdirOffset int64
}
// NewReader returns a Reader that can be used to read WIM file data.
func NewReader(f io.ReaderAt) (*Reader, error) {
r := &Reader{r: f}
section := io.NewSectionReader(f, 0, 0xffff)
err := binary.Read(section, binary.LittleEndian, &r.hdr)
if err != nil {
return nil, err
}
if r.hdr.ImageTag != wimImageTag {
return nil, &ParseError{Oper: "image tag", Err: errors.New("not a WIM file")}
}
if r.hdr.Flags&^supportedHdrFlags != 0 {
return nil, fmt.Errorf("unsupported WIM flags %x", r.hdr.Flags&^supportedHdrFlags)
}
if r.hdr.CompressionSize != 0x8000 {
return nil, fmt.Errorf("unsupported compression size %d", r.hdr.CompressionSize)
}
if r.hdr.TotalParts != 1 {
return nil, errors.New("multi-part WIM not supported")
}
fileData, images, err := r.readOffsetTable(&r.hdr.OffsetTable)
if err != nil {
return nil, err
}
xmlinfo, err := r.readXML()
if err != nil {
return nil, err
}
var info info
err = xml.Unmarshal([]byte(xmlinfo), &info)
if err != nil {
return nil, &ParseError{Oper: "XML info", Err: err}
}
for i, img := range images {
for _, imgInfo := range info.Image {
if imgInfo.Index == i+1 {
img.ImageInfo = imgInfo
break
}
}
}
r.fileData = fileData
r.Image = images
r.XMLInfo = xmlinfo
return r, nil
}
// Close releases resources associated with the Reader.
func (r *Reader) Close() error {
for _, img := range r.Image {
img.reset()
}
return nil
}
func (r *Reader) resourceReader(hdr *resourceDescriptor) (io.ReadCloser, error) {
return r.resourceReaderWithOffset(hdr, 0)
}
func (r *Reader) resourceReaderWithOffset(hdr *resourceDescriptor, offset int64) (io.ReadCloser, error) {
var sr io.ReadCloser
section := io.NewSectionReader(r.r, hdr.Offset, hdr.CompressedSize())
if hdr.Flags()&resFlagCompressed == 0 {
section.Seek(offset, 0)
sr = ioutil.NopCloser(section)
} else {
cr, err := newCompressedReader(section, hdr.OriginalSize, offset)
if err != nil {
return nil, err
}
sr = cr
}
return sr, nil
}
func (r *Reader) readResource(hdr *resourceDescriptor) ([]byte, error) {
rsrc, err := r.resourceReader(hdr)
if err != nil {
return nil, err
}
defer rsrc.Close()
return ioutil.ReadAll(rsrc)
}
func (r *Reader) readXML() (string, error) {
if r.hdr.XMLData.CompressedSize() == 0 {
return "", nil
}
rsrc, err := r.resourceReader(&r.hdr.XMLData)
if err != nil {
return "", err
}
defer rsrc.Close()
XMLData := make([]uint16, r.hdr.XMLData.OriginalSize/2)
err = binary.Read(rsrc, binary.LittleEndian, XMLData)
if err != nil {
return "", &ParseError{Oper: "XML data", Err: err}
}
// The BOM will always indicate little-endian UTF-16.
if XMLData[0] != 0xfeff {
return "", &ParseError{Oper: "XML data", Err: errors.New("invalid BOM")}
}
return string(utf16.Decode(XMLData[1:])), nil
}
func (r *Reader) readOffsetTable(res *resourceDescriptor) (map[SHA1Hash]resourceDescriptor, []*Image, error) {
fileData := make(map[SHA1Hash]resourceDescriptor)
var images []*Image
offsetTable, err := r.readResource(res)
if err != nil {
return nil, nil, &ParseError{Oper: "offset table", Err: err}
}
br := bytes.NewReader(offsetTable)
for i := 0; ; i++ {
var res streamDescriptor
err := binary.Read(br, binary.LittleEndian, &res)
if err == io.EOF {
break
}
if err != nil {
return nil, nil, &ParseError{Oper: "offset table", Err: err}
}
if res.Flags()&^supportedResFlags != 0 {
return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("unsupported resource flag")}
}
// Validation for ad-hoc testing
if validate {
sec, err := r.resourceReader(&res.resourceDescriptor)
if err != nil {
panic(fmt.Sprint(i, err))
}
hash := sha1.New()
_, err = io.Copy(hash, sec)
sec.Close()
if err != nil {
panic(fmt.Sprint(i, err))
}
var cmphash SHA1Hash
copy(cmphash[:], hash.Sum(nil))
if cmphash != res.Hash {
panic(fmt.Sprint(i, "hash mismatch"))
}
}
if res.Flags()&resFlagMetadata != 0 {
image := &Image{
wim: r,
offset: res.resourceDescriptor,
}
images = append(images, image)
} else {
fileData[res.Hash] = res.resourceDescriptor
}
}
if len(images) != int(r.hdr.ImageCount) {
return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("mismatched image count")}
}
return fileData, images, nil
}
func (r *Reader) readSecurityDescriptors(rsrc io.Reader) (sds [][]byte, n int64, err error) {
var secBlock securityblockDisk
err = binary.Read(rsrc, binary.LittleEndian, &secBlock)
if err != nil {
err = &ParseError{Oper: "security table", Err: err}
return
}
n += securityblockDiskSize
secSizes := make([]int64, secBlock.NumEntries)
err = binary.Read(rsrc, binary.LittleEndian, &secSizes)
if err != nil {
err = &ParseError{Oper: "security table sizes", Err: err}
return
}
n += int64(secBlock.NumEntries * 8)
sds = make([][]byte, secBlock.NumEntries)
for i, size := range secSizes {
sd := make([]byte, size&0xffffffff)
_, err = io.ReadFull(rsrc, sd)
if err != nil {
err = &ParseError{Oper: "security descriptor", Err: err}
return
}
n += int64(len(sd))
sds[i] = sd
}
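// The security descriptor block is padded to an 8-byte boundary; skip
// whatever padding remains beyond the descriptors themselves.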
secsize := int64((secBlock.TotalLength + 7) &^ 7)
if n > secsize {
err = &ParseError{Oper: "security descriptor", Err: errors.New("security descriptor table too small")}
return
}
_, err = io.CopyN(ioutil.Discard, rsrc, secsize-n)
if err != nil {
return
}
n = secsize
return
}
// Open parses the image and returns the root directory.
func (img *Image) Open() (*File, error) {
if img.sds == nil {
rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, img.rootOffset)
if err != nil {
return nil, err
}
sds, n, err := img.wim.readSecurityDescriptors(rsrc)
if err != nil {
rsrc.Close()
return nil, err
}
img.sds = sds
img.r = rsrc
img.rootOffset = n
img.curOffset = n
}
f, err := img.readdir(img.rootOffset)
if err != nil {
return nil, err
}
if len(f) != 1 {
return nil, &ParseError{Oper: "root directory", Err: errors.New("expected exactly 1 root directory entry")}
}
return f[0], err
}
func (img *Image) reset() {
if img.r != nil {
img.r.Close()
img.r = nil
}
img.curOffset = -1
}
func (img *Image) readdir(offset int64) ([]*File, error) {
img.m.Lock()
defer img.m.Unlock()
if offset < img.curOffset || offset > img.curOffset+chunkSize {
// Reset to seek backward or to seek forward very far.
img.reset()
}
if img.r == nil {
rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, offset)
if err != nil {
return nil, err
}
img.r = rsrc
img.curOffset = offset
}
if offset > img.curOffset {
_, err := io.CopyN(ioutil.Discard, img.r, offset-img.curOffset)
if err != nil {
img.reset()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
}
var entries []*File
for {
e, n, err := img.readNextEntry(img.r)
img.curOffset += n
if err == io.EOF {
break
}
if err != nil {
img.reset()
return nil, err
}
entries = append(entries, e)
}
return entries, nil
}
func (img *Image) readNextEntry(r io.Reader) (*File, int64, error) {
var length int64
err := binary.Read(r, binary.LittleEndian, &length)
if err != nil {
return nil, 0, &ParseError{Oper: "directory length check", Err: err}
}
if length == 0 {
return nil, 8, io.EOF
}
left := length
if left < direntrySize {
return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short")}
}
var dentry direntry
err = binary.Read(r, binary.LittleEndian, &dentry)
if err != nil {
return nil, 0, &ParseError{Oper: "directory entry", Err: err}
}
left -= direntrySize
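// The name region holds FileNameLength bytes of UTF-16 file name, a 2-byte
// null terminator, and then ShortNameLength bytes of short (8.3) name.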
namesLen := int64(dentry.FileNameLength + 2 + dentry.ShortNameLength)
if left < namesLen {
return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")}
}
names := make([]uint16, namesLen/2)
err = binary.Read(r, binary.LittleEndian, names)
if err != nil {
return nil, 0, &ParseError{Oper: "file name", Err: err}
}
left -= namesLen
var name, shortName string
if dentry.FileNameLength > 0 {
name = string(utf16.Decode(names[:dentry.FileNameLength/2]))
}
if dentry.ShortNameLength > 0 {
shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:]))
}
var offset resourceDescriptor
zerohash := SHA1Hash{}
if dentry.Hash != zerohash {
var ok bool
offset, ok = img.wim.fileData[dentry.Hash]
if !ok {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)}
}
}
f := &File{
FileHeader: FileHeader{
Attributes: dentry.Attributes,
CreationTime: dentry.CreationTime,
LastAccessTime: dentry.LastAccessTime,
LastWriteTime: dentry.LastWriteTime,
Hash: dentry.Hash,
Size: offset.OriginalSize,
Name: name,
ShortName: shortName,
},
offset: offset,
img: img,
subdirOffset: dentry.SubdirOffset,
}
isDir := false
if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 {
f.LinkID = dentry.ReparseHardLink
if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 {
isDir = true
}
} else {
f.ReparseTag = uint32(dentry.ReparseHardLink)
f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32)
}
if isDir && f.subdirOffset == 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")}
} else if !isDir && f.subdirOffset != 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")}
}
if dentry.SecurityID != 0xffffffff {
f.SecurityDescriptor = img.sds[dentry.SecurityID]
}
_, err = io.CopyN(ioutil.Discard, r, left)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, err
}
if dentry.StreamCount > 0 {
var streams []*Stream
for i := uint16(0); i < dentry.StreamCount; i++ {
s, n, err := img.readNextStream(r)
length += n
if err != nil {
return nil, 0, err
}
// The first unnamed stream should be treated as the file stream.
if i == 0 && s.Name == "" {
f.Hash = s.Hash
f.Size = s.Size
f.offset = s.offset
} else if s.Name != "" {
streams = append(streams, s)
}
}
f.Streams = streams
}
if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 {
return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")}
}
return f, length, nil
}
func (img *Image) readNextStream(r io.Reader) (*Stream, int64, error) {
var length int64
err := binary.Read(r, binary.LittleEndian, &length)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, &ParseError{Oper: "stream length check", Err: err}
}
left := length
if left < streamentrySize {
return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short")}
}
var sentry streamentry
err = binary.Read(r, binary.LittleEndian, &sentry)
if err != nil {
return nil, 0, &ParseError{Oper: "stream entry", Err: err}
}
left -= streamentrySize
if left < int64(sentry.NameLength) {
return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")}
}
names := make([]uint16, sentry.NameLength/2)
err = binary.Read(r, binary.LittleEndian, names)
if err != nil {
return nil, 0, &ParseError{Oper: "file name", Err: err}
}
left -= int64(sentry.NameLength)
name := string(utf16.Decode(names))
var offset resourceDescriptor
if sentry.Hash != (SHA1Hash{}) {
var ok bool
offset, ok = img.wim.fileData[sentry.Hash]
if !ok {
return nil, 0, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)}
}
}
s := &Stream{
StreamHeader: StreamHeader{
Hash: sentry.Hash,
Size: offset.OriginalSize,
Name: name,
},
wim: img.wim,
offset: offset,
}
_, err = io.CopyN(ioutil.Discard, r, left)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, 0, err
}
return s, length, nil
}
// Open returns an io.ReadCloser that can be used to read the stream's contents.
func (s *Stream) Open() (io.ReadCloser, error) {
return s.wim.resourceReader(&s.offset)
}
// Open returns an io.ReadCloser that can be used to read the file's contents.
func (f *File) Open() (io.ReadCloser, error) {
return f.img.wim.resourceReader(&f.offset)
}
// Readdir reads the directory entries.
func (f *File) Readdir() ([]*File, error) {
if !f.IsDir() {
return nil, errors.New("not a directory")
}
return f.img.readdir(f.subdirOffset)
}
// IsDir returns whether the given file is a directory. It returns false when it
// is a directory reparse point.
func (f *FileHeader) IsDir() bool {
return f.Attributes&(FILE_ATTRIBUTE_DIRECTORY|FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_DIRECTORY
}
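// Editor's sketch (not part of the vendored source): a minimal example of
// how the exported API above fits together. It assumes the "os" and "fmt"
// packages are imported and a WIM file at the hypothetical path
// "install.wim"; error handling is abbreviated.
func ExampleWalkWIM() {
	f, err := os.Open("install.wim") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r, err := NewReader(f)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	root, err := r.Image[0].Open() // parse the first image's metadata
	if err != nil {
		panic(err)
	}
	// Walk the image's directory tree depth-first, printing each name.
	var walk func(f *File, depth int)
	walk = func(f *File, depth int) {
		fmt.Printf("%*s%s\n", depth*2, "", f.Name)
		if !f.IsDir() {
			return
		}
		children, err := f.Readdir()
		if err != nil {
			panic(err)
		}
		for _, c := range children {
			walk(c, depth+1)
		}
	}
	walk(root, 0)
}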

View file

@ -1,26 +0,0 @@
package histogram
import (
"math/rand"
"testing"
)
func BenchmarkInsert10Bins(b *testing.B) {
b.StopTimer()
h := New(10)
b.StartTimer()
for i := 0; i < b.N; i++ {
f := rand.ExpFloat64()
h.Insert(f)
}
}
func BenchmarkInsert100Bins(b *testing.B) {
b.StopTimer()
h := New(100)
b.StartTimer()
for i := 0; i < b.N; i++ {
f := rand.ExpFloat64()
h.Insert(f)
}
}

View file

@ -1,108 +0,0 @@
// Package histogram provides a Go implementation of BigML's histogram package
// for Clojure/Java. It is currently experimental.
package histogram
import (
"container/heap"
"math"
"sort"
)
type Bin struct {
Count int
Sum float64
}
func (b *Bin) Update(x *Bin) {
b.Count += x.Count
b.Sum += x.Sum
}
func (b *Bin) Mean() float64 {
return b.Sum / float64(b.Count)
}
type Bins []*Bin
func (bs Bins) Len() int { return len(bs) }
func (bs Bins) Less(i, j int) bool { return bs[i].Mean() < bs[j].Mean() }
func (bs Bins) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
func (bs *Bins) Push(x interface{}) {
*bs = append(*bs, x.(*Bin))
}
func (bs *Bins) Pop() interface{} {
return bs.remove(len(*bs) - 1)
}
func (bs *Bins) remove(n int) *Bin {
if n < 0 || n >= len(*bs) {
return nil
}
x := (*bs)[n]
*bs = append((*bs)[:n], (*bs)[n+1:]...)
return x
}
type Histogram struct {
res *reservoir
}
func New(maxBins int) *Histogram {
return &Histogram{res: newReservoir(maxBins)}
}
func (h *Histogram) Insert(f float64) {
h.res.insert(&Bin{1, f})
h.res.compress()
}
func (h *Histogram) Bins() Bins {
return h.res.bins
}
type reservoir struct {
n int
maxBins int
bins Bins
}
func newReservoir(maxBins int) *reservoir {
return &reservoir{maxBins: maxBins}
}
func (r *reservoir) insert(bin *Bin) {
r.n += bin.Count
i := sort.Search(len(r.bins), func(i int) bool {
return r.bins[i].Mean() >= bin.Mean()
})
if i < 0 || i == r.bins.Len() {
// TODO(blake): Maybe use an .insert(i, bin) instead of
// performing the extra work of a heap.Push.
heap.Push(&r.bins, bin)
return
}
r.bins[i].Update(bin)
}
func (r *reservoir) compress() {
for r.bins.Len() > r.maxBins {
minGapIndex := -1
minGap := math.MaxFloat64
for i := 0; i < r.bins.Len()-1; i++ {
gap := gapWeight(r.bins[i], r.bins[i+1])
if minGap > gap {
minGap = gap
minGapIndex = i
}
}
prev := r.bins[minGapIndex]
next := r.bins.remove(minGapIndex + 1)
prev.Update(next)
}
}
func gapWeight(prev, next *Bin) float64 {
return next.Mean() - prev.Mean()
}
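// Editor's sketch (not part of the vendored source): a minimal usage
// example for the package above. Values are streamed into a histogram
// bounded at 8 bins, then the surviving bins are printed. Assumes "fmt"
// is imported; the demo data is arbitrary.
func ExampleHistogram() {
	h := New(8)
	for i := 0; i < 1000; i++ {
		h.Insert(float64(i % 100))
	}
	for _, b := range h.Bins() {
		fmt.Printf("mean=%.2f count=%d\n", b.Mean(), b.Count)
	}
}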

View file

@ -1,38 +0,0 @@
package histogram
import (
"math/rand"
"testing"
)
func TestHistogram(t *testing.T) {
const numPoints = 1e6
const maxBins = 3
h := New(maxBins)
for i := 0; i < numPoints; i++ {
f := rand.ExpFloat64()
h.Insert(f)
}
bins := h.Bins()
if g := len(bins); g > maxBins {
t.Fatalf("got %d bins, wanted <= %d", g, maxBins)
}
for _, b := range bins {
t.Logf("%+v", b)
}
if g := count(h.Bins()); g != numPoints {
t.Fatalf("binned %d points, wanted %d", g, numPoints)
}
}
func count(bins Bins) int {
binCounts := 0
for _, b := range bins {
binCounts += b.Count
}
return binCounts
}

View file

@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for quantile, epsilon := range targets {
if quantile*s.n <= r {
f = (2 * epsilon * r) / quantile
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
return newStream(ƒ)
}
type target struct {
quantile float64
epsilon float64
}
func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))
for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}
return targets
}
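// Editor's sketch (not part of the vendored source): how NewTargeted is
// used after the map-to-slice change above. The constructor still accepts
// the same map of quantile -> allowed error; only the internal
// representation changed. Assumes the package's Insert and Query methods
// and an imported "fmt".
func ExampleNewTargeted() {
	s := NewTargeted(map[float64]float64{
		0.50: 0.005, // median, within 0.5%
		0.99: 0.001, // 99th percentile, within 0.1%
	})
	for i := 1; i <= 10000; i++ {
		s.Insert(float64(i))
	}
	fmt.Println(s.Query(0.50), s.Query(0.99))
}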
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {

View file

@ -1,90 +0,0 @@
package topk
import (
"sort"
)
// http://www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf
type Element struct {
Value string
Count int
}
type Samples []*Element
func (sm Samples) Len() int {
return len(sm)
}
func (sm Samples) Less(i, j int) bool {
return sm[i].Count < sm[j].Count
}
func (sm Samples) Swap(i, j int) {
sm[i], sm[j] = sm[j], sm[i]
}
type Stream struct {
k int
mon map[string]*Element
// the minimum Element
min *Element
}
func New(k int) *Stream {
s := new(Stream)
s.k = k
s.mon = make(map[string]*Element)
s.min = &Element{}
// Track k+1 so that less frequent items contend for that spot,
// resulting in k being more accurate.
return s
}
func (s *Stream) Insert(x string) {
s.insert(&Element{x, 1})
}
func (s *Stream) Merge(sm Samples) {
for _, e := range sm {
s.insert(e)
}
}
func (s *Stream) insert(in *Element) {
e := s.mon[in.Value]
if e != nil {
e.Count++
} else {
if len(s.mon) < s.k+1 {
e = &Element{in.Value, in.Count}
s.mon[in.Value] = e
} else {
e = s.min
delete(s.mon, e.Value)
e.Value = in.Value
e.Count += in.Count
s.min = e
}
}
if e.Count < s.min.Count {
s.min = e
}
}
func (s *Stream) Query() Samples {
var sm Samples
for _, e := range s.mon {
sm = append(sm, e)
}
sort.Sort(sort.Reverse(sm))
if len(sm) < s.k {
return sm
}
return sm[:s.k]
}
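// Editor's sketch (not part of the vendored source): a minimal usage
// example for the package above. The stream tracks the top-k most frequent
// strings; Query returns up to k elements in descending count order.
// Assumes "fmt" is imported.
func ExampleTopK() {
	s := New(3)
	for _, v := range []string{"a", "b", "a", "c", "a", "b", "d"} {
		s.Insert(v)
	}
	for _, e := range s.Query() {
		fmt.Printf("%s: %d\n", e.Value, e.Count)
	}
}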

View file

@ -1,57 +0,0 @@
package topk
import (
"fmt"
"math/rand"
"sort"
"testing"
)
func TestTopK(t *testing.T) {
stream := New(10)
ss := []*Stream{New(10), New(10), New(10)}
m := make(map[string]int)
for _, s := range ss {
for i := 0; i < 1e6; i++ {
v := fmt.Sprintf("%x", int8(rand.ExpFloat64()))
s.Insert(v)
m[v]++
}
stream.Merge(s.Query())
}
var sm Samples
for x, s := range m {
sm = append(sm, &Element{x, s})
}
sort.Sort(sort.Reverse(sm))
g := stream.Query()
if len(g) != 10 {
t.Fatalf("got %d, want 10", len(g))
}
for i, e := range g {
if sm[i].Value != e.Value {
t.Errorf("at %d: want %q, got %q", i, sm[i].Value, e.Value)
}
}
}
func TestQuery(t *testing.T) {
queryTests := []struct {
value string
expected int
}{
{"a", 1},
{"b", 2},
{"c", 2},
}
stream := New(2)
for _, tt := range queryTests {
stream.Insert(tt.value)
if n := len(stream.Query()); n != tt.expected {
t.Errorf("want %d, got %d", tt.expected, n)
}
}
}

40
vendor/github.com/containerd/containerd/.appveyor.yml generated vendored Normal file
View file

@ -0,0 +1,40 @@
version: "{build}"
image: Visual Studio 2017
clone_folder: c:\gopath\src\github.com\containerd\containerd
branches:
only:
- master
environment:
GOPATH: C:\gopath
CGO_ENABLED: 1
GO_VERSION: 1.9
before_build:
- choco install -y mingw
# Install Go
- rd C:\Go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.zip
- 7z x go%GO_VERSION%.windows-amd64.zip -oC:\ >nul
- go version
- choco install codecov
build_script:
- bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/bin:$PATH;
script/setup/install-dev-tools;
mingw32-make.exe check"
- bash.exe -elc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build binaries"
test_script:
# TODO: need an equivalent of TRAVIS_COMMIT_RANGE
# - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
- bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage root-coverage"
- bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
# Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759
- bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH; TESTFLAGS_PARALLEL=1 mingw32-make.exe integration"
on_success:
codecov --flag windows -f coverage.txt

4
vendor/github.com/containerd/containerd/.gitignore generated vendored Normal file
View file

@ -0,0 +1,4 @@
/bin/
coverage.txt
profile.out
containerd.test

24
vendor/github.com/containerd/containerd/.gometalinter.json generated vendored Normal file
View file

@ -0,0 +1,24 @@
{
"Vendor": true,
"Deadline": "2m",
"Sort": ["linter", "severity", "path", "line"],
"Exclude": [
".*\\.pb\\.go",
"fetch\\.go:.*::error: unrecognized printf verb 'r'"
],
"EnableGC": true,
"WarnUnmatchedDirective": true,
"Enable": [
"structcheck",
"unused",
"varcheck",
"staticcheck",
"gofmt",
"goimports",
"golint",
"ineffassign",
"vet"
]
}

84
vendor/github.com/containerd/containerd/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,84 @@
dist: trusty
sudo: required
# setup travis so that we can run containers for integration tests
services:
- docker
language: go
go:
- 1.9.x
go_import_path: github.com/containerd/containerd
addons:
apt:
packages:
- btrfs-tools
- libseccomp-dev
- libapparmor-dev
- libnl-3-dev
- libnet-dev
- protobuf-c-compiler
# - protobuf-compiler
- python-minimal
- libcap-dev
- libaio-dev
- libprotobuf-c0-dev
- libprotobuf-dev
env:
- TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
- TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0
before_install:
- uname -r
install:
- wget https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip -O /tmp/protoc-3.5.0-linux-x86_64.zip
- sudo unzip -o -d /usr/local /tmp/protoc-3.5.0-linux-x86_64.zip
- sudo chmod +x /usr/local/bin/protoc
- sudo chmod og+rx /usr/local/include/google /usr/local/include/google/protobuf /usr/local/include/google/protobuf/compiler
- sudo chmod -R og+r /usr/local/include/google/protobuf/
- protoc --version
- go get -u github.com/vbatts/git-validation
- sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-9/runc -O /bin/runc; sudo chmod +x /bin/runc
- wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
- tar -C /tmp/ -zxf /tmp/criu.tar.gz
- cd /tmp/criu-3.0 && sudo make install-criu
- cd $TRAVIS_BUILD_DIR
script:
- export GOOS=$TRAVIS_GOOS
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
- GOOS=linux script/setup/install-dev-tools
- script/validate/vendor
- go build -i .
- make check
- if [ "$GOOS" = "linux" ]; then make check-protos check-api-descriptors; fi
- make build
- make binaries
- if [ "$GOOS" = "linux" ]; then sudo make install ; fi
- if [ "$GOOS" = "linux" ]; then make coverage ; fi
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make integration ; fi
# Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH TESTFLAGS_PARALLEL=1 make integration ; fi
after_success:
- bash <(curl -s https://codecov.io/bash) -F linux
before_deploy:
- make release
deploy:
provider: releases
api_key:
secure: HO+WSIVVUMMsbU74x+YyFsTP3ahqnR4xjwKAziedJ5lZXKJszQBhiYTFmcTeVBoouNjTISd07GQzpoLChuGC20U3+1NbT+CkK8xWR/x1ao2D3JY3Ds6AD9ubWRNWRLptt/xOn5Vq3F8xZyUYchwvDMl4zKCuTKxQGVdHKsINb2DehKcP5cVL6MMvqzEdfj2g99vqXAqs8uuo6dOmvxmHV43bfzDaAJSabjZZs6TKlWTqCQMet8uxyx2Dmjl2lxLwdqv12oJdrszacasn41NYuEyHI2bXyef1mhWGYN4n9bU/Y5winctZ8DOSOZvYg/2ziAaUN0+CTn1IESwVesrPz23P2Sy7wdLxu8dSIZ2yUHl7OsA5T5a5rDchAGguRVNBWvoGtuepEhdRacxTQUo1cMFZsEXjgRKKjdfc1emYQPVdN8mBv8GJwndty473ZXdvFt5R0kNVFtvWuYCa6UYJD2cKrsPSAfbZCDC/LiR3FOoTaUPMZUVkR2ACEO7Dn4+KlmBajqT40Osk/A7k1XA/TzVhMIpLtE0Vk2DfPmGsjCv8bC+MFd+R2Sc8SFdE92oEWRdoPQY5SxMYQtGxA+cbKVlT1kSw6y80yEbx5JZsBnT6+NTHwmDO3kVU9ztLdawOozTElKNAK8HoAyFmzIZ3wL64oThuDrv/TUuY8Iyn814=
file_glob: true
file: releases/*.tar.gz
skip_cleanup: true
on:
repo: containerd/containerd
tags: true

233
vendor/github.com/containerd/containerd/BUILDING.md generated vendored Normal file
View file

@ -0,0 +1,233 @@
# Build containerd from source
This guide is useful if you intend to contribute to containerd. Thanks for your
effort. Every contribution is greatly appreciated.
This doc includes:
* [Build requirements](#build-requirements)
* [Build the development environment](#build-the-development-environment)
* [Build containerd](#build-containerd)
* [Via docker container](#via-docker-container)
* [Testing](#testing-containerd)
## Build requirements
To build the `containerd` daemon and the simple `ctr` test client, the following build system dependencies are required:
* Go 1.9.x or above
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.
## Build the development environment
First you need to set up your Go development environment. You can follow the
guide [How to Write Go Code](https://golang.org/doc/code.html); by the end you
should have `GOPATH` and `GOROOT` set in your environment.
At this point you can use `go` to checkout `containerd` in your `GOPATH`:
```sh
go get github.com/containerd/containerd
```
For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:
```
$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
```
`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs), which means that
you need to satisfy these dependencies on your system:
* CentOS/Fedora: `yum install btrfs-progs-devel`
* Debian/Ubuntu: `apt-get install btrfs-tools`
At this point you are ready to build `containerd` yourself!
## Build containerd
`containerd` uses `make` to create a repeatable build flow. This means that you
can run:
```sh
make
```
This builds all the project binaries into the `./bin/` directory.
You can move them into your global path, `/usr/local/bin`, with:
```sh
sudo make install
```
When making any changes to the gRPC API, you can use the installed `protoc`
compiler to regenerate the API generated code packages with:
```sh
make generate
```
> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
> Makefile target will disable the btrfs driver within the containerd Go build.
Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
### Static binaries
You can build static binaries by providing a few variables to `make`:
```sh
make EXTRA_FLAGS="-buildmode pie" \
EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"' \
BUILDTAGS="static_build"
```
> *Note*:
> - static build is discouraged
> - static containerd binary does not support loading plugins
# Via Docker container
## Build containerd
You can build `containerd` via a Linux-based Docker container.
You can build an image from this `Dockerfile`:
```
FROM golang
RUN apt-get update && \
apt-get install btrfs-tools
```
Let's suppose that you built an image called `containerd/build`. From the
containerd source root directory you can run the following command:
```sh
docker run -it \
-v ${PWD}:/go/src/github.com/containerd/containerd \
-e GOPATH=/go \
-w /go/src/github.com/containerd/containerd containerd/build sh
```
This mounts the `containerd` repository into the container.
You are now ready to [build](#build-containerd):
```sh
make && make install
```
## Build containerd and runc
To have a complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via a Docker container.
You can use `go` to checkout `runc` in your `GOPATH`:
```sh
go get github.com/opencontainers/runc
```
We can build an image from this `Dockerfile`:
```sh
FROM golang
RUN apt-get update && \
apt-get install -y btrfs-tools libapparmor-dev libseccomp-dev
```
In our Docker container we will use a specific `runc` build which includes [seccomp](https://en.wikipedia.org/wiki/seccomp) and [apparmor](https://en.wikipedia.org/wiki/AppArmor) support. That is why our Dockerfile includes the dependencies `libapparmor-dev` and `libseccomp-dev`. Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
Let's suppose you built an image called `containerd/build` from the above Dockerfile. You can run the following command:
```sh
docker run -it --privileged \
-v /var/lib/containerd \
-v ${GOPATH}/src/github.com/opencontainers/runc:/go/src/github.com/opencontainers/runc \
-v ${GOPATH}/src/github.com/containerd/containerd:/go/src/github.com/containerd/containerd \
-e GOPATH=/go \
-w /go/src/github.com/containerd/containerd containerd/build sh
```
This mounts both the `runc` and `containerd` repositories in our Docker container.
From within the container, let's build `containerd`:
```sh
cd /go/src/github.com/containerd/containerd
make && make install
```
These binaries can be found in the `./bin` directory on your host.
`make install` will move the binaries into your `$PATH`.
Next, let's build `runc`:
```sh
cd /go/src/github.com/opencontainers/runc
make BUILDTAGS='seccomp apparmor' && make install
```
When working with `ctr`, the simple test client we just built, don't forget to start the daemon!
```sh
containerd --config config.toml
```
# Testing containerd
During the automated CI the unit tests and integration tests are run as part of the PR validation. As a developer you can run these tests locally by using any of the following `Makefile` targets:
- `make test`: run all non-integration tests that do not require `root` privileges
- `make root-test`: run all non-integration tests which require `root`
- `make integration`: run all tests, including integration tests and those which require `root`
- `make integration-parallel`: run all tests (integration and root-required included) in parallel mode
To execute a specific test or set of tests you can use the `go test` capabilities
without using the `Makefile` targets. The following examples show how to specify a test
name and also how to use the flag directly against `go test` to run root-requiring tests.
```sh
# run the test <TEST_NAME>:
go test -v -run "<TEST_NAME>" .
# enable the root-requiring tests:
go test -v -run . -test.root
```
Example output from directly running `go test` to execute the `TestContainerList` test:
```sh
sudo go test -v -run "TestContainerList" . -test.root
INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0
=== RUN TestContainerList
--- PASS: TestContainerList (0.00s)
PASS
ok github.com/containerd/containerd 4.778s
```
## Additional tools
### containerd-stress
In addition to `go test`-based testing executed via the `Makefile` targets, the `containerd-stress` tool is available and built with the `all` or `binaries` targets and installed during `make install`.
With this tool you can stress a running containerd daemon for a specified period of time, selecting a concurrency level to generate stress against the daemon. The following command is an example of having five workers running for two hours against a default containerd gRPC socket address:
```sh
containerd-stress -c 5 -t 120
```
For more information on this tool's options please run `containerd-stress --help`.
### bucketbench
[Bucketbench](https://github.com/estesp/bucketbench) is an external tool which can be used to drive load against a container runtime, specifying a particular set of lifecycle operations to run with a specified amount of concurrency. Bucketbench is more focused on generating performance details than simply inducing load against containerd.
Bucketbench differs from the `containerd-stress` tool in a few ways:
- Bucketbench has support for testing the Docker engine, the `runc` binary, and containerd 0.2.x (via `ctr`) and 1.0 (via the client library) branches.
- Bucketbench is driven via configuration file that allows specifying a list of lifecycle operations to execute. This can be used to generate detailed statistics per-command (e.g. start, stop, pause, delete).
- Bucketbench generates detailed reports and timing data at the end of the configured test run.
More details on how to install and run `bucketbench` are available at the [GitHub project page](https://github.com/estesp/bucketbench).

118
vendor/github.com/containerd/containerd/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,118 @@
# Contributing
Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers and merged when acceptable.
This project is in an early state, making the impact of contributions much
greater than at other stages. In this respect, it is important to consider any
changes or additions for their future impact more so than their current impact.
## Successful Changes
We ask that before contributing, please make the effort to coordinate with the
maintainers of the project before submitting large or high impact PRs. This
will prevent you from doing extra work that may or may not be merged.
PRs that are just submitted without any prior communication will likely be
summarily closed.
While pull requests are the methodology for submitting changes to code, changes
are much more likely to be accepted if they are accompanied by additional
engineering work. While we don't define this explicitly, most of these goals
are accomplished through communication of the design goals and subsequent
solutions. Often times, it helps to first state the problem before presenting
solutions.
Typically, the best methods of accomplishing this are to submit an issue,
stating the problem. This issue can include a problem statement and a
checklist with requirements. If solutions are proposed, alternatives should be
listed and eliminated. Even if the criterion for eliminating a solution is
frivolous, say so.
Larger changes typically work best with design documents, similar to those found
in `design/`. These are focused on providing context to the design at the time
the feature was conceived and can inform future documentation contributions.
Make sure that new tests are added for bug fixes in order to catch regressions, and
that tests accompany new features to exercise the functionality they add.
## Commit Messages
There are times for one line commit messages and this is not one of them.
Commit messages should follow best practices, including explaining the context
of the problem and how it was solved, including in caveats or follow up changes
required. They should tell the story of the change and provide readers
understanding of what led to it.
If you're lost about what this even means, please see [How to Write a Git
Commit Message](http://chris.beams.io/posts/git-commit/) for a start.
In practice, the best approach to maintaining a nice commit message is to
leverage a `git add -p` and `git commit --amend` to formulate a solid
changeset. This allows one to piece together a change, as information becomes
available.
If you squash a series of commits, don't just submit that. Re-write the commit
message, as if the series of commits was a single stroke of brilliance.
That said, there is no requirement to have a single commit for a PR, as long as
each commit tells the story. For example, if there is a feature that requires a
package, it might make sense to have the package in a separate commit and then
have a subsequent commit that uses it.
Remember, you're telling part of the story with the commit message. Don't make
your chapter weird.
## Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

191
vendor/github.com/containerd/containerd/LICENSE.code generated vendored Normal file
View file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

395
vendor/github.com/containerd/containerd/LICENSE.docs generated vendored Normal file
View file

@ -0,0 +1,395 @@
Attribution 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
d. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
e. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
f. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
g. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
h. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
i. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
j. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
k. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's
License You apply must not prevent recipients of the Adapted
Material from complying with this Public License.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material; and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.
251
vendor/github.com/containerd/containerd/MAINTAINERS generated vendored Normal file
View file
@@ -0,0 +1,251 @@
# containerd project maintainers file
#
# This file describes who runs the containerd project and how.
# This is a living document - if you see something out of date or missing,
# speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
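# For example, here is a minimal sketch that pulls out the maintainer list.
# It assumes the github.com/BurntSushi/toml parser and models only the
# Org.Maintainers table; it is an illustration, not part of this file's format.
```go
// Minimal sketch: extract the maintainer list with a TOML parser.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type maintainersFile struct {
	Org struct {
		Maintainers struct {
			People []string `toml:"people"`
		} `toml:"Maintainers"`
	} `toml:"Org"`
}

func main() {
	var m maintainersFile
	// DecodeFile reads and parses the TOML file into the struct above.
	if _, err := toml.DecodeFile("MAINTAINERS", &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Org.Maintainers.People)
}
```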
[Rules]
[Rules.maintainers]
title = "What is a maintainer?"
text = """
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:
1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.
Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
"""
[Rules.adding-maintainers]
title = "How are maintainers added?"
text = """
Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.
Just contributing does not make you a maintainer; it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.
Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed on the maintainers mailing list.
After a candidate has been announced on the maintainers mailing list, the
existing maintainers are given five business days to discuss the candidate,
raise objections and cast their vote. Candidates must be approved by at least
66% of the current maintainers by adding their vote on the mailing
list. Only maintainers of the repository that the candidate is proposed for are
allowed to vote.
If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The candidate becomes a maintainer once the pull request is
merged.
"""
[Rules.adding-sub-projects]
title = "How are sub projects added?"
text = """
Similar to adding maintainers, new sub projects can be added to containerd
GitHub organization as long as they adhere to the CNCF
[charter](https://www.cncf.io/about/charter/) and mission. After a project
proposal has been announced on a public forum (GitHub issue or mailing list),
the existing maintainers are given five business days to discuss the new
project, raise objections and cast their vote. Projects must be approved by at
least 66% of the current maintainers by adding their vote.
If a project is approved, a maintainer will add the project to the containerd
GitHub organization, and make an announcement on a public forum.
"""
[Rules.stepping-down-policy]
title = "Stepping down policy"
text = """
Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.
After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.
"""
[Rules.inactive-maintainers]
title = "Removal of inactive maintainers"
text = """
Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.
If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.
If the maintainer wants to remain a maintainer, but is unable to perform the
required duties, they can be removed with a vote of at least 66% of
the current maintainers. An e-mail is sent to the
mailing list, inviting maintainers of the project to vote. The voting period is
five business days. Issues related to a maintainer's performance should be
discussed with them among the other maintainers so that they are not surprised
by a pull request removing them.
"""
[Rules.decisions]
title = "How are decisions made?"
text = """
Short answer: EVERYTHING IS A PULL REQUEST.
containerd is an open-source project with an open design philosophy. This means
that the repository is the source of truth for EVERY aspect of the project,
including its philosophy, design, road map, and APIs. *If it's part of the
project, it's in the repo. If it's in the repo, it's part of the project.*
As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.
All decisions affecting containerd, big and small, follow the same 3 steps:
* Step 1: Open a pull request. Anyone can do this.
* Step 2: Discuss the pull request. Anyone can do this.
* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.
"""
[Rules.DCO]
title = "Helping contributors with the DCO"
text = """
The [DCO or `Sign your work`](
https://github.com/containerd/containerd/blob/master/CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.
Some containerd contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.
In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this is to
add a DCO signature in a pull request's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution, e.g., a spelling change.
When you add someone's DCO, please also add your own to keep a log.
"""
[Rules."no direct push"]
title = "I'm a maintainer. Should I make pull requests too?"
text = """
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
"""
[Rules.tsc]
title = "Conflict Resolution and technical disputes"
text = """
containerd defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters.
"""
[Rules.meta]
title = "How is this process changed?"
text = "Just like everything else: by making a pull request :)"
# Current project organization
[Org]
[Org.Maintainers]
people = [
"akihirosuda",
"crosbymichael",
"dqminh",
"dmcgowan",
"estesp",
"hqhq",
"jhowardmsft",
"mlaventure",
"stevvooe",
]
[People]
[People.akihirosuda]
Name = "Akihiro Suda"
Email = "suda.akihiro@lab.ntt.co.jp"
GitHub = "AkihiroSuda"
[People.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"
[People.dqminh]
Name = "Daniel, Dao Quang Minh"
Email = "dqminh89@gmail.com"
GitHub = "dqminh"
[People.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[People.estesp]
Name = "Phil Estes"
Email = "estesp@gmail.com"
GitHub = "estesp"
[People.hqhq]
Name = "Qiang Huang"
Email = "h.huangqiang@huawei.com"
GitHub = "hqhq"
[People.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"
[People.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@gmail.com"
GitHub = "mlaventure"
[People.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"
197
vendor/github.com/containerd/containerd/Makefile generated vendored Normal file
View file
@@ -0,0 +1,197 @@
# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# Base path used to install.
DESTDIR=/usr/local
# Used to populate variables in version package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
else
GOOS ?= $$GOOS
GOARCH ?= $$GOARCH
endif
WHALE = "🇩"
ONI = "👹"
RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
PKG=github.com/containerd/containerd
# Project packages.
PACKAGES=$(shell go list ./... | grep -v /vendor/)
INTEGRATION_PACKAGE=${PKG}
TEST_REQUIRES_ROOT_PACKAGES=$(filter \
${PACKAGES}, \
$(shell \
for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile); do \
d="$$(dirname $$f)"; \
[ "$$d" = "." ] && echo "${PKG}" && continue; \
echo "${PKG}/$$d"; \
done | sort -u) \
)
# Project binaries.
COMMANDS=ctr containerd containerd-stress containerd-release
BINARIES=$(addprefix bin/,$(COMMANDS))
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
SHIM_GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) -extldflags "-static"'
TESTFLAGS_RACE=
GO_GCFLAGS=
#Detect the target os
include Makefile.OS
#include platform specific makefile
include Makefile.$(target_os)
# Flags passed to `go test`
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
TESTFLAGS_PARALLEL ?= 8
.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release
.DEFAULT: default
all: binaries
check: proto-fmt ## run all linters
@echo "$(WHALE) $@"
gometalinter --config .gometalinter.json ./...
ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI
AUTHORS: .mailmap .git/HEAD
git log --format='%aN <%aE>' | sort -fu > $@
generate: protos
@echo "$(WHALE) $@"
@PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}
protos: bin/protoc-gen-gogoctrd ## generate protobuf
@echo "$(WHALE) $@"
@PATH=${ROOTDIR}/bin:${PATH} protobuild --quiet ${PACKAGES}
check-protos: protos ## check if protobufs need to be generated again
@echo "$(WHALE) $@"
@test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \
((git diff | cat) && \
(echo "$(ONI) please run 'make protos' when making changes to proto files" && false))
check-api-descriptors: protos ## check that protobuf changes aren't present.
@echo "$(WHALE) $@"
@test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \
((git diff $$(find . -name '*.pb.txt') | cat) && \
(echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false))
proto-fmt: ## check format of proto files
@echo "$(WHALE) $@"
@test -z "$$(find . -path ./vendor -prune -o -path ./protobuf/google/rpc -prune -o -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
(echo "$(ONI) please indent proto files with tabs only" && false)
@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)
dco: ## dco check
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
ifdef TRAVIS_COMMIT_RANGE
git-validation -q -run DCO,short-subject,dangling-whitespace
else
git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif
build: ## build the go packages
@echo "$(WHALE) $@"
@go build ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
test: ## run tests, except integration tests and tests that require root
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
root-test: ## run tests, except integration tests
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) -test.root
integration: ## run integration tests
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} -test.root -parallel ${TESTFLAGS_PARALLEL}
benchmark: ## run benchmark tests
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} -bench . -run Benchmark -test.root
FORCE:
# Build a binary from a cmd.
bin/%: cmd/% FORCE
@echo "$(WHALE) $@${BINARY_SUFFIX}"
@go build -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<
bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
@echo "$(WHALE) bin/containerd-shim"
@CGO_ENABLED=0 go build -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
binaries: $(BINARIES) ## build binaries
@echo "$(WHALE) $@"
release: $(BINARIES)
@echo "$(WHALE) $@"
@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz
@install -d releases/$(RELEASE)/bin
@install $(BINARIES) releases/$(RELEASE)/bin
@cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz *
clean: ## clean up binaries
@echo "$(WHALE) $@"
@rm -f $(BINARIES)
install: ## install binaries
@echo "$(WHALE) $@ $(BINARIES)"
@mkdir -p $(DESTDIR)/bin
@install $(BINARIES) $(DESTDIR)/bin
uninstall:
@echo "$(WHALE) $@"
@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))
coverage: ## generate coverprofiles from the unit tests, except tests that require root
@echo "$(WHALE) $@"
@rm -f coverage.txt
@go test -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) 2> /dev/null
@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}); do \
go test ${TESTFLAGS} \
-cover \
-coverprofile=profile.out \
-covermode=atomic $$pkg || exit; \
if [ -f profile.out ]; then \
cat profile.out >> coverage.txt; \
rm profile.out; \
fi; \
done )
root-coverage: ## generate coverage profiles for unit tests that require root
@echo "$(WHALE) $@"
@go test -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) 2> /dev/null
@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}); do \
go test ${TESTFLAGS} \
-cover \
-coverprofile=profile.out \
-covermode=atomic $$pkg -test.root || exit; \
if [ -f profile.out ]; then \
cat profile.out >> coverage.txt; \
rm profile.out; \
fi; \
done )
vendor:
@echo "$(WHALE) $@"
@vndr
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
16
vendor/github.com/containerd/containerd/Makefile.OS generated vendored Normal file
View file
@@ -0,0 +1,16 @@
#Detect the OS, and set target_os to the same value as GOOS
target_os =
ifeq ($(OS),Windows_NT)
target_os = windows
else
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
target_os = linux
endif
ifeq ($(UNAME_S),Darwin)
target_os = darwin
endif
ifeq ($(UNAME_S),FreeBSD)
target_os = freebsd
endif
endif
7
vendor/github.com/containerd/containerd/Makefile.darwin generated vendored Normal file
View file
@@ -0,0 +1,7 @@
#darwin specific settings
COMMANDS += containerd-shim
# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
TESTFLAGS_RACE= -race
endif
7
vendor/github.com/containerd/containerd/Makefile.freebsd generated vendored Normal file
View file
@@ -0,0 +1,7 @@
#freebsd specific settings
COMMANDS += containerd-shim
# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
TESTFLAGS_RACE= -race
endif
12
vendor/github.com/containerd/containerd/Makefile.linux generated vendored Normal file
View file
@@ -0,0 +1,12 @@
#linux specific settings
COMMANDS += containerd-shim
# check GOOS for cross compile builds
ifeq ($(GOOS),linux)
GO_GCFLAGS= -buildmode=pie
endif
# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
TESTFLAGS_RACE= -race
endif
10
vendor/github.com/containerd/containerd/Makefile.windows generated vendored Normal file
View file
@@ -0,0 +1,10 @@
#Windows specific settings.
WHALE = "+"
ONI = "-"
BINARY_SUFFIX=".exe"
# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
TESTFLAGS_RACE= -race
endif
View file
@@ -1,11 +1,8 @@
Docker
Copyright 2012-2017 Docker, Inc.
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
67
vendor/github.com/containerd/containerd/Protobuild.toml generated vendored Normal file
View file
@@ -0,0 +1,67 @@
version = "unstable"
generator = "gogoctrd"
plugins = ["grpc", "fieldpath"]
# Control protoc include paths. Below are usually some good defaults, but feel
# free to try it without them if it works for your project.
[includes]
# Include paths that will be added before all others. Typically, you want to
# treat the root of the project as an include, but this may not be necessary.
before = ["./protobuf"]
# Paths that should be treated as include roots in relation to the vendor
# directory. These will be calculated with the vendor directory nearest the
# target package.
packages = ["github.com/gogo/protobuf"]
# Paths that will be added untouched to the end of the includes. We use
# `/usr/local/include` to pickup the common install location of protobuf.
# This is the default.
after = ["/usr/local/include"]
# This section maps protobuf imports to Go packages. These will become
# `-M` directives in the call to the go protobuf generator.
[packages]
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
"google/rpc/status.proto" = "github.com/containerd/containerd/protobuf/google/rpc"
[[overrides]]
prefixes = ["github.com/containerd/containerd/api/events"]
plugins = ["fieldpath"] # disable grpc for this package
[[overrides]]
# enable ttrpc and disable fieldpath and grpc for the shim
prefixes = ["github.com/containerd/containerd/linux/shim/v1"]
plugins = ["ttrpc"]
# Aggregate the API descriptors to lock down API changes.
[[descriptors]]
prefix = "github.com/containerd/containerd/api"
target = "api/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
# Lock down runc config
[[descriptors]]
prefix = "github.com/containerd/containerd/linux/runctypes"
target = "linux/runctypes/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
[[descriptors]]
prefix = "github.com/containerd/containerd/windows/hcsshimtypes"
target = "windows/hcsshimtypes/next.pb.txt"
ignore_files = [
"google/protobuf/descriptor.proto",
"gogoproto/gogo.proto"
]
215
vendor/github.com/containerd/containerd/README.md generated vendored Normal file
View file
@@ -0,0 +1,215 @@
![banner](/docs/images/containerd-dark.png?raw=true)
[![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd)
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
![architecture](design/architecture.png)
## Getting Started
See our documentation on [containerd.io](https://containerd.io):
* [for ops and admins](docs/ops.md)
* [namespaces](docs/namespaces.md)
* [client options](docs/client-opts.md)
See how to build containerd from source at [BUILDING](BUILDING.md).
If you are interested in trying out containerd, see our example at [Getting Started](docs/getting-started.md).
## Runtime Requirements
Runtime requirements for containerd are very minimal. Most interactions with
the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
There are specific features
used by containerd core code and snapshotters that will require a minimum kernel
version on Linux. With the understood caveat of distro kernel versioning, a
reasonable starting point for Linux is a minimum 4.x kernel version.
The overlay filesystem snapshotter, used by default, uses features that were
finalized in the 4.x kernel series. If you choose to use btrfs, there may
be more flexibility in kernel version (minimum recommended is 3.18), but it will
require the btrfs kernel module and btrfs tools to be installed on your Linux
distribution.
To use Linux checkpoint and restore features, you will need `criu` installed on
your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
Build requirements for developers are listed in [BUILDING](BUILDING.md).
## Features
### Client
containerd offers a full client package to help you integrate containerd into your platform.
```go
import (
	"log"

	"github.com/containerd/containerd"
)

func main() {
	// check the connection error before deferring Close
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```
### Namespaces
Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content while still providing separation between containers and images.
To set a namespace for requests to the API:
```go
ctx := context.Background()
// create a namespaced context for docker
docker := namespaces.WithNamespace(ctx, "docker")
container, err := client.NewContainer(docker, "id")
```
To set a default namespace on the client:
```go
client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
```
### Distribution
```go
// pull an image
image, err := client.Pull(context, "docker.io/library/redis:latest")
// push an image
err := client.Push(context, "docker.io/library/redis:latest", image.Target())
```
### Containers
In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
```go
redis, err := client.NewContainer(context, "redis-master")
defer redis.Delete(context)
```
### OCI Runtime Specification
containerd fully supports the OCI runtime specification for running containers. We provide built-in functions to help you generate runtime specifications based on images as well as custom parameters.
You can specify options for how to modify the specification when creating a container.
```go
redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(oci.WithImageConfig(image)))
```
### Root Filesystems
containerd allows you to use overlay or snapshot filesystems with your containers. It comes with builtin support for overlayfs and btrfs.
```go
// pull an image and unpack it into the configured snapshotter
image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)
// allocate a new RW root filesystem for a container based on the image
redis, err := client.NewContainer(context, "redis-master",
containerd.WithNewSnapshot("redis-rootfs", image),
containerd.WithNewSpec(oci.WithImageConfig(image)),
)
// use a readonly filesystem with multiple containers
for i := 0; i < 10; i++ {
id := fmt.Sprintf("id-%s", i)
container, err := client.NewContainer(ctx, id,
containerd.WithNewSnapshotView(id, image),
containerd.WithNewSpec(oci.WithImageConfig(image)),
)
}
```
### Tasks
Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container. A task represents the runnable object within containerd.
```go
// create a new task
task, err := redis.NewTask(context, cio.Stdio)
defer task.Delete(context)
// the task is now running and has a pid that can be used to set up networking
// or other runtime settings outside of containerd
pid := task.Pid()
// start the redis-server process inside the container
err := task.Start(context)
// wait for the task to exit and get the exit status
status, err := task.Wait(context)
```
### Checkpoint and Restore
If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks. This allows you to clone and/or live migrate containers to other machines.
```go
// checkpoint the task then push it to a registry
checkpoint, err := task.Checkpoint(context, containerd.WithExit)
err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
// on a new machine pull the checkpoint and restore the redis container
image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
checkpoint := image.Target()
redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
defer redis.Delete(context)
task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
defer task.Delete(context)
err := task.Start(context)
```
### Releases and API Stability
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
of containerd components.
### Development reports
Weekly summary on the progress and what is being worked on.
https://github.com/containerd/containerd/tree/master/reports
### Communication
For async communication and long running discussions please use issues and pull requests on the github repo.
This will be the best place to discuss design and implementation.
For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
**Slack:** https://dockr.ly/community
### Reporting security issues
__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
## Licenses
The containerd codebase is released under the [Apache 2.0 license](LICENSE.code).
The README.md file, and files in the "docs" folder are licensed under the
Creative Commons Attribution 4.0 International License under the terms and
conditions set forth in the file "[LICENSE.docs](LICENSE.docs)". You may obtain a duplicate
copy of the same license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
272
vendor/github.com/containerd/containerd/RELEASES.md generated vendored Normal file
View file
@@ -0,0 +1,272 @@
# Versioning and Release
This document details the versioning and release plan for containerd. Stability
is a top goal for this project and we hope that this document and the processes
it entails will help to achieve that. It covers the release process, versioning
numbering, backporting, API stability and support horizons.
If you rely on containerd, it would be good to spend time understanding the
areas of the API that are and are not supported and how they impact your
project in the future.
This document will be considered a living document. Supported timelines,
backport targets and API stability guarantees will be updated here as they
change.
If there is something that you require or this document leaves out, please
reach out by [filing an issue](https://github.com/containerd/containerd/issues).
## Releases
Releases of containerd will be versioned using dotted triples, similar to
[Semantic Version](http://semver.org/). For the purposes of this document, we
will refer to the respective components of this triple as
`<major>.<minor>.<patch>`. The version number may have additional information,
such as alpha, beta and release candidate qualifications. Such releases will be
considered "pre-releases".
### Major and Minor Releases
Major and minor releases of containerd will be made from master. Releases of
containerd will be marked with GPG signed tags and announced at
https://github.com/containerd/containerd/releases. The tag will be of the
format `v<major>.<minor>.<patch>` and should be made with the command `git tag
-s v<major>.<minor>.<patch>`.
After a minor release, a branch will be created, with the format
`release/<major>.<minor>` from the minor tag. All further patch releases will
be done from that branch. For example, once we release `v1.0.0`, a branch
`release/1.0` will be created from that tag. All future patch releases will be
done against that branch.
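For example, cutting the 1.0 release branch from the `v1.0.0` tag might look like this (illustrative commands, not an official release script):

```console
$ git branch release/1.0 v1.0.0
$ git push origin release/1.0
```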
### Pre-releases
Pre-releases, such as alphas, betas and release candidates will be conducted
from their source branch. For major and minor releases, these releases will be
done from master. For patch releases, these pre-releases should be done within
the corresponding release branch.
While pre-releases are done to assist in the stabilization process, no
guarantees are provided.
### Upgrade Path
The upgrade path for containerd is such that patch (0.0.x) releases are
always backward compatible with their major and minor version. Minor (0.x.0)
versions will always be compatible with the previous minor release. i.e. 1.2.0
is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0. There are
no compatibility guarantees for upgrades that span multiple _minor_ releases.
For example, 1.0.0 to 1.2.0 is not supported. One should first upgrade to 1.1,
then 1.2.
There are no compatibility guarantees with upgrades to _major_ versions. For
example, upgrading from 1.0.0 to 2.0.0 may require resources to be migrated or
integrations to change. Each major version will be supported for at least 1
year with bug fixes and security patches.
### Next Release
The activity for the next release will be tracked in the
[milestones](https://github.com/containerd/containerd/milestones). If your
issue or PR is not present in a milestone, please reach out to the maintainers
to create the milestone or add an issue or PR to an existing milestone.
### Support Horizon
Support horizons will be defined corresponding to a release branch, identified
by `<major>.<minor>`. Releases branches will be in one of several states:
- __*Next*__: The next planned release branch.
- __*Active*__: The release is currently supported and accepting patches.
- __*End of Life*__: The release branch is no longer supported and no new patches will be accepted.
Releases will be supported up to one year after a _minor_ release. This means that
we will accept bug reports and backports to release branches until the end of
life date. If no new _minor_ release has been made, that release will be
considered supported until the next _minor_ is released or one year, whichever
is longer.
The current state is available in the following table:
| Release | Status | Start | End of Life |
|---------|-------------|------------------|-------------------|
| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - |
| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - |
| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 |
| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.0) | Active | December 5, 2017 | max(December 5, 2018, release of 1.1.0) |
| [1.1](https://github.com/containerd/containerd/milestone/15) | Next | TBD | max(TBD+1 year, release of 1.2.0) |
Note that branches and release from before 1.0 may not follow these rules.
This table should be updated as part of the release preparation process.
### Backporting
Backports in containerd are community driven. As maintainers, we'll try to
ensure that sensible bugfixes make it into _active_ release, but our main focus
will be features for the next _minor_ or _major_ release. For the most part,
this process is straightforward and we are here to help make it as smooth as
possible.
If there are important fixes that need to be backported, please let us know in
one of three ways:
1. Open an issue.
2. Open a PR with cherry-picked change from master.
3. Open a PR with a ported fix.
__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
Remember that backported PRs must follow the versioning guidelines from this document.
Any release that is "active" can accept backports. Opening a backport PR is
fairly straightforward. The steps differ depending on whether you are pulling
a fix from master or need to draft a new commit specific to a particular
branch.
To cherry pick a straightforward commit from master, simply use the cherry pick
process:
1. Pick the branch to which you want the fix backported, usually in the format
`release/<major>.<minor>`. The following will create a branch you can
use to open a PR:
```console
$ git checkout -b my-backport-branch release/<major>.<minor>
```
2. Find the commit you want backported.
3. Apply it to the release branch:
```console
$ git cherry-pick -xsS <commit>
```
4. Push the branch and open up a PR against the _release branch_:
```console
$ git push -u stevvooe my-backport-branch
```
Make sure to replace `stevvooe` with whatever fork you are using to open
the PR. When you open the PR, make sure to switch `master` with whatever
release branch you are targeting with the fix.
If there is no existing fix in master, you should first fix the bug in master,
or ask a maintainer or contributor to do it via an issue. Once that PR is
completed, open a PR using the process above.
Only when the bug is not present in master and the fix must be made specifically
for the release branch should you open a PR with new code.
## Public API Stability
The following table provides an overview of the components covered by
containerd versions:
| Component | Status | Stabilized Version | Links |
|---------------|----------|--------------------|---------------|
| GRPC API | Beta | 1.0 | [api/](api) |
| Metrics API | Beta | 1.0 | - |
| Go client API | Unstable | 1.1 tentative | [godoc](https://godoc.org/github.com/containerd/containerd) |
| `ctr` tool | Unstable | Out of scope | - |
From the version stated in the above table onward, each component must adhere to the
stability constraints expected in release versions.
Unless explicitly stated here, components that are called out as unstable or
not covered may change in a future minor version. Breaking changes to
"unstable" components will be avoided in patch versions.
### GRPC API
The primary product of containerd is the GRPC API. As of the 1.0.0 release, the
GRPC API will not have any backwards incompatible changes without a _major_
version jump.
To ensure compatibility, we have collected the entire GRPC API symbol set into
a single file. At each _minor_ release of containerd, we will move the current
`next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`,
enumerating the supported services and messages. See [api/](api) for details.
Note that new services may be added in _minor_ releases. New service methods
and new fields on messages may be added if they are optional.
### Metrics API
The metrics API that outputs prometheus style metrics will be versioned independently,
prefixed with the API version. i.e. `/v1/metrics`, `/v2/metrics`.
The metrics API version will be incremented when breaking changes are made to the prometheus
output. New metrics can be added to the output in a backwards compatible manner without
bumping the API version.
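As an illustration, assuming a metrics listener has been enabled in containerd's configuration (the address below is hypothetical), the versioned endpoint can be scraped directly:

```console
$ curl http://127.0.0.1:1338/v1/metrics
```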
### Plugins API
containerd is based on a modular design where plugins are implemented to provide the core functionality.
Plugins implemented in tree are supported by the containerd community unless explicitly specified as non-stable.
Out of tree plugins are not supported by the containerd maintainers.
Currently, the Windows runtime and snapshot plugins are not stable and not supported.
Please refer to the github milestones for Windows support in a future release.
#### Error Codes
Error codes will not change in a patch release, unless a missing error code
causes a blocking bug. Error codes of type "unknown" may change to more
specific types in the future. Any error code that is not "unknown" that is
currently returned by a service will not change without a _major_ release or a
new version of the service.
If you find that an error code that is required by your application is not
well-documented in the protobuf service description or tested explicitly,
please file an issue and we will clarify.
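As a sketch of what stable error classes enable, a client built on the vendored `errdefs` helpers can branch on well-known error kinds instead of matching error strings (names and wrapping below are illustrative):

```go
// Classify a client error by well-known class rather than by string matching.
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func describe(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errdefs.IsNotFound(err):
		return "not found"
	case errdefs.IsAlreadyExists(err):
		return "already exists"
	default:
		return "unknown"
	}
}

func main() {
	// errdefs helpers unwrap pkg/errors wrapping via errors.Cause.
	err := errors.Wrap(errdefs.ErrNotFound, "container redis-master")
	fmt.Println(describe(err)) // prints "not found"
}
```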
#### Opaque Fields
Unless explicitly stated, the formats of certain fields may not be covered by
this guarantee and should be treated opaquely. For example, don't rely on the
format details of a URL field unless we explicitly say that the field will
follow that format.
### Go client API
The Go client API, documented in
[godoc](https://godoc.org/github.com/containerd/containerd), is currently
considered unstable. It is recommended to vendor the necessary components to
stabilize your project build. Note that because the Go API interfaces with the
GRPC API, clients written against a 1.0 Go API should remain compatible with
future 1.x series releases.
We intend to stabilize the API in a future release when more integrations have
been carried out.
Any changes to the API should be detectable at compile time, so upgrading will
be a matter of fixing compilation errors and moving from there.
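For instance, with the `dep` tool, a project might pin the client to a released tag with a `Gopkg.toml` constraint like this (the version shown is illustrative):

```toml
[[constraint]]
  name = "github.com/containerd/containerd"
  version = "v1.0.2"
```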
### `ctr` tool
The `ctr` tool provides the ability to introspect and understand the containerd
API. At this time, it is not considered a primary offering of the project. It
may be completely refactored or have breaking changes in _minor_ releases.
We will try not to break the tool in _patch_ releases.
### Not Covered
As a general rule, anything not mentioned in this document is not covered by
the stability guidelines and may change in any release. Explicitly, this
pertains to this non-exhaustive list of components:
- File System layout
- Storage formats
- Snapshot formats
Between upgrades of subsequent _minor_ versions, we may migrate these formats.
Any outside processes relying on details of these file system layouts may break
in that process. Container root file systems will be maintained on upgrade.
### Exceptions
We may make exceptions in the interest of __security patches__. If a break is
required, it will be communicated clearly and the solution will be considered
against total impact.
3
vendor/github.com/containerd/containerd/ROADMAP.md generated vendored Normal file
View file
@@ -0,0 +1,3 @@
# containerd Roadmap
Please review the milestones on [github](https://github.com/containerd/containerd/milestones) for the updated roadmap and release information.
27
vendor/github.com/containerd/containerd/RUNC.md generated vendored Normal file
View file
@@ -0,0 +1,27 @@
containerd is built with OCI support and with support for advanced features provided by [runc](https://github.com/opencontainers/runc).
We depend on a specific `runc` version when dealing with advanced features. You should have a specific runc build for development. The current supported runc commit is:
RUNC_COMMIT = 9f9c96235cc97674e935002fc3d78361b696a69e
For more information on how to clone and build runc see the runc Building [documentation](https://github.com/opencontainers/runc#building).
Note: before building you may need to install additional support, which will vary by platform. For example, you may need to install `libseccomp` and `libapparmor` e.g. `libseccomp-dev` and `libapparmor-dev` for Ubuntu.
## building
From within your `opencontainers/runc` repository run:
### apparmor
```bash
make BUILDTAGS='seccomp apparmor' && sudo make install
```
### selinux
```bash
make BUILDTAGS='seccomp selinux' && sudo make install
```
After an official runc release we will start pinning containerd support to a specific version but various development and testing features may require a newer runc version than the latest release. If you encounter any runtime errors, please make sure your runc is in sync with the commit/tag provided in this document.
57
vendor/github.com/containerd/containerd/SCOPE.md generated vendored Normal file
View file
@@ -0,0 +1,57 @@
# Scope and Principles
Having a clearly defined scope of a project is important for ensuring consistency and focus.
The following criteria will be used when reviewing pull requests, features, and changes for the project before they are accepted.
### Components
Components should not have tight dependencies on each other so that they are able to be used independently.
The APIs for images and containers should be designed so that, when used together, the components have a natural flow, while still being useful independently.
An example for this design can be seen with the overlay filesystems and the container execution layer.
The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
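A rough sketch of that shared contract, using the types from the vendored `mount` package (paths and options below are illustrative, not a prescribed layout):

```go
// Sketch: a snapshotter produces mounts, the execution layer consumes them.
package main

import (
	"log"

	"github.com/containerd/containerd/mount"
)

func main() {
	// What an overlay snapshotter might hand back for a prepared snapshot.
	mounts := []mount.Mount{{
		Type:    "overlay",
		Source:  "overlay",
		Options: []string{"lowerdir=/tmp/lower", "upperdir=/tmp/upper", "workdir=/tmp/work"},
	}}

	// The execution layer only needs the Mount values to materialize a rootfs.
	if err := mount.All(mounts, "/tmp/rootfs"); err != nil {
		log.Fatal(err)
	}
}
```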
### Primitives
containerd should expose primitives to solve problems instead of building high level abstractions in the API.
A common example of this is how build would be implemented.
Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allows people to implement build at higher levels with more flexibility.
### Extensibility and Defaults
For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.
containerd will come with a default implementation for the various components.
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.
## Scope
The following table specifies the various components of containerd and general features of container runtimes.
The table specifies whether or not the feature/component is in or out of scope.
| Name | Description | In/Out | Reason |
|------------------------------|--------------------------------------------------------------------------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| execution | Provide an extensible execution layer for executing a container | in | Create, start, stop, pause, resume, exec, signal, delete |
| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
| metrics | container-level metrics, cgroup stats, and OOM events | in | |
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volumes type systems can be built on top of containerd. |
| logging | Persisting container logs | out | Logging can be built on top of containerd because the containers' STDIO will be provided to the clients and they can persist it any way they see fit. There is no io copying of container STDIO in containerd. |
containerd is scoped to a single host and makes assumptions based on that fact.
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.
containerd is designed to be embedded into a larger system, hence it only includes a barebones CLI (`ctr`) specifically for development and debugging purposes, with no mandate to be human-friendly and no guarantee of interface stability over time.
### How is the scope changed?
The scope of this project is a whitelist.
If it's not mentioned as being in scope, it is out of scope.
For the scope of this project to change it requires a 100% vote from all maintainers of the project.
108
vendor/github.com/containerd/containerd/benchmark_test.go generated vendored Normal file
View file
@@ -0,0 +1,108 @@
package containerd
import (
"fmt"
"testing"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
)
func BenchmarkContainerCreate(b *testing.B) {
client, err := newClient(b, address)
if err != nil {
b.Fatal(err)
}
defer client.Close()
ctx, cancel := testContext()
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
b.Error(err)
return
}
spec, err := oci.GenerateSpec(ctx, client, &containers.Container{ID: b.Name()}, oci.WithImageConfig(image), withTrue())
if err != nil {
b.Error(err)
return
}
var containers []Container
defer func() {
for _, c := range containers {
if err := c.Delete(ctx, WithSnapshotCleanup); err != nil {
b.Error(err)
}
}
}()
// reset the timer before creating containers
b.ResetTimer()
for i := 0; i < b.N; i++ {
id := fmt.Sprintf("%s-%d", b.Name(), i)
container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
if err != nil {
b.Error(err)
return
}
containers = append(containers, container)
}
b.StopTimer()
}
func BenchmarkContainerStart(b *testing.B) {
client, err := newClient(b, address)
if err != nil {
b.Fatal(err)
}
defer client.Close()
ctx, cancel := testContext()
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
b.Error(err)
return
}
spec, err := oci.GenerateSpec(ctx, client, &containers.Container{ID: b.Name()}, oci.WithImageConfig(image), withTrue())
if err != nil {
b.Error(err)
return
}
var containers []Container
defer func() {
for _, c := range containers {
if err := c.Delete(ctx, WithSnapshotCleanup); err != nil {
b.Error(err)
}
}
}()
for i := 0; i < b.N; i++ {
id := fmt.Sprintf("%s-%d", b.Name(), i)
container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
if err != nil {
b.Error(err)
return
}
containers = append(containers, container)
}
// reset the timer before starting tasks
b.ResetTimer()
for _, c := range containers {
task, err := c.NewTask(ctx, empty())
if err != nil {
b.Error(err)
return
}
defer task.Delete(ctx)
if err := task.Start(ctx); err != nil {
b.Error(err)
return
}
}
b.StopTimer()
}

vendor/github.com/containerd/containerd/client.go generated vendored Normal file

@ -0,0 +1,598 @@
package containerd
import (
"context"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"strings"
"sync"
"time"
containersapi "github.com/containerd/containerd/api/services/containers/v1"
contentapi "github.com/containerd/containerd/api/services/content/v1"
diffapi "github.com/containerd/containerd/api/services/diff/v1"
eventsapi "github.com/containerd/containerd/api/services/events/v1"
imagesapi "github.com/containerd/containerd/api/services/images/v1"
introspectionapi "github.com/containerd/containerd/api/services/introspection/v1"
namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
"github.com/containerd/containerd/api/services/tasks/v1"
versionservice "github.com/containerd/containerd/api/services/version/v1"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/dialer"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/typeurl"
ptypes "github.com/gogo/protobuf/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
)
func init() {
const prefix = "types.containerd.io"
// register TypeUrls for commonly marshaled external types
major := strconv.Itoa(specs.VersionMajor)
typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
typeurl.Register(&specs.Process{}, prefix, "opencontainers/runtime-spec", major, "Process")
typeurl.Register(&specs.LinuxResources{}, prefix, "opencontainers/runtime-spec", major, "LinuxResources")
typeurl.Register(&specs.WindowsResources{}, prefix, "opencontainers/runtime-spec", major, "WindowsResources")
}
// New returns a new containerd client that is connected to the containerd
// instance provided by address
func New(address string, opts ...ClientOpt) (*Client, error) {
var copts clientOpts
for _, o := range opts {
if err := o(&copts); err != nil {
return nil, err
}
}
gopts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithTimeout(60 * time.Second),
grpc.FailOnNonTempDialError(true),
grpc.WithBackoffMaxDelay(3 * time.Second),
grpc.WithDialer(dialer.Dialer),
}
if len(copts.dialOptions) > 0 {
gopts = copts.dialOptions
}
if copts.defaultns != "" {
unary, stream := newNSInterceptors(copts.defaultns)
gopts = append(gopts,
grpc.WithUnaryInterceptor(unary),
grpc.WithStreamInterceptor(stream),
)
}
conn, err := grpc.Dial(dialer.DialAddress(address), gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", address)
}
return NewWithConn(conn, opts...)
}
// NewWithConn returns a new containerd client that is connected to the containerd
// instance provided by the connection
func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
return &Client{
conn: conn,
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
}, nil
}
// Client is the client to interact with containerd and its various services
// using a uniform interface
type Client struct {
conn *grpc.ClientConn
runtime string
}
// IsServing returns true if the client can successfully connect to the
// containerd daemon and the healthcheck service returns the SERVING
// response.
// This call will block if a transient error is encountered during
// connection. A timeout can be set in the context to ensure it returns
// early.
func (c *Client) IsServing(ctx context.Context) (bool, error) {
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false))
if err != nil {
return false, err
}
return r.Status == grpc_health_v1.HealthCheckResponse_SERVING, nil
}
// Containers returns all containers created in containerd
func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container, error) {
r, err := c.ContainerService().List(ctx, filters...)
if err != nil {
return nil, err
}
var out []Container
for _, container := range r {
out = append(out, containerFromRecord(c, container))
}
return out, nil
}
// NewContainer will create a new container in containerd with the provided id
// the id must be unique within the namespace
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
ctx, done, err := c.WithLease(ctx)
if err != nil {
return nil, err
}
defer done()
container := containers.Container{
ID: id,
Runtime: containers.RuntimeInfo{
Name: c.runtime,
},
}
for _, o := range opts {
if err := o(ctx, c, &container); err != nil {
return nil, err
}
}
r, err := c.ContainerService().Create(ctx, container)
if err != nil {
return nil, err
}
return containerFromRecord(c, r), nil
}
// LoadContainer loads an existing container from metadata
func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
r, err := c.ContainerService().Get(ctx, id)
if err != nil {
return nil, err
}
return containerFromRecord(c, r), nil
}
// RemoteContext is used to configure object resolutions and transfers with
// remote content stores and image providers.
type RemoteContext struct {
// Resolver is used to resolve names to objects, fetchers, and pushers.
// If no resolver is provided, defaults to Docker registry resolver.
Resolver remotes.Resolver
// Unpack is done after an image is pulled to extract into a snapshotter.
// If an image is not unpacked on pull, it can be unpacked any time
// afterwards. Unpacking is required to run an image.
Unpack bool
// Snapshotter used for unpacking
Snapshotter string
// Labels to be applied to the created image
Labels map[string]string
// BaseHandlers are a set of handlers which are called on dispatch.
// These handlers always get called before any operation specific
// handlers.
BaseHandlers []images.Handler
// ConvertSchema1 is whether to convert Docker registry schema 1
// manifests. If this option is false then any image which resolves
// to schema 1 will return an error since schema 1 is not supported.
ConvertSchema1 bool
}
func defaultRemoteContext() *RemoteContext {
return &RemoteContext{
Resolver: docker.NewResolver(docker.ResolverOptions{
Client: http.DefaultClient,
}),
Snapshotter: DefaultSnapshotter,
}
}
// Pull downloads the provided content into containerd's content store
func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) {
pullCtx := defaultRemoteContext()
for _, o := range opts {
if err := o(c, pullCtx); err != nil {
return nil, err
}
}
store := c.ContentStore()
ctx, done, err := c.WithLease(ctx)
if err != nil {
return nil, err
}
defer done()
name, desc, err := pullCtx.Resolver.Resolve(ctx, ref)
if err != nil {
return nil, err
}
fetcher, err := pullCtx.Resolver.Fetcher(ctx, name)
if err != nil {
return nil, err
}
var (
schema1Converter *schema1.Converter
handler images.Handler
)
if desc.MediaType == images.MediaTypeDockerSchema1Manifest && pullCtx.ConvertSchema1 {
schema1Converter = schema1.NewConverter(store, fetcher)
handler = images.Handlers(append(pullCtx.BaseHandlers, schema1Converter)...)
} else {
handler = images.Handlers(append(pullCtx.BaseHandlers,
remotes.FetchHandler(store, fetcher),
images.ChildrenHandler(store, platforms.Default()))...,
)
}
if err := images.Dispatch(ctx, handler, desc); err != nil {
return nil, err
}
if schema1Converter != nil {
desc, err = schema1Converter.Convert(ctx)
if err != nil {
return nil, err
}
}
imgrec := images.Image{
Name: name,
Target: desc,
Labels: pullCtx.Labels,
}
is := c.ImageService()
if created, err := is.Create(ctx, imgrec); err != nil {
if !errdefs.IsAlreadyExists(err) {
return nil, err
}
updated, err := is.Update(ctx, imgrec)
if err != nil {
return nil, err
}
imgrec = updated
} else {
imgrec = created
}
img := &image{
client: c,
i: imgrec,
}
if pullCtx.Unpack {
if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
return nil, err
}
}
return img, nil
}
// Push uploads the provided content to a remote resource
func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
pushCtx := defaultRemoteContext()
for _, o := range opts {
if err := o(c, pushCtx); err != nil {
return err
}
}
pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
if err != nil {
return err
}
var m sync.Mutex
manifestStack := []ocispec.Descriptor{}
filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
m.Lock()
manifestStack = append(manifestStack, desc)
m.Unlock()
return nil, images.ErrStopHandler
default:
return nil, nil
}
})
cs := c.ContentStore()
pushHandler := remotes.PushHandler(cs, pusher)
handlers := append(pushCtx.BaseHandlers,
images.ChildrenHandler(cs, platforms.Default()),
filterHandler,
pushHandler,
)
if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
return err
}
// Iterate in reverse order as seen, parent always uploaded after child
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
if err != nil {
// TODO(estesp): until we have a more complete method for index push, we need to report
// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
// as a marker for this problem
if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
}
return err
}
}
return nil
}
// GetImage returns an existing image
func (c *Client) GetImage(ctx context.Context, ref string) (Image, error) {
i, err := c.ImageService().Get(ctx, ref)
if err != nil {
return nil, err
}
return &image{
client: c,
i: i,
}, nil
}
// ListImages returns all existing images
func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, error) {
imgs, err := c.ImageService().List(ctx, filters...)
if err != nil {
return nil, err
}
images := make([]Image, len(imgs))
for i, img := range imgs {
images[i] = &image{
client: c,
i: img,
}
}
return images, nil
}
// Subscribe to events that match one or more of the provided filters.
//
// Callers should listen on both the envelope channel and errs channel. If the
// errs channel returns nil or an error, the subscriber should terminate.
//
// To cancel receipt of events, cancel the provided context. The errs
// channel will be closed and return a nil error.
func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *eventsapi.Envelope, errs <-chan error) {
var (
evq = make(chan *eventsapi.Envelope)
errq = make(chan error, 1)
)
errs = errq
ch = evq
session, err := c.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
Filters: filters,
})
if err != nil {
errq <- err
close(errq)
return
}
go func() {
defer close(errq)
for {
ev, err := session.Recv()
if err != nil {
errq <- err
return
}
select {
case evq <- ev:
case <-ctx.Done():
return
}
}
}()
return ch, errs
}
// Close closes the clients connection to containerd
func (c *Client) Close() error {
return c.conn.Close()
}
// NamespaceService returns the underlying Namespaces Store
func (c *Client) NamespaceService() namespaces.Store {
return NewNamespaceStoreFromClient(namespacesapi.NewNamespacesClient(c.conn))
}
// ContainerService returns the underlying container Store
func (c *Client) ContainerService() containers.Store {
return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn))
}
// ContentStore returns the underlying content Store
func (c *Client) ContentStore() content.Store {
return NewContentStoreFromClient(contentapi.NewContentClient(c.conn))
}
// SnapshotService returns the underlying snapshotter for the provided snapshotter name
func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
return NewSnapshotterFromClient(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName)
}
// TaskService returns the underlying TasksClient
func (c *Client) TaskService() tasks.TasksClient {
return tasks.NewTasksClient(c.conn)
}
// ImageService returns the underlying image Store
func (c *Client) ImageService() images.Store {
return NewImageStoreFromClient(imagesapi.NewImagesClient(c.conn))
}
// DiffService returns the underlying Differ
func (c *Client) DiffService() diff.Differ {
return NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
}
// IntrospectionService returns the underlying Introspection Client
func (c *Client) IntrospectionService() introspectionapi.IntrospectionClient {
return introspectionapi.NewIntrospectionClient(c.conn)
}
// HealthService returns the underlying GRPC HealthClient
func (c *Client) HealthService() grpc_health_v1.HealthClient {
return grpc_health_v1.NewHealthClient(c.conn)
}
// EventService returns the underlying EventsClient
func (c *Client) EventService() eventsapi.EventsClient {
return eventsapi.NewEventsClient(c.conn)
}
// VersionService returns the underlying VersionClient
func (c *Client) VersionService() versionservice.VersionClient {
return versionservice.NewVersionClient(c.conn)
}
// Version of containerd
type Version struct {
// Version number
Version string
// Revision from git that was built
Revision string
}
// Version returns the version of containerd that the client is connected to
func (c *Client) Version(ctx context.Context) (Version, error) {
response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
if err != nil {
return Version{}, err
}
return Version{
Version: response.Version,
Revision: response.Revision,
}, nil
}
type importOpts struct {
}
// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error
func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
var iopts importOpts
for _, o := range opts {
if err := o(&iopts); err != nil {
return iopts, err
}
}
return iopts, nil
}
// Import imports an image from a Tar stream using reader.
// The caller needs to specify an importer. A future version may use oci.v1 as the default.
// Note that unreferenced blobs may be imported to the content store as well.
func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
_, err := resolveImportOpt(opts...) // unused now
if err != nil {
return nil, err
}
ctx, done, err := c.WithLease(ctx)
if err != nil {
return nil, err
}
defer done()
imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
if err != nil {
// is.Update() is not called on error
return nil, err
}
is := c.ImageService()
var images []Image
for _, imgrec := range imgrecs {
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
if !errdefs.IsNotFound(err) {
return nil, err
}
created, err := is.Create(ctx, imgrec)
if err != nil {
return nil, err
}
imgrec = created
} else {
imgrec = updated
}
images = append(images, &image{
client: c,
i: imgrec,
})
}
return images, nil
}
type exportOpts struct {
}
// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error
func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
var eopts exportOpts
for _, o := range opts {
if err := o(&eopts); err != nil {
return eopts, err
}
}
return eopts, nil
}
// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to the caller to put the "org.opencontainers.image.ref.name" annotation on desc.
// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
_, err := resolveExportOpt(opts...) // unused now
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
}()
return pr, nil
}
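A short, hedged sketch of the Subscribe contract documented in client.go above: consume the envelope channel and stop once the errs channel yields. The filter string is an illustrative assumption, and the imports match the embedding sketch earlier.

// consumeEvents listens on both channels returned by Subscribe, as the
// doc comment requires, and terminates once errs yields a value.
func consumeEvents(ctx context.Context, client *containerd.Client) {
	ch, errs := client.Subscribe(ctx, `topic~="/containers/"`)
	for {
		select {
		case env := <-ch:
			log.Printf("event %s in namespace %s", env.Topic, env.Namespace)
		case err := <-errs:
			if err != nil {
				log.Printf("subscription ended: %v", err)
			}
			return
		}
	}
}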

vendor/github.com/containerd/containerd/client_opts.go generated vendored Normal file

@ -0,0 +1,103 @@
package containerd
import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"google.golang.org/grpc"
)
type clientOpts struct {
defaultns string
dialOptions []grpc.DialOption
}
// ClientOpt allows callers to set options on the containerd client
type ClientOpt func(c *clientOpts) error
// WithDefaultNamespace sets the default namespace on the client
//
// Any operation that does not have a namespace set on the context will
// be provided the default namespace
func WithDefaultNamespace(ns string) ClientOpt {
return func(c *clientOpts) error {
c.defaultns = ns
return nil
}
}
// WithDialOpts allows grpc.DialOptions to be set on the connection
func WithDialOpts(opts []grpc.DialOption) ClientOpt {
return func(c *clientOpts) error {
c.dialOptions = opts
return nil
}
}
// RemoteOpt allows the caller to set distribution options for a remote
type RemoteOpt func(*Client, *RemoteContext) error
// WithPullUnpack is used to unpack an image after pull. This
// uses the snapshotter, content store, and diff service
// configured for the client.
func WithPullUnpack(_ *Client, c *RemoteContext) error {
c.Unpack = true
return nil
}
// WithPullSnapshotter specifies snapshotter name used for unpacking
func WithPullSnapshotter(snapshotterName string) RemoteOpt {
return func(_ *Client, c *RemoteContext) error {
c.Snapshotter = snapshotterName
return nil
}
}
// WithPullLabel sets a label to be associated with a pulled reference
func WithPullLabel(key, value string) RemoteOpt {
return func(_ *Client, rc *RemoteContext) error {
if rc.Labels == nil {
rc.Labels = make(map[string]string)
}
rc.Labels[key] = value
return nil
}
}
// WithPullLabels associates a set of labels to a pulled reference
func WithPullLabels(labels map[string]string) RemoteOpt {
return func(_ *Client, rc *RemoteContext) error {
if rc.Labels == nil {
rc.Labels = make(map[string]string)
}
for k, v := range labels {
rc.Labels[k] = v
}
return nil
}
}
// WithSchema1Conversion is used to convert Docker registry schema 1
// manifests to OCI manifests on pull. Without this option schema 1
// manifests will return a not supported error.
func WithSchema1Conversion(client *Client, c *RemoteContext) error {
c.ConvertSchema1 = true
return nil
}
// WithResolver specifies the resolver to use.
func WithResolver(resolver remotes.Resolver) RemoteOpt {
return func(client *Client, c *RemoteContext) error {
c.Resolver = resolver
return nil
}
}
// WithImageHandler adds a base handler to be called on dispatch.
func WithImageHandler(h images.Handler) RemoteOpt {
return func(client *Client, c *RemoteContext) error {
c.BaseHandlers = append(c.BaseHandlers, h)
return nil
}
}
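A hedged sketch composing the options above: a client-level default namespace plus pull-time options. The socket path, namespace, image reference, and label are assumptions.

// newExampleClient dials the daemon with a default namespace so callers
// need not stamp one on every context, then pulls an image with unpacking,
// schema 1 conversion for older registries, and a provenance label.
func newExampleClient(ctx context.Context) (*containerd.Client, containerd.Image, error) {
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"),
	)
	if err != nil {
		return nil, nil, err
	}
	image, err := client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithSchema1Conversion,
		containerd.WithPullLabel("pulled-by", "example"),
	)
	if err != nil {
		client.Close()
		return nil, nil, err
	}
	return client, image, nil
}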

vendor/github.com/containerd/containerd/client_test.go generated vendored Normal file

@ -0,0 +1,190 @@
package containerd
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
golog "log"
"os"
"os/exec"
"runtime"
"testing"
"time"
"google.golang.org/grpc/grpclog"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/testutil"
"github.com/sirupsen/logrus"
)
var (
address string
noDaemon bool
noCriu bool
supportsCriu bool
ctrd = &daemon{}
)
func init() {
// Discard grpc logs so that they don't mess with our stdio
grpclog.SetLogger(golog.New(ioutil.Discard, "", golog.LstdFlags))
flag.StringVar(&address, "address", defaultAddress, "The address to the containerd socket for use in the tests")
flag.BoolVar(&noDaemon, "no-daemon", false, "Do not start a dedicated daemon for the tests")
flag.BoolVar(&noCriu, "no-criu", false, "Do not run the checkpoint tests")
flag.Parse()
}
func testContext() (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
ctx = namespaces.WithNamespace(ctx, "testing")
return ctx, cancel
}
func TestMain(m *testing.M) {
if testing.Short() {
os.Exit(m.Run())
}
testutil.RequiresRootM()
// check if criu is installed on the system
_, err := exec.LookPath("criu")
supportsCriu = err == nil && !noCriu
var (
buf = bytes.NewBuffer(nil)
ctx, cancel = testContext()
)
defer cancel()
if !noDaemon {
os.RemoveAll(defaultRoot)
err := ctrd.start("containerd", address, []string{
"--root", defaultRoot,
"--state", defaultState,
"--log-level", "debug",
}, buf, buf)
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %s", err, buf.String())
os.Exit(1)
}
}
waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
client, err := ctrd.waitForStart(waitCtx)
waitCancel()
if err != nil {
ctrd.Kill()
ctrd.Wait()
fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String())
os.Exit(1)
}
// print out the version information
version, err := client.Version(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting version: %s\n", err)
os.Exit(1)
}
// allow comparison with containerd under test
log.G(ctx).WithFields(logrus.Fields{
"version": version.Version,
"revision": version.Revision,
}).Info("running tests against containerd")
// pull a seed image
if runtime.GOOS != "windows" { // TODO: remove once pull is supported on windows
if _, err = client.Pull(ctx, testImage, WithPullUnpack); err != nil {
ctrd.Stop()
ctrd.Wait()
fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String())
os.Exit(1)
}
}
if err := platformTestSetup(client); err != nil {
fmt.Fprintln(os.Stderr, "platform test setup failed", err)
os.Exit(1)
}
if err := client.Close(); err != nil {
fmt.Fprintln(os.Stderr, "failed to close client", err)
}
// run the test
status := m.Run()
if !noDaemon {
// tear down the daemon and resources created
if err := ctrd.Stop(); err != nil {
if err := ctrd.Kill(); err != nil {
fmt.Fprintln(os.Stderr, "failed to signal containerd", err)
}
}
if err := ctrd.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); !ok {
fmt.Fprintln(os.Stderr, "failed to wait for containerd", err)
}
}
if err := os.RemoveAll(defaultRoot); err != nil {
fmt.Fprintln(os.Stderr, "failed to remove test root dir", err)
os.Exit(1)
}
// only print containerd logs if the test failed
if status != 0 {
fmt.Fprintln(os.Stderr, buf.String())
}
}
os.Exit(status)
}
func newClient(t testing.TB, address string, opts ...ClientOpt) (*Client, error) {
if testing.Short() {
t.Skip()
}
// testutil.RequiresRoot(t) is not needed here (already called in TestMain)
return New(address, opts...)
}
func TestNewClient(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
if client == nil {
t.Fatal("New() returned nil client")
}
if err := client.Close(); err != nil {
t.Errorf("client closed returned errror %v", err)
}
}
// All the container tests depend on this; we need it to run first.
func TestImagePull(t *testing.T) {
if runtime.GOOS == "windows" {
// TODO: remove once Windows has a snapshotter
t.Skip("Windows does not have a snapshotter yet")
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
ctx, cancel := testContext()
defer cancel()
_, err = client.Pull(ctx, testImage)
if err != nil {
t.Error(err)
return
}
}


@ -0,0 +1,38 @@
// +build !windows
package containerd
import (
"runtime"
)
const (
defaultRoot = "/var/lib/containerd-test"
defaultState = "/run/containerd-test"
defaultAddress = "/run/containerd-test/containerd.sock"
)
var (
testImage string
)
func platformTestSetup(client *Client) error {
return nil
}
func init() {
switch runtime.GOARCH {
case "386":
testImage = "docker.io/i386/alpine:latest"
case "arm":
testImage = "docker.io/arm32v6/alpine:latest"
case "arm64":
testImage = "docker.io/arm64v8/alpine:latest"
case "ppc64le":
testImage = "docker.io/ppc64le/alpine:latest"
case "s390x":
testImage = "docker.io/s390x/alpine:latest"
default:
testImage = "docker.io/library/alpine:latest"
}
}


@ -0,0 +1,88 @@
package containerd
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
)
const (
defaultAddress = `\\.\pipe\containerd-containerd-test`
testImage = "docker.io/library/go:nanoserver"
)
var (
dockerLayerFolders []string
defaultRoot = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test")
defaultState = filepath.Join(os.Getenv("programfiles"), "containerd", "state-test")
)
func platformTestSetup(client *Client) error {
var (
roots []string
layerChains = make(map[string]string)
)
// Since we can't pull images yet, we'll piggyback on docker's default
// images
wfPath := `C:\ProgramData\docker\windowsfilter`
wf, err := os.Open(wfPath)
if err != nil {
return errors.Wrapf(err, "failed to access docker layers @ %s", wfPath)
}
defer wf.Close()
entries, err := wf.Readdirnames(0)
if err != nil {
return errors.Wrapf(err, "failed to read %s entries", wfPath)
}
for _, fn := range entries {
layerChainPath := filepath.Join(wfPath, fn, "layerchain.json")
lfi, err := os.Stat(layerChainPath)
switch {
case err == nil && lfi.Mode().IsRegular():
f, err := os.OpenFile(layerChainPath, os.O_RDONLY, 0660)
if err != nil {
fmt.Fprintln(os.Stderr,
errors.Wrapf(err, "failed to open %s", layerChainPath))
continue
}
defer f.Close()
l := make([]string, 0)
if err := json.NewDecoder(f).Decode(&l); err != nil {
fmt.Fprintln(os.Stderr,
errors.Wrapf(err, "failed to decode %s", layerChainPath))
continue
}
switch {
case len(l) == 1:
layerChains[l[0]] = filepath.Join(wfPath, fn)
case len(l) > 1:
fmt.Fprintf(os.Stderr, "Too many entries in %s: %d", layerChainPath, len(l))
case len(l) == 0:
roots = append(roots, filepath.Join(wfPath, fn))
}
case os.IsNotExist(err):
// keep on going
default:
return errors.Wrapf(err, "error trying to access %s", layerChainPath)
}
}
// There will be 2 roots; just take the first one
l := roots[0]
dockerLayerFolders = append(dockerLayerFolders, l)
for {
l = layerChains[l]
if l == "" {
break
}
dockerLayerFolders = append([]string{l}, dockerLayerFolders...)
}
return nil
}


@ -0,0 +1,3 @@
## containerd Community Code of Conduct
containerd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

vendor/github.com/containerd/containerd/container.go generated vendored Normal file

@ -0,0 +1,316 @@
package containerd
import (
"context"
"encoding/json"
"path/filepath"
"strings"
"github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/typeurl"
prototypes "github.com/gogo/protobuf/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// Container is a metadata object for container resources and task creation
type Container interface {
// ID identifies the container
ID() string
// Info returns the underlying container record type
Info(context.Context) (containers.Container, error)
// Delete removes the container
Delete(context.Context, ...DeleteOpts) error
// NewTask creates a new task based on the container metadata
NewTask(context.Context, cio.Creation, ...NewTaskOpts) (Task, error)
// Spec returns the OCI runtime specification
Spec(context.Context) (*specs.Spec, error)
// Task returns the current task for the container
//
// If cio.Attach options are passed the client will reattach to the IO for the running
// task. If no task exists for the container a NotFound error is returned
//
// Clients must make sure that only one reader is attached to the task and consuming
// the output from the task's fifos
Task(context.Context, cio.Attach) (Task, error)
// Image returns the image that the container is based on
Image(context.Context) (Image, error)
// Labels returns the labels set on the container
Labels(context.Context) (map[string]string, error)
// SetLabels sets the provided labels for the container and returns the final label set
SetLabels(context.Context, map[string]string) (map[string]string, error)
// Extensions returns the extensions set on the container
Extensions(context.Context) (map[string]prototypes.Any, error)
// Update a container
Update(context.Context, ...UpdateContainerOpts) error
}
func containerFromRecord(client *Client, c containers.Container) *container {
return &container{
client: client,
id: c.ID,
}
}
var _ = (Container)(&container{})
type container struct {
client *Client
id string
}
// ID returns the container's unique id
func (c *container) ID() string {
return c.id
}
func (c *container) Info(ctx context.Context) (containers.Container, error) {
return c.get(ctx)
}
func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
r, err := c.get(ctx)
if err != nil {
return nil, err
}
return r.Extensions, nil
}
func (c *container) Labels(ctx context.Context) (map[string]string, error) {
r, err := c.get(ctx)
if err != nil {
return nil, err
}
return r.Labels, nil
}
func (c *container) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
container := containers.Container{
ID: c.id,
Labels: labels,
}
var paths []string
// mask off paths so we only mutate the labels present in the provided map.
// Labels not in the passed-in argument will be left alone.
for k := range labels {
paths = append(paths, strings.Join([]string{"labels", k}, "."))
}
r, err := c.client.ContainerService().Update(ctx, container, paths...)
if err != nil {
return nil, err
}
return r.Labels, nil
}
// Spec returns the current OCI specification for the container
func (c *container) Spec(ctx context.Context) (*specs.Spec, error) {
r, err := c.get(ctx)
if err != nil {
return nil, err
}
var s specs.Spec
if err := json.Unmarshal(r.Spec.Value, &s); err != nil {
return nil, err
}
return &s, nil
}
// Delete deletes an existing container
// An error is returned if the container has a running task.
func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error {
if _, err := c.loadTask(ctx, nil); err == nil {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id)
}
r, err := c.get(ctx)
if err != nil {
return err
}
for _, o := range opts {
if err := o(ctx, c.client, r); err != nil {
return err
}
}
return c.client.ContainerService().Delete(ctx, c.id)
}
func (c *container) Task(ctx context.Context, attach cio.Attach) (Task, error) {
return c.loadTask(ctx, attach)
}
// Image returns the image that the container is based on
func (c *container) Image(ctx context.Context) (Image, error) {
r, err := c.get(ctx)
if err != nil {
return nil, err
}
if r.Image == "" {
return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image")
}
i, err := c.client.ImageService().Get(ctx, r.Image)
if err != nil {
return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image)
}
return &image{
client: c.client,
i: i,
}, nil
}
func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (_ Task, err error) {
i, err := ioCreate(c.id)
if err != nil {
return nil, err
}
defer func() {
if err != nil && i != nil {
i.Cancel()
i.Close()
}
}()
cfg := i.Config()
request := &tasks.CreateTaskRequest{
ContainerID: c.id,
Terminal: cfg.Terminal,
Stdin: cfg.Stdin,
Stdout: cfg.Stdout,
Stderr: cfg.Stderr,
}
r, err := c.get(ctx)
if err != nil {
return nil, err
}
if r.SnapshotKey != "" {
if r.Snapshotter == "" {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
}
// get the rootfs from the snapshotter and add it to the request
mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey)
if err != nil {
return nil, err
}
for _, m := range mounts {
request.Rootfs = append(request.Rootfs, &types.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
})
}
}
var info TaskInfo
for _, o := range opts {
if err := o(ctx, c.client, &info); err != nil {
return nil, err
}
}
if info.RootFS != nil {
for _, m := range info.RootFS {
request.Rootfs = append(request.Rootfs, &types.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
})
}
}
if info.Options != nil {
any, err := typeurl.MarshalAny(info.Options)
if err != nil {
return nil, err
}
request.Options = any
}
t := &task{
client: c.client,
io: i,
id: c.id,
}
if info.Checkpoint != nil {
request.Checkpoint = info.Checkpoint
}
response, err := c.client.TaskService().Create(ctx, request)
if err != nil {
return nil, errdefs.FromGRPC(err)
}
t.pid = response.Pid
return t, nil
}
func (c *container) Update(ctx context.Context, opts ...UpdateContainerOpts) error {
// fetch the current container config before updating it
r, err := c.get(ctx)
if err != nil {
return err
}
for _, o := range opts {
if err := o(ctx, c.client, &r); err != nil {
return err
}
}
if _, err := c.client.ContainerService().Update(ctx, r); err != nil {
return errdefs.FromGRPC(err)
}
return nil
}
func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) {
response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
ContainerID: c.id,
})
if err != nil {
err = errdefs.FromGRPC(err)
if errdefs.IsNotFound(err) {
return nil, errors.Wrapf(err, "no running task found")
}
return nil, err
}
var i cio.IO
if ioAttach != nil {
if i, err = attachExistingIO(response, ioAttach); err != nil {
return nil, err
}
}
t := &task{
client: c.client,
io: i,
id: response.Process.ID,
pid: response.Process.Pid,
}
return t, nil
}
func (c *container) get(ctx context.Context) (containers.Container, error) {
return c.client.ContainerService().Get(ctx, c.id)
}
func attachExistingIO(response *tasks.GetResponse, ioAttach cio.Attach) (cio.IO, error) {
// get the existing fifo paths from the task information stored by the daemon
paths := &cio.FIFOSet{
Dir: getFifoDir([]string{
response.Process.Stdin,
response.Process.Stdout,
response.Process.Stderr,
}),
In: response.Process.Stdin,
Out: response.Process.Stdout,
Err: response.Process.Stderr,
Terminal: response.Process.Terminal,
}
return ioAttach(paths)
}
// getFifoDir looks for any non-empty path for a stdio fifo
// and returns the directory in which it is located
func getFifoDir(paths []string) string {
for _, p := range paths {
if p != "" {
return filepath.Dir(p)
}
}
return ""
}
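A hedged sketch of the reattach path described in the Task doc comment above: load an existing container and attach this process's stdio to its running task. The container ID is an assumption; `cio.WithAttach` comes from the cio package vendored alongside this file, and imports (context, log, os, containerd, cio) are omitted.

func reattach(ctx context.Context, client *containerd.Client) error {
	container, err := client.LoadContainer(ctx, "example")
	if err != nil {
		return err
	}
	// Task returns a NotFound error when nothing is running; only one
	// reader may consume the task's fifos, per the comment above.
	task, err := container.Task(ctx, cio.WithAttach(os.Stdin, os.Stdout, os.Stderr))
	if err != nil {
		return err
	}
	status, err := task.Status(ctx)
	if err != nil {
		return err
	}
	log.Printf("reattached to pid %d (%s)", task.Pid(), status.Status)
	return nil
}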


@ -0,0 +1,252 @@
// +build !windows
package containerd
import (
"syscall"
"testing"
"github.com/containerd/containerd/oci"
)
func TestCheckpointRestore(t *testing.T) {
if !supportsCriu {
t.Skip("system does not have criu installed")
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
ctx, cancel = testContext()
id = t.Name()
)
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Error(err)
return
}
container, err := client.NewContainer(ctx, id, WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")), WithNewSnapshot(id, image))
if err != nil {
t.Error(err)
return
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Error(err)
return
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Error(err)
return
}
if err := task.Start(ctx); err != nil {
t.Error(err)
return
}
checkpoint, err := task.Checkpoint(ctx, WithExit)
if err != nil {
t.Error(err)
return
}
<-statusC
if _, err := task.Delete(ctx); err != nil {
t.Error(err)
return
}
if task, err = container.NewTask(ctx, empty(), WithTaskCheckpoint(checkpoint)); err != nil {
t.Error(err)
return
}
defer task.Delete(ctx)
statusC, err = task.Wait(ctx)
if err != nil {
t.Error(err)
return
}
if err := task.Start(ctx); err != nil {
t.Error(err)
return
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Error(err)
return
}
<-statusC
}
func TestCheckpointRestoreNewContainer(t *testing.T) {
if !supportsCriu {
t.Skip("system does not have criu installed")
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
id := t.Name()
ctx, cancel := testContext()
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Error(err)
return
}
container, err := client.NewContainer(ctx, id, WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")), WithNewSnapshot(id, image))
if err != nil {
t.Error(err)
return
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Error(err)
return
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Error(err)
return
}
if err := task.Start(ctx); err != nil {
t.Error(err)
return
}
checkpoint, err := task.Checkpoint(ctx, WithExit)
if err != nil {
t.Error(err)
return
}
<-statusC
if _, err := task.Delete(ctx); err != nil {
t.Error(err)
return
}
if err := container.Delete(ctx, WithSnapshotCleanup); err != nil {
t.Error(err)
return
}
if container, err = client.NewContainer(ctx, id, WithCheckpoint(checkpoint, id)); err != nil {
t.Error(err)
return
}
if task, err = container.NewTask(ctx, empty(), WithTaskCheckpoint(checkpoint)); err != nil {
t.Error(err)
return
}
defer task.Delete(ctx)
statusC, err = task.Wait(ctx)
if err != nil {
t.Error(err)
return
}
if err := task.Start(ctx); err != nil {
t.Error(err)
return
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Error(err)
return
}
<-statusC
}
func TestCheckpointLeaveRunning(t *testing.T) {
if testing.Short() {
t.Skip()
}
if !supportsCriu {
t.Skip("system does not have criu installed")
}
client, err := New(address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
ctx, cancel = testContext()
id = t.Name()
)
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Error(err)
return
}
container, err := client.NewContainer(ctx, id, WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")), WithNewSnapshot(id, image))
if err != nil {
t.Error(err)
return
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Error(err)
return
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Error(err)
return
}
if err := task.Start(ctx); err != nil {
t.Error(err)
return
}
if _, err := task.Checkpoint(ctx); err != nil {
t.Error(err)
return
}
status, err := task.Status(ctx)
if err != nil {
t.Error(err)
return
}
if status.Status != Running {
t.Errorf("expected status %q but received %q", Running, status)
return
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Error(err)
return
}
<-statusC
}

File diff suppressed because it is too large

@ -0,0 +1,194 @@
package containerd
import (
"context"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci"
"github.com/containerd/containerd/platforms"
"github.com/containerd/typeurl"
"github.com/gogo/protobuf/types"
"github.com/opencontainers/image-spec/identity"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// DeleteOpts allows the caller to set options for the deletion of a container
type DeleteOpts func(ctx context.Context, client *Client, c containers.Container) error
// NewContainerOpts allows the caller to set additional options when creating a container
type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
// UpdateContainerOpts allows the caller to set additional options when updating a container
type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
// WithRuntime allows a user to specify the runtime name and additional options that should
// be used to create tasks for the container
func WithRuntime(name string, options interface{}) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
var (
any *types.Any
err error
)
if options != nil {
any, err = typeurl.MarshalAny(options)
if err != nil {
return err
}
}
c.Runtime = containers.RuntimeInfo{
Name: name,
Options: any,
}
return nil
}
}
// WithImage sets the provided image as the base for the container
func WithImage(i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Image = i.Name()
return nil
}
}
// WithContainerLabels adds the provided labels to the container
func WithContainerLabels(labels map[string]string) NewContainerOpts {
return func(_ context.Context, _ *Client, c *containers.Container) error {
c.Labels = labels
return nil
}
}
// WithSnapshotter sets the provided snapshotter for use by the container
//
// This option must appear before other snapshotter options to have an effect.
func WithSnapshotter(name string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
c.Snapshotter = name
return nil
}
}
// WithSnapshot uses an existing root filesystem for the container
func WithSnapshot(id string) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
setSnapshotterIfEmpty(c)
// check that the snapshot exists, if not, fail on creation
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
return err
}
c.SnapshotKey = id
return nil
}
}
// WithNewSnapshot allocates a new snapshot to be used by the container as the
// root filesystem in read-write mode
func WithNewSnapshot(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
if err != nil {
return err
}
setSnapshotterIfEmpty(c)
parent := identity.ChainID(diffIDs).String()
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent); err != nil {
return err
}
c.SnapshotKey = id
c.Image = i.Name()
return nil
}
}
// WithSnapshotCleanup deletes the rootfs snapshot allocated for the container
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
if c.SnapshotKey != "" {
if c.Snapshotter == "" {
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
}
return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey)
}
return nil
}
// WithNewSnapshotView allocates a new snapshot to be used by the container as the
// root filesystem in read-only mode
func WithNewSnapshotView(id string, i Image) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
if err != nil {
return err
}
setSnapshotterIfEmpty(c)
parent := identity.ChainID(diffIDs).String()
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent); err != nil {
return err
}
c.SnapshotKey = id
c.Image = i.Name()
return nil
}
}
func setSnapshotterIfEmpty(c *containers.Container) {
if c.Snapshotter == "" {
c.Snapshotter = DefaultSnapshotter
}
}
// WithContainerExtension appends extension data to the container object.
// Use this to decorate the container object with additional data for the client
// integration.
//
// Make sure to register the type of `extension` in the typeurl package via
// `typeurl.Register` or container creation may fail.
func WithContainerExtension(name string, extension interface{}) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
if name == "" {
return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length")
}
any, err := typeurl.MarshalAny(extension)
if err != nil {
if errors.Cause(err) == typeurl.ErrNotFound {
return errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name)
}
return errors.Wrap(err, "error marshalling extension")
}
if c.Extensions == nil {
c.Extensions = make(map[string]types.Any)
}
c.Extensions[name] = *any
return nil
}
}
// WithNewSpec generates a new spec for a new container
func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
s, err := oci.GenerateSpec(ctx, client, c, opts...)
if err != nil {
return err
}
c.Spec, err = typeurl.MarshalAny(s)
return err
}
}
// WithSpec sets the provided spec on the container
func WithSpec(s *specs.Spec, opts ...oci.SpecOpts) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
for _, o := range opts {
if err := o(ctx, client, c, s); err != nil {
return err
}
}
var err error
c.Spec, err = typeurl.MarshalAny(s)
return err
}
}
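A hedged sketch of the extension flow the comment on WithContainerExtension warns about: register the concrete type with typeurl first, then attach the value at creation time. The type, type URL components, and extension key are all hypothetical.

// MyMetadata is a hypothetical client-side type carried on the container.
type MyMetadata struct {
	CreatedBy string
}

func init() {
	// without this registration, WithContainerExtension would fail to
	// marshal the value into a types.Any
	typeurl.Register(&MyMetadata{}, "types.example.com", "MyMetadata")
}

func createWithExtension(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "extension-example",
		containerd.WithNewSpec(oci.WithImageConfig(image)),
		containerd.WithContainerExtension("example.metadata", &MyMetadata{CreatedBy: "demo"}),
	)
}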


@ -0,0 +1,220 @@
// +build !windows
package containerd
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/platforms"
"github.com/gogo/protobuf/proto"
protobuf "github.com/gogo/protobuf/types"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// WithCheckpoint allows a container to be created from the checkpointed information
// provided by the descriptor. The image, snapshot, and runtime specifications are
// restored on the container
func WithCheckpoint(im Image, snapshotKey string) NewContainerOpts {
// set image and rw, and spec
return func(ctx context.Context, client *Client, c *containers.Container) error {
var (
desc = im.Target()
id = desc.Digest
store = client.ContentStore()
)
index, err := decodeIndex(ctx, store, id)
if err != nil {
return err
}
var rw *v1.Descriptor
for _, m := range index.Manifests {
switch m.MediaType {
case v1.MediaTypeImageLayer:
fk := m
rw = &fk
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList:
config, err := images.Config(ctx, store, m, platforms.Default())
if err != nil {
return errors.Wrap(err, "unable to resolve image config")
}
diffIDs, err := images.RootFS(ctx, store, config)
if err != nil {
return errors.Wrap(err, "unable to get rootfs")
}
setSnapshotterIfEmpty(c)
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, snapshotKey, identity.ChainID(diffIDs).String()); err != nil {
if !errdefs.IsAlreadyExists(err) {
return err
}
}
c.Image = index.Annotations["image.name"]
case images.MediaTypeContainerd1CheckpointConfig:
data, err := content.ReadBlob(ctx, store, m.Digest)
if err != nil {
return errors.Wrap(err, "unable to read checkpoint config")
}
var any protobuf.Any
if err := proto.Unmarshal(data, &any); err != nil {
return err
}
c.Spec = &any
}
}
if rw != nil {
// apply the rw snapshot to the new rw layer
mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, snapshotKey)
if err != nil {
return errors.Wrapf(err, "unable to get mounts for %s", snapshotKey)
}
if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil {
return errors.Wrap(err, "unable to apply rw diff")
}
}
c.SnapshotKey = snapshotKey
return nil
}
}
// WithTaskCheckpoint allows a task to be created with live runtime and memory data from a
// previous checkpoint. Additional software such as CRIU may be required to
// restore a task from a checkpoint
func WithTaskCheckpoint(im Image) NewTaskOpts {
return func(ctx context.Context, c *Client, info *TaskInfo) error {
desc := im.Target()
id := desc.Digest
index, err := decodeIndex(ctx, c.ContentStore(), id)
if err != nil {
return err
}
for _, m := range index.Manifests {
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
info.Checkpoint = &types.Descriptor{
MediaType: m.MediaType,
Size_: m.Size,
Digest: m.Digest,
}
return nil
}
}
return fmt.Errorf("checkpoint not found in index %s", id)
}
}
func decodeIndex(ctx context.Context, store content.Store, id digest.Digest) (*v1.Index, error) {
var index v1.Index
p, err := content.ReadBlob(ctx, store, id)
if err != nil {
return nil, err
}
if err := json.Unmarshal(p, &index); err != nil {
return nil, err
}
return &index, nil
}
// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
// filesystem to be used by a container with user namespaces
func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
return withRemappedSnapshotBase(id, i, uid, gid, false)
}
// WithRemappedSnapshotView is similar to WithRemappedSnapshot but the rootfs is mounted read-only.
func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
return withRemappedSnapshotBase(id, i, uid, gid, true)
}
func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
if err != nil {
return err
}
setSnapshotterIfEmpty(c)
var (
snapshotter = client.SnapshotService(c.Snapshotter)
parent = identity.ChainID(diffIDs).String()
usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
)
if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
c.SnapshotKey = id
c.Image = i.Name()
return nil
} else if !errdefs.IsNotFound(err) {
return err
}
}
mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent)
if err != nil {
return err
}
if err := remapRootFS(mounts, uid, gid); err != nil {
snapshotter.Remove(ctx, usernsID)
return err
}
if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil {
return err
}
if readonly {
_, err = snapshotter.View(ctx, id, usernsID)
} else {
_, err = snapshotter.Prepare(ctx, id, usernsID)
}
if err != nil {
return err
}
c.SnapshotKey = id
c.Image = i.Name()
return nil
}
}
func remapRootFS(mounts []mount.Mount, uid, gid uint32) error {
root, err := ioutil.TempDir("", "ctd-remap")
if err != nil {
return err
}
defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
err = filepath.Walk(root, incrementFS(root, uid, gid))
if uerr := mount.Unmount(root, 0); err == nil {
err = uerr
}
return err
}
func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
var (
stat = info.Sys().(*syscall.Stat_t)
u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc)
)
// be sure to lchown the path so as to not dereference a symlink to a host file
return os.Lchown(path, u, g)
}
}
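A hedged usage sketch for the remapped-snapshot helpers above: the uid/gid of 1000 is illustrative, and a real user-namespaced container would additionally need a matching uid/gid mapping in its OCI spec, which is not shown here.

func createRemapped(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	// chowns a copy of the image rootfs to 1000:1000; the remapped parent
	// snapshot is cached per uid/gid pair, so the filesystem walk above
	// only runs once for repeated creations
	return client.NewContainer(ctx, "userns-example",
		containerd.WithRemappedSnapshot("userns-example-rootfs", image, 1000, 1000),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
}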

vendor/github.com/containerd/containerd/container_test.go generated vendored Normal file
File diff suppressed because it is too large

@ -0,0 +1,18 @@
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Delegate=yes
KillMode=process
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target


@ -0,0 +1,92 @@
package containers
import (
"context"
"time"
"github.com/gogo/protobuf/types"
)
// Container represents the set of data pinned by a container. Unless otherwise
// noted, the resources here are considered in use by the container.
//
// The resources specified in this object are used to create tasks from the container.
type Container struct {
// ID uniquely identifies the container in a namespace.
//
// This property is required and cannot be changed after creation.
ID string
// Labels provide metadata extension for a container.
//
// These are optional and fully mutable.
Labels map[string]string
// Image specifies the image reference used for a container.
//
// This property is optional but immutable.
Image string
// Runtime specifies which runtime should be used when launching container
// tasks.
//
// This property is required and immutable.
Runtime RuntimeInfo
// Spec should carry the runtime specification used to implement the
// container.
//
// This field is required but mutable.
Spec *types.Any
// SnapshotKey specifies the snapshot key to use for the container's root
// filesystem. When starting a task from this container, a caller should
// look up the mounts from the snapshot service and include those on the
// task create request.
//
// This field is not required but immutable.
SnapshotKey string
// Snapshotter specifies the snapshotter name used for rootfs
//
// This field is not required but immutable.
Snapshotter string
// CreatedAt is the time at which the container was created.
CreatedAt time.Time
// UpdatedAt is the time at which the container was updated.
UpdatedAt time.Time
// Extensions stores client-specified metadata
Extensions map[string]types.Any
}
// RuntimeInfo holds runtime specific information
type RuntimeInfo struct {
Name string
Options *types.Any
}
// Store interacts with the underlying container storage
type Store interface {
Get(ctx context.Context, id string) (Container, error)
// List returns containers that match one or more of the provided filters.
List(ctx context.Context, filters ...string) ([]Container, error)
// Create a container in the store from the provided container.
Create(ctx context.Context, container Container) (Container, error)
// Update the container with the provided container object. ID must be set.
//
// If one or more fieldpaths are provided, only the field corresponding to
// the fieldpaths will be mutated.
Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)
// Delete a container using the id.
//
// nil will be returned on success. If the container is not known to the
// store, ErrNotFound will be returned.
Delete(ctx context.Context, id string) error
}
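A hedged sketch of a masked update through the Store interface above: when fieldpaths are given, only those fields are mutated, mirroring how the client's SetLabels builds "labels.<key>" paths. The container ID and label are assumptions.

func tagContainer(ctx context.Context, store containers.Store) error {
	c, err := store.Get(ctx, "example")
	if err != nil {
		return err
	}
	if c.Labels == nil {
		c.Labels = map[string]string{}
	}
	c.Labels["tier"] = "web"
	// the fieldpath confines the update to this one label
	_, err = store.Update(ctx, c, "labels.tier")
	return err
}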


@ -0,0 +1,135 @@
package containerd
import (
"context"
containersapi "github.com/containerd/containerd/api/services/containers/v1"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
ptypes "github.com/gogo/protobuf/types"
)
type remoteContainers struct {
client containersapi.ContainersClient
}
var _ containers.Store = &remoteContainers{}
// NewRemoteContainerStore returns the container Store connected with the provided client
func NewRemoteContainerStore(client containersapi.ContainersClient) containers.Store {
return &remoteContainers{
client: client,
}
}
func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Container, error) {
resp, err := r.client.Get(ctx, &containersapi.GetContainerRequest{
ID: id,
})
if err != nil {
return containers.Container{}, errdefs.FromGRPC(err)
}
return containerFromProto(&resp.Container), nil
}
func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) {
resp, err := r.client.List(ctx, &containersapi.ListContainersRequest{
Filters: filters,
})
if err != nil {
return nil, errdefs.FromGRPC(err)
}
return containersFromProto(resp.Containers), nil
}
func (r *remoteContainers) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
created, err := r.client.Create(ctx, &containersapi.CreateContainerRequest{
Container: containerToProto(&container),
})
if err != nil {
return containers.Container{}, errdefs.FromGRPC(err)
}
return containerFromProto(&created.Container), nil
}
func (r *remoteContainers) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
var updateMask *ptypes.FieldMask
if len(fieldpaths) > 0 {
updateMask = &ptypes.FieldMask{
Paths: fieldpaths,
}
}
updated, err := r.client.Update(ctx, &containersapi.UpdateContainerRequest{
Container: containerToProto(&container),
UpdateMask: updateMask,
})
if err != nil {
return containers.Container{}, errdefs.FromGRPC(err)
}
return containerFromProto(&updated.Container), nil
}
func (r *remoteContainers) Delete(ctx context.Context, id string) error {
_, err := r.client.Delete(ctx, &containersapi.DeleteContainerRequest{
ID: id,
})
return errdefs.FromGRPC(err)
}
func containerToProto(container *containers.Container) containersapi.Container {
return containersapi.Container{
ID: container.ID,
Labels: container.Labels,
Image: container.Image,
Runtime: &containersapi.Container_Runtime{
Name: container.Runtime.Name,
Options: container.Runtime.Options,
},
Spec: container.Spec,
Snapshotter: container.Snapshotter,
SnapshotKey: container.SnapshotKey,
Extensions: container.Extensions,
}
}
func containerFromProto(containerpb *containersapi.Container) containers.Container {
var runtime containers.RuntimeInfo
if containerpb.Runtime != nil {
runtime = containers.RuntimeInfo{
Name: containerpb.Runtime.Name,
Options: containerpb.Runtime.Options,
}
}
return containers.Container{
ID: containerpb.ID,
Labels: containerpb.Labels,
Image: containerpb.Image,
Runtime: runtime,
Spec: containerpb.Spec,
Snapshotter: containerpb.Snapshotter,
SnapshotKey: containerpb.SnapshotKey,
CreatedAt: containerpb.CreatedAt,
UpdatedAt: containerpb.UpdatedAt,
Extensions: containerpb.Extensions,
}
}
func containersFromProto(containerspb []containersapi.Container) []containers.Container {
var containers []containers.Container
for _, container := range containerspb {
containers = append(containers, containerFromProto(&container))
}
return containers
}

View file

@ -0,0 +1,128 @@
package content
import (
"context"
"io"
"time"
"github.com/opencontainers/go-digest"
)
// ReaderAt extends the standard io.ReaderAt interface with a Size method and io.Closer
type ReaderAt interface {
io.ReaderAt
io.Closer
Size() int64
}
// Provider provides a reader interface for specific content
type Provider interface {
ReaderAt(ctx context.Context, dgst digest.Digest) (ReaderAt, error)
}
// Ingester writes content
type Ingester interface {
Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
}
// Info holds content specific information
//
// TODO(stevvooe): Consider a very different name for this struct. Info is way
// too general. It also reads very weird in certain contexts, like pluralization.
type Info struct {
Digest digest.Digest
Size int64
CreatedAt time.Time
UpdatedAt time.Time
Labels map[string]string
}
// Status of a content operation
type Status struct {
Ref string
Offset int64
Total int64
Expected digest.Digest
StartedAt time.Time
UpdatedAt time.Time
}
// WalkFunc defines the callback for a blob walk.
type WalkFunc func(Info) error
// Manager provides methods for inspecting, listing and removing content.
type Manager interface {
// Info will return metadata about content available in the content store.
//
// If the content is not present, ErrNotFound will be returned.
Info(ctx context.Context, dgst digest.Digest) (Info, error)
// Update updates mutable information related to content.
// If one or more fieldpaths are provided, only those
// fields will be updated.
// Mutable fields:
// labels.*
Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
// Walk will call fn for each item in the content store which
// match the provided filters. If no filters are given all
// items will be walked.
Walk(ctx context.Context, fn WalkFunc, filters ...string) error
// Delete removes the content from the store.
Delete(ctx context.Context, dgst digest.Digest) error
}
// IngestManager provides methods for managing ingests.
type IngestManager interface {
// Status returns the status of the provided ref.
Status(ctx context.Context, ref string) (Status, error)
// ListStatuses returns the status of any active ingestions whose refs match the
// provided regular expression. If empty, all active ingestions will be
// returned.
ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
// Abort completely cancels the ingest operation targeted by ref.
Abort(ctx context.Context, ref string) error
}
// Writer handles the write of content into a content store
type Writer interface {
// Close is expected to be called after Commit() once the content has been
// committed. Closing a writer without committing allows resuming or aborting.
io.WriteCloser
// Digest may return an empty digest or panic until the content is committed.
Digest() digest.Digest
// Commit commits the blob (but no roll-back is guaranteed on an error).
// size and expected can be zero-value when unknown.
Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error
// Status returns the current state of the write
Status() (Status, error)
// Truncate updates the size of the target blob
Truncate(size int64) error
}
// Store combines the methods of content-oriented interfaces into a set that
// is commonly provided by complete implementations.
type Store interface {
Manager
Provider
IngestManager
Ingester
}
// Opt is used to alter the mutable properties of content
type Opt func(*Info) error
// WithLabels allows labels to be set on content
func WithLabels(labels map[string]string) Opt {
return func(info *Info) error {
info.Labels = labels
return nil
}
}
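
A hedged sketch of the Ingester/Writer/Opt contract defined above — the ref and the label key are made-up values, and the helper name is hypothetical:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
)

// commitBlob shows the Writer flow: open a writer for a ref, write the
// bytes, then Commit with an Opt such as WithLabels.
func commitBlob(ctx context.Context, ing content.Ingester, data []byte) error {
	dgst := digest.FromBytes(data)
	w, err := ing.Writer(ctx, "example-ref", int64(len(data)), dgst)
	if err != nil {
		return err
	}
	// Closing without a successful Commit leaves the ingest resumable.
	defer w.Close()
	if _, err := w.Write(data); err != nil {
		return err
	}
	// "example.label" is a made-up label key.
	return w.Commit(ctx, int64(len(data)), dgst,
		content.WithLabels(map[string]string{"example.label": "demo"}))
}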

View file

@ -0,0 +1,139 @@
package content
import (
"context"
"io"
"io/ioutil"
"sync"
"github.com/containerd/containerd/errdefs"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
var bufPool = sync.Pool{
New: func() interface{} {
buffer := make([]byte, 1<<20)
return &buffer
},
}
// NewReader returns an io.Reader from a ReaderAt
func NewReader(ra ReaderAt) io.Reader {
rd := io.NewSectionReader(ra, 0, ra.Size())
return rd
}
// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
func ReadBlob(ctx context.Context, provider Provider, dgst digest.Digest) ([]byte, error) {
ra, err := provider.ReaderAt(ctx, dgst)
if err != nil {
return nil, err
}
defer ra.Close()
p := make([]byte, ra.Size())
_, err = ra.ReadAt(p, 0)
return p, err
}
// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
cw, err := cs.Writer(ctx, ref, size, expected)
if err != nil {
if !errdefs.IsAlreadyExists(err) {
return err
}
return nil // already present
}
defer cw.Close()
return Copy(ctx, cw, r, size, expected, opts...)
}
// Copy copies data with the expected digest from the reader into the
// provided content store writer.
//
// This is useful when the digest and size are known beforehand. When
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
ws, err := cw.Status()
if err != nil {
return err
}
if ws.Offset > 0 {
r, err = seekReader(r, ws.Offset, size)
if err != nil {
return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
}
}
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
if _, err := io.CopyBuffer(cw, r, *buf); err != nil {
return err
}
if err := cw.Commit(ctx, size, expected, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
}
}
return nil
}
// seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding
// up to the given offset.
func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
// attempt to resolve r as a seeker and setup the offset.
seeker, ok := r.(io.Seeker)
if ok {
nn, err := seeker.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
if nn != offset {
return nil, errors.Errorf("seeked to %v, expected offset %v", nn, offset)
}
return r, nil
}
// ok, let's try io.ReaderAt!
readerAt, ok := r.(io.ReaderAt)
if ok && size > offset {
sr := io.NewSectionReader(readerAt, offset, size)
return sr, nil
}
// well then, let's just discard up to the offset
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
n, err := io.CopyBuffer(ioutil.Discard, io.LimitReader(r, offset), *buf)
if err != nil {
return nil, errors.Wrap(err, "failed to discard to offset")
}
if n != offset {
return nil, errors.Errorf("unable to discard to offset")
}
return r, nil
}
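
A sketch of the helpers above under stated assumptions — the store is any content.Store, and "example-ref" is an arbitrary ingest reference:

package example

import (
	"bytes"
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// roundTrip stores bytes with WriteBlob (a no-op if the digest already
// exists) and fetches them back with ReadBlob.
func roundTrip(ctx context.Context, store content.Store, data []byte) error {
	dgst := digest.FromBytes(data)
	if err := content.WriteBlob(ctx, store, "example-ref",
		bytes.NewReader(data), int64(len(data)), dgst); err != nil {
		return err
	}
	got, err := content.ReadBlob(ctx, store, dgst)
	if err != nil {
		return err
	}
	if !bytes.Equal(got, data) {
		return errors.New("round trip mismatch")
	}
	return nil
}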

View file

@ -0,0 +1,112 @@
package content
import (
"bytes"
"context"
"io"
"strings"
"testing"
"github.com/containerd/containerd/errdefs"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type copySource struct {
reader io.Reader
size int64
digest digest.Digest
}
func TestCopy(t *testing.T) {
defaultSource := newCopySource("this is the source to copy")
var testcases = []struct {
name string
source copySource
writer fakeWriter
expected string
}{
{
name: "copy no offset",
source: defaultSource,
writer: fakeWriter{},
expected: "this is the source to copy",
},
{
name: "copy with offset from seeker",
source: defaultSource,
writer: fakeWriter{status: Status{Offset: 8}},
expected: "the source to copy",
},
{
name: "copy with offset from unseekable source",
source: copySource{reader: bytes.NewBufferString("foobar"), size: 6},
writer: fakeWriter{status: Status{Offset: 3}},
expected: "bar",
},
{
name: "commit already exists",
source: defaultSource,
writer: fakeWriter{commitFunc: func() error {
return errdefs.ErrAlreadyExists
}},
},
}
for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) {
err := Copy(context.Background(),
&testcase.writer,
testcase.source.reader,
testcase.source.size,
testcase.source.digest)
require.NoError(t, err)
assert.Equal(t, testcase.source.digest, testcase.writer.committedDigest)
assert.Equal(t, testcase.expected, testcase.writer.String())
})
}
}
func newCopySource(raw string) copySource {
return copySource{
reader: strings.NewReader(raw),
size: int64(len(raw)),
digest: digest.FromBytes([]byte(raw)),
}
}
type fakeWriter struct {
bytes.Buffer
committedDigest digest.Digest
status Status
commitFunc func() error
}
func (f *fakeWriter) Close() error {
f.Buffer.Reset()
return nil
}
func (f *fakeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error {
f.committedDigest = expected
if f.commitFunc == nil {
return nil
}
return f.commitFunc()
}
func (f *fakeWriter) Digest() digest.Digest {
return f.committedDigest
}
func (f *fakeWriter) Status() (Status, error) {
return f.status, nil
}
func (f *fakeWriter) Truncate(size int64) error {
f.Buffer.Truncate(int(size))
return nil
}

View file

@ -0,0 +1,49 @@
package containerd
import (
"context"
contentapi "github.com/containerd/containerd/api/services/content/v1"
digest "github.com/opencontainers/go-digest"
)
type remoteReaderAt struct {
ctx context.Context
digest digest.Digest
size int64
client contentapi.ContentClient
}
func (ra *remoteReaderAt) Size() int64 {
return ra.size
}
func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
rr := &contentapi.ReadContentRequest{
Digest: ra.digest,
Offset: off,
Size_: int64(len(p)),
}
rc, err := ra.client.Read(ra.ctx, rr)
if err != nil {
return 0, err
}
for len(p) > 0 {
var resp *contentapi.ReadContentResponse
// fill our buffer up until we can fill p.
resp, err = rc.Recv()
if err != nil {
return n, err
}
copied := copy(p, resp.Data)
n += copied
p = p[copied:]
}
return n, nil
}
func (ra *remoteReaderAt) Close() error {
return nil
}

View file

@ -0,0 +1,208 @@
package containerd
import (
"context"
"io"
contentapi "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
protobuftypes "github.com/gogo/protobuf/types"
digest "github.com/opencontainers/go-digest"
)
type remoteContent struct {
client contentapi.ContentClient
}
// NewContentStoreFromClient returns a new content store
func NewContentStoreFromClient(client contentapi.ContentClient) content.Store {
return &remoteContent{
client: client,
}
}
func (rs *remoteContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{
Digest: dgst,
})
if err != nil {
return content.Info{}, errdefs.FromGRPC(err)
}
return infoFromGRPC(resp.Info), nil
}
func (rs *remoteContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
session, err := rs.client.List(ctx, &contentapi.ListContentRequest{
Filters: filters,
})
if err != nil {
return errdefs.FromGRPC(err)
}
for {
msg, err := session.Recv()
if err != nil {
if err != io.EOF {
return errdefs.FromGRPC(err)
}
break
}
for _, info := range msg.Info {
if err := fn(infoFromGRPC(info)); err != nil {
return err
}
}
}
return nil
}
func (rs *remoteContent) Delete(ctx context.Context, dgst digest.Digest) error {
if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{
Digest: dgst,
}); err != nil {
return errdefs.FromGRPC(err)
}
return nil
}
func (rs *remoteContent) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
i, err := rs.Info(ctx, dgst)
if err != nil {
return nil, err
}
return &remoteReaderAt{
ctx: ctx,
digest: dgst,
size: i.Size,
client: rs.client,
}, nil
}
func (rs *remoteContent) Status(ctx context.Context, ref string) (content.Status, error) {
resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
Ref: ref,
})
if err != nil {
return content.Status{}, errdefs.FromGRPC(err)
}
status := resp.Status
return content.Status{
Ref: status.Ref,
StartedAt: status.StartedAt,
UpdatedAt: status.UpdatedAt,
Offset: status.Offset,
Total: status.Total,
Expected: status.Expected,
}, nil
}
func (rs *remoteContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
resp, err := rs.client.Update(ctx, &contentapi.UpdateRequest{
Info: infoToGRPC(info),
UpdateMask: &protobuftypes.FieldMask{
Paths: fieldpaths,
},
})
if err != nil {
return content.Info{}, errdefs.FromGRPC(err)
}
return infoFromGRPC(resp.Info), nil
}
func (rs *remoteContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
resp, err := rs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{
Filters: filters,
})
if err != nil {
return nil, errdefs.FromGRPC(err)
}
var statuses []content.Status
for _, status := range resp.Statuses {
statuses = append(statuses, content.Status{
Ref: status.Ref,
StartedAt: status.StartedAt,
UpdatedAt: status.UpdatedAt,
Offset: status.Offset,
Total: status.Total,
Expected: status.Expected,
})
}
return statuses, nil
}
func (rs *remoteContent) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
wrclient, offset, err := rs.negotiate(ctx, ref, size, expected)
if err != nil {
return nil, errdefs.FromGRPC(err)
}
return &remoteWriter{
ref: ref,
client: wrclient,
offset: offset,
}, nil
}
// Abort completely cancels the ingest operation targeted by ref.
func (rs *remoteContent) Abort(ctx context.Context, ref string) error {
if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
Ref: ref,
}); err != nil {
return errdefs.FromGRPC(err)
}
return nil
}
func (rs *remoteContent) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
wrclient, err := rs.client.Write(ctx)
if err != nil {
return nil, 0, err
}
if err := wrclient.Send(&contentapi.WriteContentRequest{
Action: contentapi.WriteActionStat,
Ref: ref,
Total: size,
Expected: expected,
}); err != nil {
return nil, 0, err
}
resp, err := wrclient.Recv()
if err != nil {
return nil, 0, err
}
return wrclient, resp.Offset, nil
}
func infoToGRPC(info content.Info) contentapi.Info {
return contentapi.Info{
Digest: info.Digest,
Size_: info.Size,
CreatedAt: info.CreatedAt,
UpdatedAt: info.UpdatedAt,
Labels: info.Labels,
}
}
func infoFromGRPC(info contentapi.Info) content.Info {
return content.Info{
Digest: info.Digest,
Size: info.Size_,
CreatedAt: info.CreatedAt,
UpdatedAt: info.UpdatedAt,
Labels: info.Labels,
}
}

View file

@ -0,0 +1,54 @@
package containerd
import (
"context"
"testing"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/testsuite"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
)
func newContentStore(ctx context.Context, root string) (context.Context, content.Store, func() error, error) {
client, err := New(address)
if err != nil {
return nil, nil, nil, err
}
ctx, releaselease, err := client.WithLease(ctx)
if err != nil {
return nil, nil, nil, err
}
cs := client.ContentStore()
return ctx, cs, func() error {
statuses, err := cs.ListStatuses(ctx)
if err != nil {
return err
}
for _, st := range statuses {
if err := cs.Abort(ctx, st.Ref); err != nil {
return errors.Wrapf(err, "failed to abort %s", st.Ref)
}
}
releaselease()
return cs.Walk(ctx, func(info content.Info) error {
if err := cs.Delete(ctx, info.Digest); err != nil {
if errdefs.IsNotFound(err) {
return nil
}
return err
}
return nil
})
}, nil
}
func TestContentClient(t *testing.T) {
if testing.Short() {
t.Skip()
}
testsuite.ContentSuite(t, "ContentClient", newContentStore)
}

View file

@ -0,0 +1,123 @@
package containerd
import (
"context"
"io"
contentapi "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type remoteWriter struct {
ref string
client contentapi.Content_WriteClient
offset int64
digest digest.Digest
}
// send performs a synchronous req-resp cycle on the client.
func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) {
if err := rw.client.Send(req); err != nil {
return nil, err
}
resp, err := rw.client.Recv()
if err == nil {
// try to keep these in sync
if resp.Digest != "" {
rw.digest = resp.Digest
}
}
return resp, err
}
func (rw *remoteWriter) Status() (content.Status, error) {
resp, err := rw.send(&contentapi.WriteContentRequest{
Action: contentapi.WriteActionStat,
})
if err != nil {
return content.Status{}, errors.Wrap(err, "error getting writer status")
}
return content.Status{
Ref: rw.ref,
Offset: resp.Offset,
Total: resp.Total,
StartedAt: resp.StartedAt,
UpdatedAt: resp.UpdatedAt,
}, nil
}
func (rw *remoteWriter) Digest() digest.Digest {
return rw.digest
}
func (rw *remoteWriter) Write(p []byte) (n int, err error) {
offset := rw.offset
resp, err := rw.send(&contentapi.WriteContentRequest{
Action: contentapi.WriteActionWrite,
Offset: offset,
Data: p,
})
if err != nil {
return 0, err
}
n = int(resp.Offset - offset)
if n < len(p) {
err = io.ErrShortWrite
}
rw.offset += int64(n)
if resp.Digest != "" {
rw.digest = resp.Digest
}
return
}
func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
var base content.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
return err
}
}
resp, err := rw.send(&contentapi.WriteContentRequest{
Action: contentapi.WriteActionCommit,
Total: size,
Offset: rw.offset,
Expected: expected,
Labels: base.Labels,
})
if err != nil {
return errdefs.FromGRPC(err)
}
if size != 0 && resp.Offset != size {
return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
}
if expected != "" && resp.Digest != expected {
return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
}
rw.digest = resp.Digest
rw.offset = resp.Offset
return nil
}
func (rw *remoteWriter) Truncate(size int64) error {
// This truncation won't actually be validated until a write is issued.
rw.offset = size
return nil
}
func (rw *remoteWriter) Close() error {
return rw.client.CloseSend()
}

View file

@ -0,0 +1,11 @@
# contrib
The `contrib` directory contains packages that do not belong in the core containerd packages but still contribute to overall containerd usability.
Packages such as AppArmor or SELinux are placed in `contrib` because they are platform dependent and often require higher-level tools and profiles to work.
Packaging and other build tools can be added to `contrib` to aid in packaging containerd for various distributions.
## Testing
Code in the `contrib` directory may or may not have been tested in the normal test pipeline for core components.

View file

@ -0,0 +1,40 @@
// +build linux
package seccomp
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
"github.com/opencontainers/runtime-spec/specs-go"
)
// WithProfile receives the name of a file stored on disk containing a
// JSON-formatted seccomp profile, as specified by the opencontainers/runtime-spec.
// The profile is read from the file, unmarshaled, and set on the spec.
func WithProfile(profile string) oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Seccomp = &specs.LinuxSeccomp{}
f, err := ioutil.ReadFile(profile)
if err != nil {
return fmt.Errorf("Cannot load seccomp profile %q: %v", profile, err)
}
if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil {
return fmt.Errorf("Decoding seccomp profile failed %q: %v", profile, err)
}
return nil
}
}
// WithDefaultProfile sets the default seccomp profile to the spec.
// Note: must follow the setting of process capabilities
func WithDefaultProfile() oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Seccomp = DefaultProfile(s)
return nil
}
}
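
A hedged sketch of applying the opt above, assuming containerd v1.0's oci.GenerateSpec signature; the nil client and empty container are acceptable here only because this opt does not consult them:

package example

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/contrib/seccomp"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// defaultSeccompSpec builds a default Linux spec (which includes process
// capabilities, as WithDefaultProfile requires) and applies the seccomp opt.
func defaultSeccompSpec(ctx context.Context) (*specs.Spec, error) {
	return oci.GenerateSpec(ctx, nil, &containers.Container{},
		seccomp.WithDefaultProfile())
}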

View file

@ -0,0 +1,581 @@
// +build linux
package seccomp
import (
"runtime"
"syscall"
"github.com/opencontainers/runtime-spec/specs-go"
)
func arches() []specs.Arch {
switch runtime.GOARCH {
case "amd64":
return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32}
case "arm64":
return []specs.Arch{specs.ArchARM, specs.ArchAARCH64}
case "mips64":
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
case "mips64n32":
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
case "mipsel64":
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
case "mipsel64n32":
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
case "s390x":
return []specs.Arch{specs.ArchS390, specs.ArchS390X}
default:
return []specs.Arch{}
}
}
// DefaultProfile defines the whitelist for the default seccomp profile.
func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
syscalls := []specs.LinuxSyscall{
{
Names: []string{
"accept",
"accept4",
"access",
"alarm",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_ctl_old",
"epoll_pwait",
"epoll_wait",
"epoll_wait_old",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"futimesat",
"getcpu",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"get_thread_area",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"ioctl",
"io_destroy",
"io_getevents",
"ioprio_get",
"ioprio_set",
"io_setup",
"io_submit",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"nanosleep",
"newfstatat",
"_newselect",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"prlimit64",
"pselect6",
"pwrite64",
"pwritev",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"set_robust_list",
"setsid",
"setsockopt",
"set_thread_area",
"set_tid_address",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"syslog",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"uname",
"unlink",
"unlinkat",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0x0,
Op: specs.OpEqualTo,
},
},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0x0008,
Op: specs.OpEqualTo,
},
},
},
{
Names: []string{"personality"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: 0xffffffff,
Op: specs.OpEqualTo,
},
},
},
}
s := &specs.LinuxSeccomp{
DefaultAction: specs.ActErrno,
Architectures: arches(),
Syscalls: syscalls,
}
// include by arch
switch runtime.GOARCH {
case "arm", "arm64":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"arm_fadvise64_64",
"arm_sync_file_range",
"breakpoint",
"cacheflush",
"set_tls",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "amd64":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"arch_prctl",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
fallthrough
case "386":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"modify_ldt",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "s390", "s390x":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"s390_pci_mmio_read",
"s390_pci_mmio_write",
"s390_runtime_instr",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
}
// make a map of enabled capabilities
caps := make(map[string]bool)
for _, c := range sp.Process.Capabilities.Bounding {
caps[c] = true
}
for _, c := range sp.Process.Capabilities.Effective {
caps[c] = true
}
for _, c := range sp.Process.Capabilities.Inheritable {
caps[c] = true
}
for _, c := range sp.Process.Capabilities.Permitted {
caps[c] = true
}
for _, c := range sp.Process.Capabilities.Ambient {
caps[c] = true
}
for c := range caps {
switch c {
case "CAP_DAC_READ_SEARCH":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"open_by_handle_at"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_ADMIN":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"bpf",
"clone",
"fanotify_init",
"lookup_dcookie",
"mount",
"name_to_handle_at",
"perf_event_open",
"setdomainname",
"sethostname",
"setns",
"umount",
"umount2",
"unshare",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_BOOT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"reboot"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_CHROOT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"chroot"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_MODULE":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"delete_module",
"init_module",
"finit_module",
"query_module",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_PACCT":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"acct"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_PTRACE":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"kcmp",
"process_vm_readv",
"process_vm_writev",
"ptrace",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_RAWIO":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"iopl",
"ioperm",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_TIME":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"settimeofday",
"stime",
"adjtimex",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
case "CAP_SYS_TTY_CONFIG":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{"vhangup"},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{},
})
}
}
if !caps["CAP_SYS_ADMIN"] {
switch runtime.GOARCH {
case "s390", "s390x":
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"clone",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 1,
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
ValueTwo: 0,
Op: specs.OpMaskedEqual,
},
},
})
default:
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
Names: []string{
"clone",
},
Action: specs.ActAllow,
Args: []specs.LinuxSeccompArg{
{
Index: 0,
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
ValueTwo: 0,
Op: specs.OpMaskedEqual,
},
},
})
}
}
return s
}
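
A small sketch of the masked-equal arithmetic behind the clone rule above: without CAP_SYS_ADMIN, clone(2) is allowed only when (flags & mask) == 0, i.e. when no new namespace is requested. The flag values chosen below are illustrative:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Mask used by the clone rule: any namespace flag makes
	// (flags & mask) != 0, so the MaskedEqual rule no longer matches
	// and the profile's default ActErrno action applies.
	mask := uintptr(syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS |
		syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER |
		syscall.CLONE_NEWPID | syscall.CLONE_NEWNET)

	threadFlags := uintptr(syscall.CLONE_VM | syscall.CLONE_FS) // ordinary thread creation
	nsFlags := uintptr(syscall.CLONE_NEWNS)                     // requests a mount namespace

	fmt.Println("thread clone allowed:", threadFlags&mask == 0) // true
	fmt.Println("namespace clone allowed:", nsFlags&mask == 0)  // false
}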

117
vendor/github.com/containerd/containerd/daemon_test.go generated vendored Normal file
View file

@ -0,0 +1,117 @@
package containerd
import (
"context"
"io"
"os/exec"
"sync"
"syscall"
"github.com/pkg/errors"
)
type daemon struct {
sync.Mutex
addr string
cmd *exec.Cmd
}
func (d *daemon) start(name, address string, args []string, stdout, stderr io.Writer) error {
d.Lock()
defer d.Unlock()
if d.cmd != nil {
return errors.New("daemon is already running")
}
args = append(args, []string{"--address", address}...)
cmd := exec.Command(name, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Start(); err != nil {
cmd.Wait()
return errors.Wrap(err, "failed to start daemon")
}
d.addr = address
d.cmd = cmd
return nil
}
func (d *daemon) waitForStart(ctx context.Context) (*Client, error) {
var (
client *Client
serving bool
err error
)
client, err = New(address)
if err != nil {
return nil, err
}
serving, err = client.IsServing(ctx)
if !serving {
client.Close()
if err == nil {
err = errors.New("connection was successful but service is not available")
}
return nil, err
}
return client, err
}
func (d *daemon) Stop() error {
d.Lock()
defer d.Unlock()
if d.cmd == nil {
return errors.New("daemon is not running")
}
return d.cmd.Process.Signal(syscall.SIGTERM)
}
func (d *daemon) Kill() error {
d.Lock()
defer d.Unlock()
if d.cmd == nil {
return errors.New("daemon is not running")
}
return d.cmd.Process.Kill()
}
func (d *daemon) Wait() error {
d.Lock()
defer d.Unlock()
if d.cmd == nil {
return errors.New("daemon is not running")
}
err := d.cmd.Wait()
d.cmd = nil
return err
}
func (d *daemon) Restart(stopCb func()) error {
d.Lock()
defer d.Unlock()
if d.cmd == nil {
return errors.New("daemon is not running")
}
var err error
if err = d.cmd.Process.Signal(syscall.SIGTERM); err != nil {
return errors.Wrap(err, "failed to signal daemon")
}
d.cmd.Wait()
if stopCb != nil {
stopCb()
}
cmd := exec.Command(d.cmd.Path, d.cmd.Args[1:]...)
cmd.Stdout = d.cmd.Stdout
cmd.Stderr = d.cmd.Stderr
if err := cmd.Start(); err != nil {
cmd.Wait()
return errors.Wrap(err, "failed to start new daemon instance")
}
d.cmd = cmd
return nil
}

83
vendor/github.com/containerd/containerd/diff.go generated vendored Normal file
View file

@ -0,0 +1,83 @@
package containerd
import (
diffapi "github.com/containerd/containerd/api/services/diff/v1"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/mount"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
)
// NewDiffServiceFromClient returns a new diff service which communicates
// over a GRPC connection.
func NewDiffServiceFromClient(client diffapi.DiffClient) diff.Differ {
return &diffRemote{
client: client,
}
}
type diffRemote struct {
client diffapi.DiffClient
}
func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
req := &diffapi.ApplyRequest{
Diff: fromDescriptor(diff),
Mounts: fromMounts(mounts),
}
resp, err := r.client.Apply(ctx, req)
if err != nil {
return ocispec.Descriptor{}, err
}
return toDescriptor(resp.Applied), nil
}
func (r *diffRemote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) {
var config diff.Config
for _, opt := range opts {
if err := opt(&config); err != nil {
return ocispec.Descriptor{}, err
}
}
req := &diffapi.DiffRequest{
Left: fromMounts(a),
Right: fromMounts(b),
MediaType: config.MediaType,
Ref: config.Reference,
Labels: config.Labels,
}
resp, err := r.client.Diff(ctx, req)
if err != nil {
return ocispec.Descriptor{}, err
}
return toDescriptor(resp.Diff), nil
}
func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
return ocispec.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size: d.Size_,
}
}
func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
return &types.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size_: d.Size,
}
}
func fromMounts(mounts []mount.Mount) []*types.Mount {
apiMounts := make([]*types.Mount, len(mounts))
for i, m := range mounts {
apiMounts[i] = &types.Mount{
Type: m.Type,
Source: m.Source,
Options: m.Options,
}
}
return apiMounts
}
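
A hedged sketch of driving the Differ above; the inline opt mirrors the diff.Config fields that DiffMounts reads, and the helper name is hypothetical:

package example

import (
	"context"

	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/mount"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// layerDiff computes the layer between two sets of mounts, requesting a
// gzipped tar media type via an inline diff.Opt.
func layerDiff(ctx context.Context, d diff.Differ, lower, upper []mount.Mount) (ocispec.Descriptor, error) {
	withGzip := func(c *diff.Config) error {
		c.MediaType = ocispec.MediaTypeImageLayerGzip
		return nil
	}
	return d.DiffMounts(ctx, lower, upper, withGzip)
}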

View file

@ -0,0 +1,62 @@
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with errors.Wrap and errors.Wrapf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs
import "github.com/pkg/errors"
// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these error classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
ErrInvalidArgument = errors.New("invalid argument")
ErrNotFound = errors.New("not found")
ErrAlreadyExists = errors.New("already exists")
ErrFailedPrecondition = errors.New("failed precondition")
ErrUnavailable = errors.New("unavailable")
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
)
// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
return errors.Cause(err) == ErrInvalidArgument
}
// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
return errors.Cause(err) == ErrNotFound
}
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
return errors.Cause(err) == ErrAlreadyExists
}
// IsFailedPrecondition returns true if an operation could not proceed due to
// the lack of a particular condition
func IsFailedPrecondition(err error) bool {
return errors.Cause(err) == ErrFailedPrecondition
}
// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
return errors.Cause(err) == ErrUnavailable
}
// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
return errors.Cause(err) == ErrNotImplemented
}
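
A minimal sketch of the intended usage: wrapping with errors.Wrapf preserves the class because errors.Cause unwraps the chain, so the IsXXX helpers still match. "redis" is a made-up container ID:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	err := errors.Wrapf(errdefs.ErrNotFound, "container %q", "redis")
	fmt.Println(err)                     // container "redis": not found
	fmt.Println(errdefs.IsNotFound(err)) // true
}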

122
vendor/github.com/containerd/containerd/errdefs/grpc.go generated vendored Normal file
View file

@ -0,0 +1,122 @@
package errdefs
import (
"strings"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
if err == nil {
return nil
}
if isGRPCError(err) {
// error has already been mapped to grpc
return err
}
switch {
case IsInvalidArgument(err):
return status.Errorf(codes.InvalidArgument, err.Error())
case IsNotFound(err):
return status.Errorf(codes.NotFound, err.Error())
case IsAlreadyExists(err):
return status.Errorf(codes.AlreadyExists, err.Error())
case IsFailedPrecondition(err):
return status.Errorf(codes.FailedPrecondition, err.Error())
case IsUnavailable(err):
return status.Errorf(codes.Unavailable, err.Error())
case IsNotImplemented(err):
return status.Errorf(codes.Unimplemented, err.Error())
}
return err
}
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
func ToGRPCf(err error, format string, args ...interface{}) error {
return ToGRPC(errors.Wrapf(err, format, args...))
}
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
if err == nil {
return nil
}
var cls error // divide these into error classes, becomes the cause
switch code(err) {
case codes.InvalidArgument:
cls = ErrInvalidArgument
case codes.AlreadyExists:
cls = ErrAlreadyExists
case codes.NotFound:
cls = ErrNotFound
case codes.Unavailable:
cls = ErrUnavailable
case codes.FailedPrecondition:
cls = ErrFailedPrecondition
case codes.Unimplemented:
cls = ErrNotImplemented
default:
cls = ErrUnknown
}
msg := rebaseMessage(cls, err)
if msg != "" {
err = errors.Wrapf(cls, msg)
} else {
err = errors.WithStack(cls)
}
return err
}
// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
desc := errDesc(err)
clss := cls.Error()
if desc == clss {
return ""
}
return strings.TrimSuffix(desc, ": "+clss)
}
func isGRPCError(err error) bool {
_, ok := status.FromError(err)
return ok
}
func code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
func errDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}

View file

@ -0,0 +1,65 @@
package errdefs
import (
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pkg/errors"
)
func TestGRPCRoundTrip(t *testing.T) {
errShouldLeaveAlone := errors.New("unknown to package")
for _, testcase := range []struct {
input error
cause error
str string
}{
{
input: ErrAlreadyExists,
cause: ErrAlreadyExists,
},
{
input: ErrNotFound,
cause: ErrNotFound,
},
{
input: errors.Wrapf(ErrFailedPrecondition, "test test test"),
cause: ErrFailedPrecondition,
str: "test test test: failed precondition",
},
{
input: status.Errorf(codes.Unavailable, "should be not available"),
cause: ErrUnavailable,
str: "should be not available: unavailable",
},
{
input: errShouldLeaveAlone,
cause: ErrUnknown,
str: errShouldLeaveAlone.Error() + ": " + ErrUnknown.Error(),
},
} {
t.Run(testcase.input.Error(), func(t *testing.T) {
t.Logf("input: %v", testcase.input)
gerr := ToGRPC(testcase.input)
t.Logf("grpc: %v", gerr)
ferr := FromGRPC(gerr)
t.Logf("recovered: %v", ferr)
if errors.Cause(ferr) != testcase.cause {
t.Fatalf("unexpected cause: %v != %v", errors.Cause(ferr), testcase.cause)
}
expected := testcase.str
if expected == "" {
expected = testcase.cause.Error()
}
if ferr.Error() != expected {
t.Fatalf("unexpected string: %q != %q", ferr.Error(), expected)
}
})
}
}

65
vendor/github.com/containerd/containerd/export_test.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
package containerd
import (
"archive/tar"
"io"
"runtime"
"testing"
"github.com/containerd/containerd/images/oci"
)
// TestOCIExport exports testImage as a tar stream
func TestOCIExport(t *testing.T) {
// TODO: support windows
if testing.Short() || runtime.GOOS == "windows" {
t.Skip()
}
ctx, cancel := testContext()
defer cancel()
client, err := New(address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
pulled, err := client.Pull(ctx, testImage)
if err != nil {
t.Fatal(err)
}
exportedStream, err := client.Export(ctx, &oci.V1Exporter{}, pulled.Target())
if err != nil {
t.Fatal(err)
}
assertOCITar(t, exportedStream)
}
func assertOCITar(t *testing.T, r io.Reader) {
// TODO: add more assertion
tr := tar.NewReader(r)
foundOCILayout := false
foundIndexJSON := false
for {
h, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Error(err)
continue
}
if h.Name == "oci-layout" {
foundOCILayout = true
}
if h.Name == "index.json" {
foundIndexJSON = true
}
}
if !foundOCILayout {
t.Error("oci-layout not found")
}
if !foundIndexJSON {
t.Error("index.json not found")
}
}

View file

@ -0,0 +1,83 @@
package fs
import (
"io"
"os"
"syscall"
"github.com/containerd/containerd/sys"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func copyFileInfo(fi os.FileInfo, name string) error {
st := fi.Sys().(*syscall.Stat_t)
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
return errors.Wrapf(err, "failed to chown %s", name)
}
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
if err := os.Chmod(name, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", name)
}
}
timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
return nil
}
func copyFileContent(dst, src *os.File) error {
st, err := src.Stat()
if err != nil {
return errors.Wrap(err, "unable to stat source")
}
n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, int(st.Size()), 0)
if err != nil {
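// ENOSYS means the kernel lacks copy_file_range and EXDEV means the copy
// crosses devices; fall back to a buffered userspace copy for those.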
if err != unix.ENOSYS && err != unix.EXDEV {
return errors.Wrap(err, "copy file range failed")
}
buf := bufferPool.Get().(*[]byte)
_, err = io.CopyBuffer(dst, src, *buf)
bufferPool.Put(buf)
return err
}
if int64(n) != st.Size() {
return errors.Wrapf(err, "short copy: %d of %d", int64(n), st.Size())
}
return nil
}
func copyXAttrs(dst, src string) error {
xattrKeys, err := sysx.LListxattr(src)
if err != nil {
return errors.Wrapf(err, "failed to list xattrs on %s", src)
}
for _, xattr := range xattrKeys {
data, err := sysx.LGetxattr(src, xattr)
if err != nil {
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
}
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
}
}
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

View file

@ -0,0 +1,71 @@
package fs
import (
"io/ioutil"
"os"
"testing"
_ "crypto/sha256"
"github.com/containerd/containerd/fs/fstest"
"github.com/pkg/errors"
)
// TODO: Create copy directory which requires privilege
// chown
// mknod
// setxattr fstest.SetXAttr("/home", "trusted.overlay.opaque", "y"),
func TestCopyDirectory(t *testing.T) {
apply := fstest.Apply(
fstest.CreateDir("/etc/", 0755),
fstest.CreateFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644),
fstest.Link("/etc/hosts", "/etc/hosts.allow"),
fstest.CreateDir("/usr/local/lib", 0755),
fstest.CreateFile("/usr/local/lib/libnothing.so", []byte{0x00, 0x00}, 0755),
fstest.Symlink("libnothing.so", "/usr/local/lib/libnothing.so.2"),
fstest.CreateDir("/home", 0755),
)
if err := testCopy(apply); err != nil {
t.Fatalf("Copy test failed: %+v", err)
}
}
// This test used to fail because link-no-nothing.txt would be copied first,
// then file operations in dst during the CopyDir would follow the symlink and
// fail.
func TestCopyDirectoryWithLocalSymlink(t *testing.T) {
apply := fstest.Apply(
fstest.CreateFile("nothing.txt", []byte{0x00, 0x00}, 0755),
fstest.Symlink("nothing.txt", "link-no-nothing.txt"),
)
if err := testCopy(apply); err != nil {
t.Fatalf("Copy test failed: %+v", err)
}
}
func testCopy(apply fstest.Applier) error {
t1, err := ioutil.TempDir("", "test-copy-src-")
if err != nil {
return errors.Wrap(err, "failed to create temporary directory")
}
defer os.RemoveAll(t1)
t2, err := ioutil.TempDir("", "test-copy-dst-")
if err != nil {
return errors.Wrap(err, "failed to create temporary directory")
}
defer os.RemoveAll(t2)
if err := apply.Apply(t1); err != nil {
return errors.Wrap(err, "failed to apply changes")
}
if err := CopyDir(t2, t1); err != nil {
return errors.Wrap(err, "failed to copy")
}
return fstest.CheckDirectoryEqual(t1, t2)
}

View file

@ -0,0 +1,68 @@
// +build solaris darwin freebsd
package fs
import (
"io"
"os"
"syscall"
"github.com/containerd/containerd/sys"
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func copyFileInfo(fi os.FileInfo, name string) error {
st := fi.Sys().(*syscall.Stat_t)
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
return errors.Wrapf(err, "failed to chown %s", name)
}
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
if err := os.Chmod(name, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", name)
}
}
timespec := []syscall.Timespec{sys.StatAtime(st), sys.StatMtime(st)}
if err := syscall.UtimesNano(name, timespec); err != nil {
return errors.Wrapf(err, "failed to utime %s", name)
}
return nil
}
func copyFileContent(dst, src *os.File) error {
buf := bufferPool.Get().(*[]byte)
_, err := io.CopyBuffer(dst, src, *buf)
bufferPool.Put(buf)
return err
}
func copyXAttrs(dst, src string) error {
xattrKeys, err := sysx.LListxattr(src)
if err != nil {
return errors.Wrapf(err, "failed to list xattrs on %s", src)
}
for _, xattr := range xattrKeys {
data, err := sysx.LGetxattr(src, xattr)
if err != nil {
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
}
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
}
}
return nil
}
func copyDevice(dst string, fi os.FileInfo) error {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}

396
vendor/github.com/containerd/containerd/fs/diff_test.go generated vendored Normal file
View file

@ -0,0 +1,396 @@
package fs
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/containerd/containerd/fs/fstest"
"github.com/pkg/errors"
)
// TODO: Additional tests
// - capability test (requires privilege)
// - chown test (requires privilege)
// - symlink test
// - hardlink test
func skipDiffTestOnWindows(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("diff implementation is incomplete on windows")
}
}
func TestSimpleDiff(t *testing.T) {
skipDiffTestOnWindows(t)
l1 := fstest.Apply(
fstest.CreateDir("/etc", 0755),
fstest.CreateFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644),
fstest.CreateFile("/etc/profile", []byte("PATH=/usr/bin"), 0644),
fstest.CreateFile("/etc/unchanged", []byte("PATH=/usr/bin"), 0644),
fstest.CreateFile("/etc/unexpected", []byte("#!/bin/sh"), 0644),
)
l2 := fstest.Apply(
fstest.CreateFile("/etc/hosts", []byte("mydomain 10.0.0.120"), 0644),
fstest.CreateFile("/etc/profile", []byte("PATH=/usr/bin"), 0666),
fstest.CreateDir("/root", 0700),
fstest.CreateFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644),
fstest.Remove("/etc/unexpected"),
)
diff := []TestChange{
Modify("/etc/hosts"),
Modify("/etc/profile"),
Delete("/etc/unexpected"),
Add("/root"),
Add("/root/.bashrc"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
func TestNestedDeletion(t *testing.T) {
skipDiffTestOnWindows(t)
l1 := fstest.Apply(
fstest.CreateDir("/d0", 0755),
fstest.CreateDir("/d1", 0755),
fstest.CreateDir("/d1/d2", 0755),
fstest.CreateFile("/d1/d2/f1", []byte("mydomain 10.0.0.1"), 0644),
)
l2 := fstest.Apply(
fstest.RemoveAll("/d0"),
fstest.RemoveAll("/d1"),
)
diff := []TestChange{
Delete("/d0"),
Delete("/d1"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
func TestDirectoryReplace(t *testing.T) {
skipDiffTestOnWindows(t)
l1 := fstest.Apply(
fstest.CreateDir("/dir1", 0755),
fstest.CreateFile("/dir1/f1", []byte("#####"), 0644),
fstest.CreateDir("/dir1/f2", 0755),
fstest.CreateFile("/dir1/f2/f3", []byte("#!/bin/sh"), 0644),
)
l2 := fstest.Apply(
fstest.CreateFile("/dir1/f11", []byte("#New file here"), 0644),
fstest.RemoveAll("/dir1/f2"),
fstest.CreateFile("/dir1/f2", []byte("Now file"), 0666),
)
diff := []TestChange{
Add("/dir1/f11"),
Modify("/dir1/f2"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
func TestRemoveDirectoryTree(t *testing.T) {
l1 := fstest.Apply(
fstest.CreateDir("/dir1/dir2/dir3", 0755),
fstest.CreateFile("/dir1/f1", []byte("f1"), 0644),
fstest.CreateFile("/dir1/dir2/f2", []byte("f2"), 0644),
)
l2 := fstest.Apply(
fstest.RemoveAll("/dir1"),
)
diff := []TestChange{
Delete("/dir1"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
func TestFileReplace(t *testing.T) {
l1 := fstest.Apply(
fstest.CreateFile("/dir1", []byte("a file, not a directory"), 0644),
)
l2 := fstest.Apply(
fstest.Remove("/dir1"),
fstest.CreateDir("/dir1/dir2", 0755),
fstest.CreateFile("/dir1/dir2/f1", []byte("also a file"), 0644),
)
diff := []TestChange{
Modify("/dir1"),
Add("/dir1/dir2"),
Add("/dir1/dir2/f1"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
func TestParentDirectoryPermission(t *testing.T) {
skipDiffTestOnWindows(t)
l1 := fstest.Apply(
fstest.CreateDir("/dir1", 0700),
fstest.CreateDir("/dir2", 0751),
fstest.CreateDir("/dir3", 0777),
)
l2 := fstest.Apply(
fstest.CreateDir("/dir1/d", 0700),
fstest.CreateFile("/dir1/d/f", []byte("irrelevant"), 0644),
fstest.CreateFile("/dir1/f", []byte("irrelevant"), 0644),
fstest.CreateFile("/dir2/f", []byte("irrelevant"), 0644),
fstest.CreateFile("/dir3/f", []byte("irrelevant"), 0644),
)
diff := []TestChange{
Add("/dir1/d"),
Add("/dir1/d/f"),
Add("/dir1/f"),
Add("/dir2/f"),
Add("/dir3/f"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
func TestUpdateWithSameTime(t *testing.T) {
skipDiffTestOnWindows(t)
tt := time.Now().Truncate(time.Second)
t1 := tt.Add(5 * time.Nanosecond)
t2 := tt.Add(6 * time.Nanosecond)
l1 := fstest.Apply(
fstest.CreateFile("/file-modified-time", []byte("1"), 0644),
fstest.Chtimes("/file-modified-time", t1, t1),
fstest.CreateFile("/file-no-change", []byte("1"), 0644),
fstest.Chtimes("/file-no-change", t1, t1),
fstest.CreateFile("/file-same-time", []byte("1"), 0644),
fstest.Chtimes("/file-same-time", t1, t1),
fstest.CreateFile("/file-truncated-time-1", []byte("1"), 0644),
fstest.Chtimes("/file-truncated-time-1", t1, t1),
fstest.CreateFile("/file-truncated-time-2", []byte("1"), 0644),
fstest.Chtimes("/file-truncated-time-2", tt, tt),
)
l2 := fstest.Apply(
fstest.CreateFile("/file-modified-time", []byte("2"), 0644),
fstest.Chtimes("/file-modified-time", t2, t2),
fstest.CreateFile("/file-no-change", []byte("1"), 0644),
fstest.Chtimes("/file-no-change", tt, tt), // use truncated time, should be regarded as no change
fstest.CreateFile("/file-same-time", []byte("2"), 0644),
fstest.Chtimes("/file-same-time", t1, t1),
fstest.CreateFile("/file-truncated-time-1", []byte("2"), 0644),
fstest.Chtimes("/file-truncated-time-1", tt, tt),
fstest.CreateFile("/file-truncated-time-2", []byte("2"), 0644),
fstest.Chtimes("/file-truncated-time-2", tt, tt),
)
diff := []TestChange{
// "/file-same-time" excluded because matching non-zero nanosecond values
Modify("/file-modified-time"),
Modify("/file-truncated-time-1"),
Modify("/file-truncated-time-2"),
}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
// buildkit#172
func TestLchtimes(t *testing.T) {
skipDiffTestOnWindows(t)
mtimes := []time.Time{
time.Unix(0, 0), // nsec is 0
time.Unix(0, 42), // nsec > 0
}
for _, mtime := range mtimes {
atime := time.Unix(424242, 42)
l1 := fstest.Apply(
fstest.CreateFile("/foo", []byte("foo"), 0644),
fstest.Symlink("/foo", "/lnk0"),
fstest.Lchtimes("/lnk0", atime, mtime),
)
l2 := fstest.Apply() // empty
diff := []TestChange{}
if err := testDiffWithBase(l1, l2, diff); err != nil {
t.Fatalf("Failed diff with base: %+v", err)
}
}
}
func testDiffWithBase(base, diff fstest.Applier, expected []TestChange) error {
t1, err := ioutil.TempDir("", "diff-with-base-lower-")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(t1)
t2, err := ioutil.TempDir("", "diff-with-base-upper-")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(t2)
if err := base.Apply(t1); err != nil {
return errors.Wrap(err, "failed to apply base filesystem")
}
if err := CopyDir(t2, t1); err != nil {
return errors.Wrap(err, "failed to copy base directory")
}
if err := diff.Apply(t2); err != nil {
return errors.Wrap(err, "failed to apply diff filesystem")
}
changes, err := collectChanges(t1, t2)
if err != nil {
return errors.Wrap(err, "failed to collect changes")
}
return checkChanges(t2, changes, expected)
}
func TestBaseDirectoryChanges(t *testing.T) {
apply := fstest.Apply(
fstest.CreateDir("/etc", 0755),
fstest.CreateFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644),
fstest.CreateFile("/etc/profile", []byte("PATH=/usr/bin"), 0644),
fstest.CreateDir("/root", 0700),
fstest.CreateFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644),
)
changes := []TestChange{
Add("/etc"),
Add("/etc/hosts"),
Add("/etc/profile"),
Add("/root"),
Add("/root/.bashrc"),
}
if err := testDiffWithoutBase(apply, changes); err != nil {
t.Fatalf("Failed diff without base: %+v", err)
}
}
func testDiffWithoutBase(apply fstest.Applier, expected []TestChange) error {
tmp, err := ioutil.TempDir("", "diff-without-base-")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(tmp)
if err := apply.Apply(tmp); err != nil {
return errors.Wrap(err, "failed to apply filesytem changes")
}
changes, err := collectChanges("", tmp)
if err != nil {
return errors.Wrap(err, "failed to collect changes")
}
return checkChanges(tmp, changes, expected)
}
func checkChanges(root string, changes, expected []TestChange) error {
if len(changes) != len(expected) {
return errors.Errorf("Unexpected number of changes:\n%s", diffString(changes, expected))
}
for i := range changes {
if changes[i].Path != expected[i].Path || changes[i].Kind != expected[i].Kind {
return errors.Errorf("Unexpected change at %d:\n%s", i, diffString(changes, expected))
}
if changes[i].Kind != ChangeKindDelete {
filename := filepath.Join(root, changes[i].Path)
efi, err := os.Stat(filename)
if err != nil {
return errors.Wrapf(err, "failed to stat %q", filename)
}
afi := changes[i].FileInfo
if afi.Size() != efi.Size() {
return errors.Errorf("Unexpected change size %d, %q has size %d", afi.Size(), filename, efi.Size())
}
if afi.Mode() != efi.Mode() {
return errors.Errorf("Unexpected change mode %s, %q has mode %s", afi.Mode(), filename, efi.Mode())
}
if afi.ModTime() != efi.ModTime() {
return errors.Errorf("Unexpected change modtime %s, %q has modtime %s", afi.ModTime(), filename, efi.ModTime())
}
if expected := filepath.Join(root, changes[i].Path); changes[i].Source != expected {
return errors.Errorf("Unexpected source path %s, expected %s", changes[i].Source, expected)
}
}
}
return nil
}
type TestChange struct {
Kind ChangeKind
Path string
FileInfo os.FileInfo
Source string
}
func collectChanges(a, b string) ([]TestChange, error) {
changes := []TestChange{}
err := Changes(context.Background(), a, b, func(k ChangeKind, p string, f os.FileInfo, err error) error {
if err != nil {
return err
}
changes = append(changes, TestChange{
Kind: k,
Path: p,
FileInfo: f,
Source: filepath.Join(b, p),
})
return nil
})
if err != nil {
return nil, errors.Wrap(err, "failed to compute changes")
}
return changes, nil
}
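collectChanges doubles as a usage example for the package's callback-style API: Changes walks two trees and invokes the function once per differing path, in order. From outside the package the call looks roughly like the sketch below, written against the vendored github.com/containerd/containerd/fs import path; the printable form of ChangeKind is an assumption:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/containerd/containerd/fs"
)

func main() {
	// Print every add/modify/delete between a lower and an upper tree.
	// The two paths are illustrative.
	err := fs.Changes(context.Background(), "/tmp/lower", "/tmp/upper",
		func(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
			if err != nil {
				return err // abort the walk on the first error
			}
			fmt.Printf("%s\t%s\n", k, p)
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}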
func diffString(c1, c2 []TestChange) string {
return fmt.Sprintf("got(%d):\n%s\nexpected(%d):\n%s", len(c1), changesString(c1), len(c2), changesString(c2))
}
func changesString(c []TestChange) string {
strs := make([]string, len(c))
for i := range c {
strs[i] = fmt.Sprintf("\t%s\t%s", c[i].Kind, c[i].Path)
}
return strings.Join(strs, "\n")
}
func Add(p string) TestChange {
return TestChange{
Kind: ChangeKindAdd,
Path: filepath.FromSlash(p),
}
}
func Delete(p string) TestChange {
return TestChange{
Kind: ChangeKindDelete,
Path: filepath.FromSlash(p),
}
}
func Modify(p string) TestChange {
return TestChange{
Kind: ChangeKindModify,
Path: filepath.FromSlash(p),
}
}


@@ -8,7 +8,8 @@ import (
 	"os/exec"
 	"testing"

-	"github.com/containerd/continuity/testutil"
+	"github.com/containerd/containerd/testutil"
+	"github.com/stretchr/testify/assert"
 )

 func testSupportsDType(t *testing.T, expected bool, mkfs ...string) {
@@ -41,9 +42,7 @@ func testSupportsDType(t *testing.T, expected bool, mkfs ...string) {
 		t.Fatal(err)
 	}
 	t.Logf("Supports d_type: %v", result)
-	if expected != result {
-		t.Fatalf("expected %+v, got: %+v", expected, result)
-	}
+	assert.Equal(t, expected, result)
 }

 func TestSupportsDTypeWithFType0XFS(t *testing.T) {
func TestSupportsDTypeWithFType0XFS(t *testing.T) {


@@ -3,7 +3,7 @@ package fs
 import (
 	"testing"

-	"github.com/containerd/continuity/testutil"
+	"github.com/containerd/containerd/testutil"
 )

 func TestRequiresRootNOP(t *testing.T) {
@@ -17,7 +17,7 @@ func TestRequiresRootNOP(t *testing.T) {
 	// that file is only built on linux. Since the Makefile is not
 	// go build tag aware it sees this file and then tries to run
 	// the following command on all platforms: "go test ...
-	// github.com/containerd/continuity/fs -test.root". On
+	// github.com/containerd/containerd/fs -test.root". On
 	// non-linux platforms this fails because there are no tests in
 	// the "fs" package that reference testutil.RequiresRoot. To
 	// fix this problem we'll add a reference to this symbol below.
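The workaround that comment describes is a blank-identifier reference: importing testutil from a file that builds on every platform links the package (and the flag it registers) everywhere, and referencing the symbol keeps the import from being reported as unused. Roughly, and assuming the flag registration happens in testutil's own init:

package fs

import "github.com/containerd/containerd/testutil"

// Referenced so every platform's test binary links testutil and its
// -test.root flag registration, not just linux.
var _ = testutil.RequiresRoot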


@@ -15,15 +15,6 @@ type inode struct {
 	dev, ino uint64
 }

-func newInode(stat *syscall.Stat_t) inode {
-	return inode{
-		// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
-		dev: uint64(stat.Dev), // nolint: unconvert
-		// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
-		ino: uint64(stat.Ino), // nolint: unconvert
-	}
-}
-
 func diskUsage(roots ...string) (Usage, error) {

 	var (
@@ -37,7 +28,9 @@ func diskUsage(roots ...string) (Usage, error) {
 			return err
 		}

-		inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+		stat := fi.Sys().(*syscall.Stat_t)
+
+		inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
 		if _, ok := inodes[inoKey]; !ok {
 			inodes[inoKey] = struct{}{}
 			size += fi.Size()
@@ -67,7 +60,9 @@ func diffUsage(ctx context.Context, a, b string) (Usage, error) {
 		}

 		if kind == ChangeKindAdd || kind == ChangeKindModify {
-			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+			stat := fi.Sys().(*syscall.Stat_t)
+
+			inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
 			if _, ok := inodes[inoKey]; !ok {
 				inodes[inoKey] = struct{}{}
 				size += fi.Size()
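The replacement inlines what newInode did: key each file by its (device, inode) pair so hardlinked files are counted once. A self-contained sketch of the same dedup pattern, unix-only since fi.Sys() is only a *syscall.Stat_t there; duDedup is a made-up name and, as a simplification, only regular files are totalled:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"syscall"
)

func duDedup(root string) (int64, error) {
	type inode struct{ dev, ino uint64 }
	seen := map[inode]struct{}{}
	var size int64
	err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if err != nil || !fi.Mode().IsRegular() {
			return err
		}
		// Dev and Ino vary in width across unixes, hence the explicit
		// uint64 conversions, exactly as in the hunk above.
		stat := fi.Sys().(*syscall.Stat_t)
		key := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
		if _, ok := seen[key]; ok {
			return nil // hardlink to an already-counted file
		}
		seen[key] = struct{}{}
		size += fi.Size()
		return nil
	})
	return size, err
}

func main() {
	n, err := duDedup(".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes\n", n)
}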

Some files were not shown because too many files have changed in this diff.