Add user namespace (mapping) support to the Docker engine

Adds support for the daemon to handle user namespace maps as a
per-daemon setting.

Support for handling uid/gid mapping is added to the builder,
archive/unarchive packages and functions, all graphdrivers (except
Windows), and the test suite is updated to handle user namespace daemon
root graph changes.

Docker-DCO-1.1-Signed-off-by: Phil Estes <estesp@linux.vnet.ibm.com> (github: estesp)
This commit is contained in:
Phil Estes 2015-10-08 11:51:41 -04:00
parent 5e8012caca
commit e118299052
14 changed files with 262 additions and 27 deletions

View file

@ -19,6 +19,7 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
@ -41,6 +42,8 @@ type (
ExcludePatterns []string ExcludePatterns []string
Compression Compression Compression Compression
NoLchown bool NoLchown bool
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions ChownOpts *TarChownOptions
IncludeSourceDir bool IncludeSourceDir bool
// When unpacking, specifies whether overwriting a directory with a // When unpacking, specifies whether overwriting a directory with a
@ -52,9 +55,13 @@ type (
} }
// Archiver allows the reuse of most utility functions of this package // Archiver allows the reuse of most utility functions of this package
// with a pluggable Untar function. // with a pluggable Untar function. Also, to facilitate the passing of
// specific id mappings for untar, an archiver can be created with maps
// which will then be passed to Untar operations
Archiver struct { Archiver struct {
Untar func(io.Reader, string, *TarOptions) error Untar func(io.Reader, string, *TarOptions) error
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
} }
// breakoutError is used to differentiate errors related to breaking out // breakoutError is used to differentiate errors related to breaking out
@ -66,7 +73,7 @@ type (
var ( var (
// ErrNotImplemented is the error message of function not implemented. // ErrNotImplemented is the error message of function not implemented.
ErrNotImplemented = errors.New("Function not implemented") ErrNotImplemented = errors.New("Function not implemented")
defaultArchiver = &Archiver{Untar} defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
) )
const ( const (
@ -194,6 +201,8 @@ type tarAppender struct {
// for hardlink mapping // for hardlink mapping
SeenFiles map[uint64]string SeenFiles map[uint64]string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
} }
// canonicalTarName provides a platform-independent and consistent posix-style // canonicalTarName provides a platform-independent and consistent posix-style
@ -261,6 +270,25 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Xattrs["security.capability"] = string(capability) hdr.Xattrs["security.capability"] = string(capability)
} }
//handle re-mapping container ID mappings back to host ID mappings before
//writing tar headers/files
if ta.UIDMaps != nil || ta.GIDMaps != nil {
uid, gid, err := getFileUIDGID(fi.Sys())
if err != nil {
return err
}
xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
if err != nil {
return err
}
xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
if err != nil {
return err
}
hdr.Uid = xUID
hdr.Gid = xGID
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil { if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err return err
} }
@ -427,6 +455,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
TarWriter: tar.NewWriter(compressWriter), TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil), Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string), SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
} }
defer func() { defer func() {
@ -554,6 +584,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
defer pools.BufioReader32KPool.Put(trBuf) defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header var dirs []*tar.Header
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return err
}
// Iterate through the files in the archive. // Iterate through the files in the archive.
loop: loop:
@ -631,6 +665,28 @@ loop:
} }
trBuf.Reset(tr) trBuf.Reset(tr)
// if the options contain a uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if hdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
if err != nil {
return err
}
hdr.Uid = xUID
}
if hdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
if err != nil {
return err
}
hdr.Gid = xGID
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err return err
} }
@ -703,7 +759,15 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
return err return err
} }
defer archive.Close() defer archive.Close()
return archiver.Untar(archive, dst, nil)
var options *TarOptions
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
options = &TarOptions{
UIDMaps: archiver.UIDMaps,
GIDMaps: archiver.GIDMaps,
}
}
return archiver.Untar(archive, dst, options)
} }
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
@ -719,7 +783,14 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
return err return err
} }
defer archive.Close() defer archive.Close()
if err := archiver.Untar(archive, dst, nil); err != nil { var options *TarOptions
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
options = &TarOptions{
UIDMaps: archiver.UIDMaps,
GIDMaps: archiver.GIDMaps,
}
}
if err := archiver.Untar(archive, dst, options); err != nil {
return err return err
} }
return nil return nil
@ -801,6 +872,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
hdr.Name = filepath.Base(dst) hdr.Name = filepath.Base(dst)
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
if err != nil {
return err
}
// only perform mapping if the file being copied isn't already owned by the
// uid or gid of the remapped root in the container
if remappedRootUID != hdr.Uid {
xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
if err != nil {
return err
}
hdr.Uid = xUID
}
if remappedRootGID != hdr.Gid {
xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
if err != nil {
return err
}
hdr.Gid = xGID
}
tw := tar.NewWriter(w) tw := tar.NewWriter(w)
defer tw.Close() defer tw.Close()
if err := tw.WriteHeader(hdr); err != nil { if err := tw.WriteHeader(hdr); err != nil {
@ -816,6 +909,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
err = er err = er
} }
}() }()
return archiver.Untar(r, filepath.Dir(dst), nil) return archiver.Untar(r, filepath.Dir(dst), nil)
} }

View file

@ -61,6 +61,15 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return return
} }
func getFileUIDGID(stat interface{}) (int, int, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
}
return int(s.Uid), int(s.Gid), nil
}
func major(device uint64) uint64 { func major(device uint64) uint64 {
return (device >> 8) & 0xfff return (device >> 8) & 0xfff
} }

View file

@ -63,3 +63,8 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil return nil
} }
// getFileUIDGID reports the ownership of the given stat value. Windows
// has no notion of file ownership mapping yet, so the root identity
// (0, 0) is always returned without error.
func getFileUIDGID(stat interface{}) (int, int, error) {
	const rootUID, rootGID = 0, 0
	return rootUID, rootGID, nil
}

View file

@ -14,6 +14,7 @@ import (
"time" "time"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
) )
@ -341,13 +342,15 @@ func ChangesSize(newDir string, changes []Change) int64 {
} }
// ExportChanges produces an Archive from the provided changes, relative to dir. // ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change) (Archive, error) { func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
reader, writer := io.Pipe() reader, writer := io.Pipe()
go func() { go func() {
ta := &tarAppender{ ta := &tarAppender{
TarWriter: tar.NewWriter(writer), TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil), Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string), SeenFiles: make(map[uint64]string),
UIDMaps: uidMaps,
GIDMaps: gidMaps,
} }
// this buffer is needed for the duration of this piped stream // this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer) defer pools.BufioWriter32KPool.Put(ta.Buffer)

View file

@ -61,7 +61,7 @@ func TestHardLinkOrder(t *testing.T) {
sort.Sort(changesByPath(changes)) sort.Sort(changesByPath(changes))
// ExportChanges // ExportChanges
ar, err := ExportChanges(dest, changes) ar, err := ExportChanges(dest, changes, nil, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -73,7 +73,7 @@ func TestHardLinkOrder(t *testing.T) {
// reverse sort // reverse sort
sort.Sort(sort.Reverse(changesByPath(changes))) sort.Sort(sort.Reverse(changesByPath(changes)))
// ExportChanges // ExportChanges
arRev, err := ExportChanges(dest, changes) arRev, err := ExportChanges(dest, changes, nil, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -410,7 +410,7 @@ func TestApplyLayer(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
layer, err := ExportChanges(dst, changes) layer, err := ExportChanges(dst, changes, nil, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -11,6 +11,7 @@ import (
"strings" "strings"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
) )
@ -18,16 +19,23 @@ import (
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed. // compressed or uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader) (size int64, err error) { func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer) tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr) trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf) defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header var dirs []*tar.Header
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return 0, err
}
aufsTempdir := "" aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header) aufsHardlinks := make(map[string]*tar.Header)
if options == nil {
options = &TarOptions{}
}
// Iterate through the files in the archive. // Iterate through the files in the archive.
for { for {
hdr, err := tr.Next() hdr, err := tr.Next()
@ -169,6 +177,27 @@ func UnpackLayer(dest string, layer Reader) (size int64, err error) {
srcData = tmpFile srcData = tmpFile
} }
// if the options contain a uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if srcHdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
if err != nil {
return 0, err
}
srcHdr.Uid = xUID
}
if srcHdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
if err != nil {
return 0, err
}
srcHdr.Gid = xGID
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
return 0, err return 0, err
} }
@ -196,19 +225,19 @@ func UnpackLayer(dest string, layer Reader) (size int64, err error) {
// compressed or uncompressed. // compressed or uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer Reader) (int64, error) { func ApplyLayer(dest string, layer Reader) (int64, error) {
return applyLayerHandler(dest, layer, true) return applyLayerHandler(dest, layer, &TarOptions{}, true)
} }
// ApplyUncompressedLayer parses a diff in the standard layer format from // ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer` // `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed. // can only be uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer Reader) (int64, error) { func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, false) return applyLayerHandler(dest, layer, options, false)
} }
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream // do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error) { func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest) dest = filepath.Clean(dest)
// We need to be able to set any perms // We need to be able to set any perms
@ -224,5 +253,5 @@ func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error
return 0, err return 0, err
} }
} }
return UnpackLayer(dest, layer) return UnpackLayer(dest, layer, options)
} }

View file

@ -7,13 +7,13 @@ import "github.com/docker/docker/pkg/archive"
// uncompressed. // uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
return applyLayerHandler(dest, layer, true) return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
} }
// ApplyUncompressedLayer parses a diff in the standard layer format from // ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer` // `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed. // can only be uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer archive.Reader) (int64, error) { func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, false) return applyLayerHandler(dest, layer, options, false)
} }

View file

@ -27,8 +27,9 @@ type applyLayerResponse struct {
func applyLayer() { func applyLayer() {
var ( var (
tmpDir = "" tmpDir = ""
err error err error
options *archive.TarOptions
) )
runtime.LockOSThread() runtime.LockOSThread()
flag.Parse() flag.Parse()
@ -44,12 +45,16 @@ func applyLayer() {
fatal(err) fatal(err)
} }
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
fatal(err)
}
if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
fatal(err) fatal(err)
} }
os.Setenv("TMPDIR", tmpDir) os.Setenv("TMPDIR", tmpDir)
size, err := archive.UnpackLayer("/", os.Stdin) size, err := archive.UnpackLayer("/", os.Stdin, options)
os.RemoveAll(tmpDir) os.RemoveAll(tmpDir)
if err != nil { if err != nil {
fatal(err) fatal(err)
@ -68,7 +73,7 @@ func applyLayer() {
// applyLayerHandler parses a diff in the standard layer format from `layer`, and // applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the // applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer. // contents of the layer.
func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) { func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest) dest = filepath.Clean(dest)
if decompress { if decompress {
decompressed, err := archive.DecompressStream(layer) decompressed, err := archive.DecompressStream(layer)
@ -79,9 +84,21 @@ func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size
layer = decompressed layer = decompressed
} }
if options == nil {
options = &archive.TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {
return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
}
cmd := reexec.Command("docker-applyLayer", dest) cmd := reexec.Command("docker-applyLayer", dest)
cmd.Stdin = layer cmd.Stdin = layer
cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
cmd.Stdout, cmd.Stderr = outBuf, errBuf cmd.Stdout, cmd.Stderr = outBuf, errBuf

View file

@ -13,7 +13,7 @@ import (
// applyLayerHandler parses a diff in the standard layer format from `layer`, and // applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the // applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer. // contents of the layer.
func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) { func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest) dest = filepath.Clean(dest)
// Ensure it is a Windows-style volume path // Ensure it is a Windows-style volume path
@ -34,7 +34,7 @@ func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size
return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
} }
s, err := archive.UnpackLayer(dest, layer) s, err := archive.UnpackLayer(dest, layer, nil)
os.RemoveAll(tmpDir) os.RemoveAll(tmpDir)
if err != nil { if err != nil {
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)

26
directory/directory.go Normal file
View file

@ -0,0 +1,26 @@
package directory
import (
"io/ioutil"
"os"
"path/filepath"
)
// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
func MoveToSubdir(oldpath, subdir string) error {
infos, err := ioutil.ReadDir(oldpath)
if err != nil {
return err
}
for _, info := range infos {
if info.Name() != subdir {
oldName := filepath.Join(oldpath, info.Name())
newName := filepath.Join(oldpath, subdir, info.Name())
if err := os.Rename(oldName, newName); err != nil {
return err
}
}
}
return nil
}

View file

@ -3,6 +3,9 @@ package directory
import ( import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath"
"reflect"
"sort"
"testing" "testing"
) )
@ -135,3 +138,45 @@ func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) {
t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size)
} }
} }
// Test migration of directory to a subdir underneath itself
// Test migration of directory to a subdir underneath itself
func TestMoveToSubdir(t *testing.T) {
	var outerDir, subDir string
	var err error

	if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil {
		t.Fatalf("failed to create directory: %v", err)
	}
	// clean up the temp tree; the original test leaked it
	defer os.RemoveAll(outerDir)
	if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil {
		t.Fatalf("failed to create subdirectory: %v", err)
	}

	// write 4 temp files in the outer dir to get moved
	filesList := []string{"a", "b", "c", "d"}
	for _, fName := range filesList {
		file, err := os.Create(filepath.Join(outerDir, fName))
		if err != nil {
			t.Fatalf("couldn't create temp file %q: %v", fName, err)
		}
		// the original ignored the WriteString error; a short/failed write
		// would silently corrupt the fixture
		if _, err := file.WriteString(fName); err != nil {
			file.Close()
			t.Fatalf("couldn't write to temp file %q: %v", fName, err)
		}
		file.Close()
	}

	if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil {
		t.Fatalf("Error during migration of content to subdirectory: %v", err)
	}
	// validate that the files were moved to the subdirectory
	infos, err := ioutil.ReadDir(subDir)
	if err != nil {
		// the original assigned but never checked this error, so a failed
		// read would misreport as a length mismatch below
		t.Fatalf("couldn't read subdirectory %q: %v", subDir, err)
	}
	if len(infos) != 4 {
		t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos))
	}
	var results []string
	for _, info := range infos {
		results = append(results, info.Name())
	}
	sort.Strings(results)
	if !reflect.DeepEqual(filesList, results) {
		t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results)
	}
}

View file

@ -5,7 +5,6 @@ package directory
import ( import (
"os" "os"
"path/filepath" "path/filepath"
"strings"
"github.com/docker/docker/pkg/longpath" "github.com/docker/docker/pkg/longpath"
) )

View file

@ -68,7 +68,11 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
return err return err
} }
defer body.Close() defer body.Close()
return json.NewDecoder(body).Decode(&ret) if err := json.NewDecoder(body).Decode(&ret); err != nil {
logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
return err
}
return nil
} }
// Stream calls the specified method with the specified arguments for the plugin and returns the response body // Stream calls the specified method with the specified arguments for the plugin and returns the response body
@ -86,7 +90,11 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{})
if err != nil { if err != nil {
return err return err
} }
return json.NewDecoder(body).Decode(&ret) if err := json.NewDecoder(body).Decode(&ret); err != nil {
logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
return err
}
return nil
} }
func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {