diff --git a/archive/archive.go b/archive/archive.go
index 40bb3cb..69b7bee 100644
--- a/archive/archive.go
+++ b/archive/archive.go
@@ -19,6 +19,7 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/system"
@@ -41,6 +42,8 @@ type (
 		ExcludePatterns []string
 		Compression     Compression
 		NoLchown        bool
+		UIDMaps         []idtools.IDMap
+		GIDMaps         []idtools.IDMap
 		ChownOpts       *TarChownOptions
 		IncludeSourceDir bool
 		// When unpacking, specifies whether overwriting a directory with a
@@ -52,9 +55,13 @@ type (
 	}
 
 	// Archiver allows the reuse of most utility functions of this package
-	// with a pluggable Untar function.
+	// with a pluggable Untar function. Also, to facilitate the passing of
+	// specific id mappings for untar, an archiver can be created with maps
+	// which will then be passed to Untar operations.
 	Archiver struct {
-		Untar func(io.Reader, string, *TarOptions) error
+		Untar   func(io.Reader, string, *TarOptions) error
+		UIDMaps []idtools.IDMap
+		GIDMaps []idtools.IDMap
 	}
 
 	// breakoutError is used to differentiate errors related to breaking out
@@ -66,7 +73,7 @@ type (
 var (
 	// ErrNotImplemented is the error message of function not implemented.
 	ErrNotImplemented = errors.New("Function not implemented")
-	defaultArchiver   = &Archiver{Untar}
+	defaultArchiver   = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
 )
 
 const (
@@ -194,6 +201,8 @@ type tarAppender struct {
 
 	// for hardlink mapping
 	SeenFiles map[uint64]string
+	UIDMaps   []idtools.IDMap
+	GIDMaps   []idtools.IDMap
 }
 
 // canonicalTarName provides a platform-independent and consistent posix-style
@@ -261,6 +270,25 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 		hdr.Xattrs["security.capability"] = string(capability)
 	}
 
+	// handle re-mapping of on-disk (host) ownership back into the container's
+	// ID mappings before writing tar headers/files
+	if ta.UIDMaps != nil || ta.GIDMaps != nil {
+		uid, gid, err := getFileUIDGID(fi.Sys())
+		if err != nil {
+			return err
+		}
+		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
+		if err != nil {
+			return err
+		}
+		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
+		if err != nil {
+			return err
+		}
+		hdr.Uid = xUID
+		hdr.Gid = xGID
+	}
+
 	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
 		return err
 	}
@@ -427,6 +455,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
 			TarWriter: tar.NewWriter(compressWriter),
 			Buffer:    pools.BufioWriter32KPool.Get(nil),
 			SeenFiles: make(map[uint64]string),
+			UIDMaps:   options.UIDMaps,
+			GIDMaps:   options.GIDMaps,
 		}
 
 		defer func() {
@@ -554,6 +584,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
 	defer pools.BufioReader32KPool.Put(trBuf)
 
 	var dirs []*tar.Header
+	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return err
+	}
 
 	// Iterate through the files in the archive.
 loop:
@@ -631,6 +665,28 @@ loop:
 		}
 		trBuf.Reset(tr)
 
+		// if the options contain a uid & gid maps, convert header uid/gid
+		// entries using the maps such that lchown sets the proper mapped
+		// uid/gid after writing the file. We only perform this mapping if
+		// the file isn't already owned by the remapped root UID or GID, as
+		// that specific uid/gid has no mapping from container -> host, and
+		// those files already have the proper ownership for inside the
+		// container.
+		if hdr.Uid != remappedRootUID {
+			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = xUID
+		}
+		if hdr.Gid != remappedRootGID {
+			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = xGID
+		}
+
 		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
 			return err
 		}
@@ -703,7 +759,15 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
 		return err
 	}
 	defer archive.Close()
-	return archiver.Untar(archive, dst, nil)
+
+	var options *TarOptions
+	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+		options = &TarOptions{
+			UIDMaps: archiver.UIDMaps,
+			GIDMaps: archiver.GIDMaps,
+		}
+	}
+	return archiver.Untar(archive, dst, options)
 }
 
 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
@@ -719,7 +783,14 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
 		return err
 	}
 	defer archive.Close()
-	if err := archiver.Untar(archive, dst, nil); err != nil {
+	var options *TarOptions
+	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+		options = &TarOptions{
+			UIDMaps: archiver.UIDMaps,
+			GIDMaps: archiver.GIDMaps,
+		}
+	}
+	if err := archiver.Untar(archive, dst, options); err != nil {
 		return err
 	}
 	return nil
@@ -801,6 +872,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 		hdr.Name = filepath.Base(dst)
 		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
 
+		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+		if err != nil {
+			return err
+		}
+
+		// only perform mapping if the file being copied isn't already owned by the
+		// uid or gid of the remapped root in the container
+		if remappedRootUID != hdr.Uid {
+			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = xUID
+		}
+		if remappedRootGID != hdr.Gid {
+			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = xGID
+		}
+
 		tw := tar.NewWriter(w)
 		defer tw.Close()
 		if err := tw.WriteHeader(hdr); err != nil {
@@ -816,6 +909,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 			err = er
 		}
 	}()
+
 	return archiver.Untar(r, filepath.Dir(dst), nil)
 }
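Taken together, the archive.go changes let ID mappings flow in both directions: `addTarFile` squashes on-disk (host) ownership back to container IDs with `idtools.ToContainer`, while `Unpack` translates container IDs to mapped host IDs with `idtools.ToHost` before chowning. A minimal caller-side sketch of the new `TarOptions` fields follows; the paths and mapping values are invented, and the `idtools.IDMap` field names are assumed from that package, which this diff does not show.

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Hypothetical remapped-root configuration: container ID 0 maps to host
	// ID 100000, covering a contiguous range of 65536 IDs.
	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Tar a source directory; ToContainer rewrites on-disk (host) ownership
	// back to container IDs in the written headers.
	rdr, err := archive.TarWithOptions("/tmp/example-src", &archive.TarOptions{
		Compression: archive.Uncompressed,
		UIDMaps:     uidMaps,
		GIDMaps:     gidMaps,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rdr.Close()

	// Untar into a destination; ToHost rewrites container IDs to the mapped
	// host IDs so lchown produces correct ownership outside the namespace.
	if err := archive.Untar(rdr, "/tmp/example-dst", &archive.TarOptions{
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	}); err != nil {
		log.Fatal(err)
	}
}
```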
diff --git a/archive/archive_unix.go b/archive/archive_unix.go
index 02ce3cd..51372d5 100644
--- a/archive/archive_unix.go
+++ b/archive/archive_unix.go
@@ -61,6 +61,15 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
 	return
 }
 
+func getFileUIDGID(stat interface{}) (int, int, error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if !ok {
+		return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
+	}
+	return int(s.Uid), int(s.Gid), nil
+}
+
 func major(device uint64) uint64 {
 	return (device >> 8) & 0xfff
 }
diff --git a/archive/archive_windows.go b/archive/archive_windows.go
index 7d52105..f5cc997 100644
--- a/archive/archive_windows.go
+++ b/archive/archive_windows.go
@@ -63,3 +63,8 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
 func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
 	return nil
 }
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+	// no notion of file ownership mapping yet on Windows
+	return 0, 0, nil
+}
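`getFileUIDGID` supplies the raw ownership that feeds `ToContainer` above (and is a no-op stub on Windows). As an illustration only, not the actual idtools implementation, the range arithmetic such a lookup performs is roughly the following.

```go
package main

import "fmt"

// idMap mirrors the shape of an ID mapping entry: Size contiguous IDs
// starting at ContainerID inside the namespace correspond to Size IDs
// starting at HostID outside it. (Illustrative type, not idtools.IDMap.)
type idMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// toContainer translates a host ID into the container's view of that ID,
// mimicking what the archive code relies on when writing tar headers.
func toContainer(hostID int, maps []idMap) (int, error) {
	if maps == nil {
		// no user namespace remapping configured: IDs pass through unchanged
		return hostID, nil
	}
	for _, m := range maps {
		if hostID >= m.HostID && hostID < m.HostID+m.Size {
			return m.ContainerID + (hostID - m.HostID), nil
		}
	}
	return -1, fmt.Errorf("host ID %d has no matching container mapping", hostID)
}

func main() {
	maps := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	id, err := toContainer(100000, maps) // a file owned by 100000 on disk
	fmt.Println(id, err)                 // prints: 0 <nil> — root inside the container
}
```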
"github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) @@ -341,13 +342,15 @@ func ChangesSize(newDir string, changes []Change) int64 { } // ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change) (Archive, error) { +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { reader, writer := io.Pipe() go func() { ta := &tarAppender{ TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, } // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/archive/changes_posix_test.go b/archive/changes_posix_test.go index 9d528e6..5a3282b 100644 --- a/archive/changes_posix_test.go +++ b/archive/changes_posix_test.go @@ -61,7 +61,7 @@ func TestHardLinkOrder(t *testing.T) { sort.Sort(changesByPath(changes)) // ExportChanges - ar, err := ExportChanges(dest, changes) + ar, err := ExportChanges(dest, changes, nil, nil) if err != nil { t.Fatal(err) } @@ -73,7 +73,7 @@ func TestHardLinkOrder(t *testing.T) { // reverse sort sort.Sort(sort.Reverse(changesByPath(changes))) // ExportChanges - arRev, err := ExportChanges(dest, changes) + arRev, err := ExportChanges(dest, changes, nil, nil) if err != nil { t.Fatal(err) } diff --git a/archive/changes_test.go b/archive/changes_test.go index 509bdb2..afbb0b9 100644 --- a/archive/changes_test.go +++ b/archive/changes_test.go @@ -410,7 +410,7 @@ func TestApplyLayer(t *testing.T) { t.Fatal(err) } - layer, err := ExportChanges(dst, changes) + layer, err := ExportChanges(dst, changes, nil, nil) if err != nil { t.Fatal(err) } diff --git a/archive/diff.go b/archive/diff.go index c030fd5..f5f0d80 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) @@ -18,16 +19,23 @@ import ( // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer Reader) (size int64, err error) { +func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) + if options == nil { + options = &TarOptions{} + } // Iterate through the files in the archive. for { hdr, err := tr.Next() @@ -169,6 +177,27 @@ func UnpackLayer(dest string, layer Reader) (size int64, err error) { srcData = tmpFile } + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. 
diff --git a/archive/diff.go b/archive/diff.go
index c030fd5..f5f0d80 100644
--- a/archive/diff.go
+++ b/archive/diff.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/system"
 )
@@ -18,16 +19,23 @@ import (
 // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
 // compressed or uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func UnpackLayer(dest string, layer Reader) (size int64, err error) {
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
 	tr := tar.NewReader(layer)
 	trBuf := pools.BufioReader32KPool.Get(tr)
 	defer pools.BufioReader32KPool.Put(trBuf)
 
 	var dirs []*tar.Header
+	if options == nil {
+		options = &TarOptions{}
+	}
+	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return 0, err
+	}
 
 	aufsTempdir := ""
 	aufsHardlinks := make(map[string]*tar.Header)
 
 	// Iterate through the files in the archive.
 	for {
 		hdr, err := tr.Next()
@@ -169,6 +177,27 @@ func UnpackLayer(dest string, layer Reader) (size int64, err error) {
 			srcData = tmpFile
 		}
 
+		// if the options contain a uid & gid maps, convert header uid/gid
+		// entries using the maps such that lchown sets the proper mapped
+		// uid/gid after writing the file. We only perform this mapping if
+		// the file isn't already owned by the remapped root UID or GID, as
+		// that specific uid/gid has no mapping from container -> host, and
+		// those files already have the proper ownership for inside the
+		// container.
+		if srcHdr.Uid != remappedRootUID {
+			xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+			if err != nil {
+				return 0, err
+			}
+			srcHdr.Uid = xUID
+		}
+		if srcHdr.Gid != remappedRootGID {
+			xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+			if err != nil {
+				return 0, err
+			}
+			srcHdr.Gid = xGID
+		}
 		if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
 			return 0, err
 		}
@@ -196,19 +225,19 @@ func UnpackLayer(dest string, layer Reader) (size int64, err error) {
 // compressed or uncompressed.
 // Returns the size in bytes of the contents of the layer.
 func ApplyLayer(dest string, layer Reader) (int64, error) {
-	return applyLayerHandler(dest, layer, true)
+	return applyLayerHandler(dest, layer, &TarOptions{}, true)
 }
 
 // ApplyUncompressedLayer parses a diff in the standard layer format from
 // `layer`, and applies it to the directory `dest`. The stream `layer`
 // can only be uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer Reader) (int64, error) {
-	return applyLayerHandler(dest, layer, false)
+func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
 }
 
 // do the bulk load of ApplyLayer, but allow for not calling DecompressStream
-func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error) {
+func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
 	dest = filepath.Clean(dest)
 
 	// We need to be able to set any perms
@@ -224,5 +253,5 @@ func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error) {
 			return 0, err
 		}
 	}
-	return UnpackLayer(dest, layer)
+	return UnpackLayer(dest, layer, options)
 }
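Note that the `options == nil` guard must run before `GetRootUIDGID` dereferences `options` (the Windows chrootarchive path below calls `UnpackLayer` with `nil`); the hunk above orders it accordingly. With `UnpackLayer` accepting `*TarOptions`, layer application can honour the daemon's mappings end to end. A sketch of a caller using the new parameter, with invented names:

```go
package layerapply // illustrative package name

import (
	"io"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
)

// applyDiff applies an uncompressed layer diff under dest, chowning entries
// to the mapped host IDs when maps are supplied. Passing a nil options
// pointer is also safe: UnpackLayer substitutes an empty TarOptions.
func applyDiff(dest string, diff io.Reader, uidMaps, gidMaps []idtools.IDMap) (int64, error) {
	return archive.ApplyUncompressedLayer(dest, diff, &archive.TarOptions{
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	})
}
```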
diff --git a/chrootarchive/diff.go b/chrootarchive/diff.go
index c0f46ea..94131a6 100644
--- a/chrootarchive/diff.go
+++ b/chrootarchive/diff.go
@@ -7,13 +7,13 @@ import "github.com/docker/docker/pkg/archive"
 // uncompressed.
 // Returns the size in bytes of the contents of the layer.
 func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
-	return applyLayerHandler(dest, layer, true)
+	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
 }
 
 // ApplyUncompressedLayer parses a diff in the standard layer format from
 // `layer`, and applies it to the directory `dest`. The stream `layer`
 // can only be uncompressed.
 // Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer archive.Reader) (int64, error) {
-	return applyLayerHandler(dest, layer, false)
+func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
 }
diff --git a/chrootarchive/diff_unix.go b/chrootarchive/diff_unix.go
index 805b163..4196dd4 100644
--- a/chrootarchive/diff_unix.go
+++ b/chrootarchive/diff_unix.go
@@ -27,8 +27,9 @@ type applyLayerResponse struct {
 
 func applyLayer() {
 	var (
-		tmpDir = ""
-		err    error
+		tmpDir  = ""
+		err     error
+		options *archive.TarOptions
 	)
 	runtime.LockOSThread()
 	flag.Parse()
@@ -44,12 +45,16 @@ func applyLayer() {
 		fatal(err)
 	}
 
+	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+		fatal(err)
+	}
+
 	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
 		fatal(err)
 	}
 	os.Setenv("TMPDIR", tmpDir)
 
-	size, err := archive.UnpackLayer("/", os.Stdin)
+	size, err := archive.UnpackLayer("/", os.Stdin, options)
 	os.RemoveAll(tmpDir)
 	if err != nil {
 		fatal(err)
@@ -68,7 +73,7 @@ func applyLayer() {
 // applyLayerHandler parses a diff in the standard layer format from `layer`, and
 // applies it to the directory `dest`. Returns the size in bytes of the
 // contents of the layer.
-func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) {
+func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
 	dest = filepath.Clean(dest)
 	if decompress {
 		decompressed, err := archive.DecompressStream(layer)
@@ -79,9 +84,21 @@ func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
 		layer = decompressed
 	}
 
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	data, err := json.Marshal(options)
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+	}
 
 	cmd := reexec.Command("docker-applyLayer", dest)
 	cmd.Stdin = layer
+	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
 
 	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
 	cmd.Stdout, cmd.Stderr = outBuf, errBuf
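Because the chrooted extraction happens in a re-executed child process, the options cannot be shared in memory; they are marshalled to JSON and handed over in the `OPT` environment variable. The pattern reduced to its essentials is sketched below — the command name and payload are invented, and only `reexec.Register`, `reexec.Init`, and `reexec.Command` are assumed from the reexec package.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Child side: runs when the binary is re-executed under the registered
	// name, and recovers the options from the environment.
	reexec.Register("example-applyLayer", func() {
		var options *archive.TarOptions
		if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Printf("child got %d uid mapping(s)\n", len(options.UIDMaps))
	})
}

func main() {
	if reexec.Init() {
		return // we were the re-executed child; nothing more to do
	}

	// Parent side: serialize the options and hand them to the child.
	opts := &archive.TarOptions{ExcludePatterns: []string{}}
	data, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	cmd := reexec.Command("example-applyLayer")
	cmd.Env = append(os.Environ(), fmt.Sprintf("OPT=%s", data))
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```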
%s", dest, err) } - s, err := archive.UnpackLayer(dest, layer) + s, err := archive.UnpackLayer(dest, layer, nil) os.RemoveAll(tmpDir) if err != nil { return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) diff --git a/directory/directory.go b/directory/directory.go new file mode 100644 index 0000000..1715ef4 --- /dev/null +++ b/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/directory/directory_test.go b/directory/directory_test.go index a8da1ac..1b196b1 100644 --- a/directory/directory_test.go +++ b/directory/directory_test.go @@ -3,6 +3,9 @@ package directory import ( "io/ioutil" "os" + "path/filepath" + "reflect" + "sort" "testing" ) @@ -135,3 +138,45 @@ func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) } } + +// Test migration of directory to a subdir underneath itself +func TestMoveToSubdir(t *testing.T) { + var outerDir, subDir string + var err error + + if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { + t.Fatalf("failed to create directory: %v", err) + } + + if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { + t.Fatalf("failed to create subdirectory: %v", err) + } + + // write 4 temp files in the outer dir to get moved + filesList := []string{"a", "b", "c", "d"} + for _, fName := range filesList { + if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { + t.Fatalf("couldn't create temp file %q: %v", fName, err) + } else { + file.WriteString(fName) + file.Close() + } + } + + if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { + t.Fatalf("Error during migration of content to subdirectory: %v", err) + } + // validate that the files were moved to the subdirectory + infos, err := ioutil.ReadDir(subDir) + if len(infos) != 4 { + t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) + } + var results []string + for _, info := range infos { + results = append(results, info.Name()) + } + sort.Sort(sort.StringSlice(results)) + if !reflect.DeepEqual(filesList, results) { + t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) + } +} diff --git a/directory/directory_windows.go b/directory/directory_windows.go index a0fc048..6d41777 100644 --- a/directory/directory_windows.go +++ b/directory/directory_windows.go @@ -5,7 +5,6 @@ package directory import ( "os" "path/filepath" - "strings" "github.com/docker/docker/pkg/longpath" ) diff --git a/plugins/client.go b/plugins/client.go index add1361..b64f0d6 100644 --- a/plugins/client.go +++ b/plugins/client.go @@ -68,7 +68,11 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e return err } defer body.Close() - return json.NewDecoder(body).Decode(&ret) + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", 
diff --git a/plugins/client.go b/plugins/client.go
index add1361..b64f0d6 100644
--- a/plugins/client.go
+++ b/plugins/client.go
@@ -68,7 +68,11 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
 		return err
 	}
 	defer body.Close()
-	return json.NewDecoder(body).Decode(&ret)
+	if err := json.NewDecoder(body).Decode(&ret); err != nil {
+		logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
+		return err
+	}
+	return nil
 }
 
 // Stream calls the specified method with the specified arguments for the plugin and returns the response body
@@ -86,7 +90,11 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error {
 	if err != nil {
 		return err
 	}
-	return json.NewDecoder(body).Decode(&ret)
+	if err := json.NewDecoder(body).Decode(&ret); err != nil {
+		logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
+		return err
+	}
+	return nil
 }
 
 func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
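The plugins change is unrelated to ID mapping but stops JSON decode failures from propagating silently. The same decode-and-log pattern, isolated from the `Client` internals, is sketched below (the helper name and sample payload are invented).

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"

	"github.com/Sirupsen/logrus"
)

// decodePluginResponse mirrors the change above: decode the JSON body into
// ret and log decode failures with the service method for easier debugging.
func decodePluginResponse(serviceMethod string, body io.Reader, ret interface{}) error {
	if err := json.NewDecoder(body).Decode(ret); err != nil {
		logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
		return err
	}
	return nil
}

func main() {
	var out struct{ Err string }
	body := strings.NewReader(`{"Err": ""}`)
	if err := decodePluginResponse("VolumeDriver.Mount", body, &out); err != nil {
		fmt.Println("call failed:", err)
	}
}
```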