Merge pull request #15252 from coolljt0725/14765_enable_golint_3

Enable golint in pkg/archive
This commit is contained in:
Tibor Vass 2015-08-05 19:27:48 -04:00
commit 04e019f7cc
8 changed files with 62 additions and 20 deletions

View file

@ -25,12 +25,17 @@ import (
) )
type ( type (
// Archive is a type of io.ReadCloser which has two methods, Read and Close.
Archive io.ReadCloser Archive io.ReadCloser
ArchiveReader io.Reader // Reader is a type of io.Reader.
Reader io.Reader
// Compression is the state representing whether a file is compressed or not.
Compression int Compression int
// TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct { TarChownOptions struct {
UID, GID int UID, GID int
} }
// TarOptions wraps the tar options.
TarOptions struct { TarOptions struct {
IncludeFiles []string IncludeFiles []string
ExcludePatterns []string ExcludePatterns []string
@ -59,17 +64,23 @@ type (
) )
var ( var (
// ErrNotImplemented is the error returned when a function is not implemented.
ErrNotImplemented = errors.New("Function not implemented") ErrNotImplemented = errors.New("Function not implemented")
defaultArchiver = &Archiver{Untar} defaultArchiver = &Archiver{Untar}
) )
const ( const (
// Uncompressed represents uncompressed data.
Uncompressed Compression = iota Uncompressed Compression = iota
// Bzip2 is bzip2 compression algorithm.
Bzip2 Bzip2
// Gzip is gzip compression algorithm.
Gzip Gzip
// Xz is xz compression algorithm.
Xz Xz
) )
// IsArchive checks if it is an archive by inspecting the header.
func IsArchive(header []byte) bool { func IsArchive(header []byte) bool {
compression := DetectCompression(header) compression := DetectCompression(header)
if compression != Uncompressed { if compression != Uncompressed {
@ -80,6 +91,7 @@ func IsArchive(header []byte) bool {
return err == nil return err == nil
} }
// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression { func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{ for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68}, Bzip2: {0x42, 0x5A, 0x68},
@ -103,6 +115,7 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
return CmdStream(exec.Command(args[0], args[1:]...), archive) return CmdStream(exec.Command(args[0], args[1:]...), archive)
} }
// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) { func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool p := pools.BufioReader32KPool
buf := p.Get(archive) buf := p.Get(archive)
@ -139,6 +152,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
} }
} }
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool p := pools.BufioWriter32KPool
buf := p.Get(dest) buf := p.Get(dest)
@ -159,6 +173,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose
} }
} }
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string { func (compression *Compression) Extension() string {
switch *compression { switch *compression {
case Uncompressed: case Uncompressed:
@ -530,6 +545,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return pipeReader, nil return pipeReader, nil
} }
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive) tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil) trBuf := pools.BufioReader32KPool.Get(nil)
@ -643,7 +659,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true) return untarHandler(tarArchive, dest, options, true)
} }
// Untar reads a stream of bytes from `archive`, parses it as a tar archive, // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`. // and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream. // The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
@ -663,7 +679,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
options.ExcludePatterns = []string{} options.ExcludePatterns = []string{}
} }
var r io.Reader = tarArchive r := tarArchive
if decompress { if decompress {
decompressedArchive, err := DecompressStream(tarArchive) decompressedArchive, err := DecompressStream(tarArchive)
if err != nil { if err != nil {
@ -676,6 +692,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
return Unpack(r, dest, options) return Unpack(r, dest, options)
} }
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error { func (archiver *Archiver) TarUntar(src, dst string) error {
logrus.Debugf("TarUntar(%s %s)", src, dst) logrus.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
@ -692,6 +710,7 @@ func TarUntar(src, dst string) error {
return defaultArchiver.TarUntar(src, dst) return defaultArchiver.TarUntar(src, dst)
} }
// UntarPath untars a file from path to a destination, src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error { func (archiver *Archiver) UntarPath(src, dst string) error {
archive, err := os.Open(src) archive, err := os.Open(src)
if err != nil { if err != nil {
@ -710,6 +729,10 @@ func UntarPath(src, dst string) error {
return defaultArchiver.UntarPath(src, dst) return defaultArchiver.UntarPath(src, dst)
} }
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error { func (archiver *Archiver) CopyWithTar(src, dst string) error {
srcSt, err := os.Stat(src) srcSt, err := os.Stat(src)
if err != nil { if err != nil {
@ -735,6 +758,9 @@ func CopyWithTar(src, dst string) error {
return defaultArchiver.CopyWithTar(src, dst) return defaultArchiver.CopyWithTar(src, dst)
} }
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src) srcSt, err := os.Stat(src)
@ -878,6 +904,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
return &TempArchive{File: f, Size: size}, nil return &TempArchive{File: f, Size: size}, nil
} }
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct { type TempArchive struct {
*os.File *os.File
Size int64 // Pre-computed from Stat().Size() as a convenience Size int64 // Pre-computed from Stat().Size() as a convenience

View file

@ -18,14 +18,22 @@ import (
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
) )
// ChangeType represents the change type.
type ChangeType int type ChangeType int
const ( const (
// ChangeModify represents the modify operation.
ChangeModify = iota ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete ChangeDelete
) )
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct { type Change struct {
Path string Path string
Kind ChangeType Kind ChangeType
@ -161,6 +169,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
return changes, nil return changes, nil
} }
// FileInfo describes the information of a file.
type FileInfo struct { type FileInfo struct {
parent *FileInfo parent *FileInfo
name string name string
@ -170,11 +179,12 @@ type FileInfo struct {
added bool added bool
} }
func (root *FileInfo) LookUp(path string) *FileInfo { // LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific. // As this runs on the daemon side, file paths are OS specific.
parent := root parent := info
if path == string(os.PathSeparator) { if path == string(os.PathSeparator) {
return root return info
} }
pathElements := strings.Split(path, string(os.PathSeparator)) pathElements := strings.Split(path, string(os.PathSeparator))
@ -275,6 +285,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
} }
// Changes adds changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change var changes []Change

View file

@ -246,7 +246,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
// contain the archived resource described by srcInfo, to the destination // contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along // described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to. // with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
// Separate the destination path between its directory and base // Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased. // components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
@ -296,7 +296,7 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
// rebaseArchiveEntries rewrites the given srcContent archive replacing // rebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names. // an occurrence of oldBase with newBase at the beginning of entry names.
func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive { func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
if oldBase == "/" { if oldBase == "/" {
// If oldBase specifies the root directory, use an empty string as // If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator // oldBase instead so that newBase doesn't replace the path separator
@ -368,7 +368,7 @@ func CopyResource(srcPath, dstPath string) error {
// CopyTo handles extracting the given content whose // CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath. // entries should be sourced from srcInfo to dstPath.
func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will // The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists. // ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(dstPath) dstInfo, err := CopyInfoDestinationPath(dstPath)

View file

@ -16,7 +16,10 @@ import (
"github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/system"
) )
func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { // UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader) (size int64, err error) {
tr := tar.NewReader(layer) tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr) trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf) defer pools.BufioReader32KPool.Put(trBuf)
@ -177,7 +180,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// and applies it to the directory `dest`. The stream `layer` can be // and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed. // compressed or uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { func ApplyLayer(dest string, layer Reader) (int64, error) {
return applyLayerHandler(dest, layer, true) return applyLayerHandler(dest, layer, true)
} }
@ -185,12 +188,12 @@ func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
// `layer`, and applies it to the directory `dest`. The stream `layer` // `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed. // can only be uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) { func ApplyUncompressedLayer(dest string, layer Reader) (int64, error) {
return applyLayerHandler(dest, layer, false) return applyLayerHandler(dest, layer, false)
} }
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream // do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) { func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error) {
dest = filepath.Clean(dest) dest = filepath.Clean(dest)
// We need to be able to set any perms // We need to be able to set any perms

View file

@ -16,7 +16,7 @@ var testUntarFns = map[string]func(string, io.Reader) error{
return Untar(r, dest, nil) return Untar(r, dest, nil)
}, },
"applylayer": func(dest string, r io.Reader) error { "applylayer": func(dest string, r io.Reader) error {
_, err := ApplyLayer(dest, ArchiveReader(r)) _, err := ApplyLayer(dest, Reader(r))
return err return err
}, },
} }

View file

@ -6,7 +6,7 @@ import "github.com/docker/docker/pkg/archive"
// and applies it to the directory `dest`. The stream `layer` can only be // and applies it to the directory `dest`. The stream `layer` can only be
// uncompressed. // uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) { func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
return applyLayerHandler(dest, layer, true) return applyLayerHandler(dest, layer, true)
} }
@ -14,6 +14,6 @@ func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error
// `layer`, and applies it to the directory `dest`. The stream `layer` // `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed. // can only be uncompressed.
// Returns the size in bytes of the contents of the layer. // Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer archive.ArchiveReader) (int64, error) { func ApplyUncompressedLayer(dest string, layer archive.Reader) (int64, error) {
return applyLayerHandler(dest, layer, false) return applyLayerHandler(dest, layer, false)
} }

View file

@ -68,7 +68,7 @@ func applyLayer() {
// applyLayerHandler parses a diff in the standard layer format from `layer`, and // applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the // applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer. // contents of the layer.
func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) { func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest) dest = filepath.Clean(dest)
if decompress { if decompress {
decompressed, err := archive.DecompressStream(layer) decompressed, err := archive.DecompressStream(layer)

View file

@ -12,7 +12,7 @@ import (
// applyLayerHandler parses a diff in the standard layer format from `layer`, and // applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the // applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer. // contents of the layer.
func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) { func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest) dest = filepath.Clean(dest)
if decompress { if decompress {
decompressed, err := archive.DecompressStream(layer) decompressed, err := archive.DecompressStream(layer)