From fb58e0d9c263938a3bf50affc1ef37f218e0b873 Mon Sep 17 00:00:00 2001 From: Lei Jitang Date: Tue, 4 Aug 2015 09:52:54 +0800 Subject: [PATCH] Enable golint in pkg/archive Signed-off-by: Lei Jitang --- archive/archive.go | 38 ++++++++++++++++++++++++++++++----- archive/changes.go | 17 +++++++++++++--- archive/copy.go | 6 +++--- archive/diff.go | 11 ++++++---- archive/utils_test.go | 2 +- chrootarchive/diff.go | 4 ++-- chrootarchive/diff_unix.go | 2 +- chrootarchive/diff_windows.go | 2 +- 8 files changed, 62 insertions(+), 20 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index b916232..0fb8e9b 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -25,12 +25,17 @@ import ( ) type ( - Archive io.ReadCloser - ArchiveReader io.Reader - Compression int + // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. + Archive io.ReadCloser + // Reader is a type of io.Reader. + Reader io.Reader + // Compression is the state that represents if compressed or not. + Compression int + // TarChownOptions wraps the chown options UID and GID. TarChownOptions struct { UID, GID int } + // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string ExcludePatterns []string @@ -59,17 +64,23 @@ type ( ) var ( + // ErrNotImplemented is the error message of function not implemented. ErrNotImplemented = errors.New("Function not implemented") defaultArchiver = &Archiver{Untar} ) const ( + // Uncompressed represents the uncompressed. Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. Bzip2 + // Gzip is gzip compression algorithm. Gzip + // Xz is xz compression algorithm. Xz ) +// IsArchive checks if it is an archive by the header. func IsArchive(header []byte) bool { compression := DetectCompression(header) if compression != Uncompressed { @@ -80,6 +91,7 @@ func IsArchive(header []byte) bool { return err == nil } +// DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, @@ -103,6 +115,7 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, error) { return CmdStream(exec.Command(args[0], args[1:]...), archive) } +// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive. func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) @@ -139,6 +152,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } } +// CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) @@ -159,6 +173,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose } } +// Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { case Uncompressed: @@ -530,6 +545,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return pipeReader, nil } +// Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) @@ -643,7 +659,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { return untarHandler(tarArchive, dest, options, true) } -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive must be an uncompressed stream. 
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { @@ -663,7 +679,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp options.ExcludePatterns = []string{} } - var r io.Reader = tarArchive + r := tarArchive if decompress { decompressedArchive, err := DecompressStream(tarArchive) if err != nil { @@ -676,6 +692,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp return Unpack(r, dest, options) } +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { logrus.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) @@ -692,6 +710,7 @@ func TarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } +// UntarPath untars a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { @@ -710,6 +729,10 @@ func UntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { @@ -735,6 +758,9 @@ func CopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. 
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) @@ -878,6 +904,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return &TempArchive{File: f, Size: size}, nil } +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience diff --git a/archive/changes.go b/archive/changes.go index 689d9a2..4619a4c 100644 --- a/archive/changes.go +++ b/archive/changes.go @@ -18,14 +18,22 @@ import ( "github.com/docker/docker/pkg/system" ) +// ChangeType represents the change type. type ChangeType int const ( + // ChangeModify represents the modify operation. ChangeModify = iota + // ChangeAdd represents the add operation. ChangeAdd + // ChangeDelete represents the delete operation. ChangeDelete ) +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. type Change struct { Path string Kind ChangeType @@ -161,6 +169,7 @@ func Changes(layers []string, rw string) ([]Change, error) { return changes, nil } +// FileInfo describes the information of a file. type FileInfo struct { parent *FileInfo name string @@ -170,11 +179,12 @@ type FileInfo struct { added bool } -func (root *FileInfo) LookUp(path string) *FileInfo { +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { // As this runs on the daemon side, file paths are OS specific. 
- parent := info if path == string(os.PathSeparator) { - return root + return info } pathElements := strings.Split(path, string(os.PathSeparator)) @@ -275,6 +285,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { } +// Changes adds changes to file information. func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { var changes []Change diff --git a/archive/copy.go b/archive/copy.go index 90b3e81..446842a 100644 --- a/archive/copy.go +++ b/archive/copy.go @@ -246,7 +246,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { // contain the archived resource described by srcInfo, to the destination // described by dstInfo. Returns the possibly modified content archive along // with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { // Separate the destination path between its directory and base // components in case the source archive contents need to be rebased. dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) @@ -296,7 +296,7 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds // rebaseArchiveEntries rewrites the given srcContent archive replacing // an occurance of oldBase with newBase at the beginning of entry names. 
-func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { if oldBase == "/" { // If oldBase specifies the root directory, use an empty string as // oldBase instead so that newBase doesn't replace the path separator @@ -368,7 +368,7 @@ func CopyResource(srcPath, dstPath string) error { // CopyTo handles extracting the given content whose // entries should be sourced from srcInfo to dstPath. -func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { +func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { // The destination path need not exist, but CopyInfoDestinationPath will // ensure that at least the parent directory exists. dstInfo, err := CopyInfoDestinationPath(dstPath) diff --git a/archive/diff.go b/archive/diff.go index d310a17..23130a6 100644 --- a/archive/diff.go +++ b/archive/diff.go @@ -16,7 +16,10 @@ import ( "github.com/docker/docker/pkg/system" ) -func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { +// UnpackLayer unpacks `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer Reader) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) @@ -177,7 +180,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { // and applies it to the directory `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. 
-func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { +func ApplyLayer(dest string, layer Reader) (int64, error) { return applyLayerHandler(dest, layer, true) } @@ -185,12 +188,12 @@ func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) { +func ApplyUncompressedLayer(dest string, layer Reader) (int64, error) { return applyLayerHandler(dest, layer, false) } // do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) { +func applyLayerHandler(dest string, layer Reader, decompress bool) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms diff --git a/archive/utils_test.go b/archive/utils_test.go index f5cacea..9871903 100644 --- a/archive/utils_test.go +++ b/archive/utils_test.go @@ -16,7 +16,7 @@ var testUntarFns = map[string]func(string, io.Reader) error{ return Untar(r, dest, nil) }, "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, ArchiveReader(r)) + _, err := ApplyLayer(dest, Reader(r)) return err }, } diff --git a/chrootarchive/diff.go b/chrootarchive/diff.go index 75a176d..c0f46ea 100644 --- a/chrootarchive/diff.go +++ b/chrootarchive/diff.go @@ -6,7 +6,7 @@ import "github.com/docker/docker/pkg/archive" // and applies it to the directory `dest`. The stream `layer` can only be // uncompressed. // Returns the size in bytes of the contents of the layer. 
-func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) { +func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { return applyLayerHandler(dest, layer, true) } @@ -14,6 +14,6 @@ func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer archive.ArchiveReader) (int64, error) { +func ApplyUncompressedLayer(dest string, layer archive.Reader) (int64, error) { return applyLayerHandler(dest, layer, false) } diff --git a/chrootarchive/diff_unix.go b/chrootarchive/diff_unix.go index 86b62be..805b163 100644 --- a/chrootarchive/diff_unix.go +++ b/chrootarchive/diff_unix.go @@ -68,7 +68,7 @@ func applyLayer() { // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. -func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) { +func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer) diff --git a/chrootarchive/diff_windows.go b/chrootarchive/diff_windows.go index 5850de1..40f9054 100644 --- a/chrootarchive/diff_windows.go +++ b/chrootarchive/diff_windows.go @@ -12,7 +12,7 @@ import ( // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. 
-func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) { +func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer)