From 46840c585a449bb5238da823841c48f8a3ad852b Mon Sep 17 00:00:00 2001
From: Vincent Batts
Date: Mon, 9 Mar 2015 14:11:11 -0400
Subject: [PATCH] *: golint and docs

---
 checksize.go               |  4 ++--
 tar/asm/assemble_test.go   |  4 ++--
 tar/asm/doc.go             |  3 ++-
 tar/storage/doc.go         |  2 +-
 tar/storage/entry.go       |  1 +
 tar/storage/getter.go      | 17 +++++++++++++----
 tar/storage/packer.go      | 14 +++++++-------
 tar/storage/packer_test.go | 12 ++++++------
 8 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/checksize.go b/checksize.go
index a817a6b..a6d3c08 100644
--- a/checksize.go
+++ b/checksize.go
@@ -44,8 +44,8 @@ func main() {
 			defer os.Remove(packFh.Name())
 		}
-		sp := storage.NewJsonPacker(packFh)
-		fp := asm.NewDiscardFilePutter()
+		sp := storage.NewJSONPacker(packFh)
+		fp := storage.NewDiscardFilePutter()
 		dissam, err := asm.NewInputTarStream(fh, sp, fp)
 		if err != nil {
 			log.Fatal(err)
 		}
diff --git a/tar/asm/assemble_test.go b/tar/asm/assemble_test.go
index a164f0e..203e716 100644
--- a/tar/asm/assemble_test.go
+++ b/tar/asm/assemble_test.go
@@ -87,7 +87,7 @@ func TestTarStream(t *testing.T) {
 
 	// Setup where we'll store the metadata
 	w := bytes.NewBuffer([]byte{})
-	sp := storage.NewJsonPacker(w)
+	sp := storage.NewJSONPacker(w)
 	fgp := storage.NewBufferFileGetPutter()
 
 	// wrap the disassembly stream
@@ -118,7 +118,7 @@ func TestTarStream(t *testing.T) {
 	// If we've made it this far, then we'll turn it around and create a tar
 	// stream from the packed metadata and buffered file contents.
 	r := bytes.NewBuffer(w.Bytes())
-	sup := storage.NewJsonUnpacker(r)
+	sup := storage.NewJSONUnpacker(r)
 	// and reuse the fgp that we Put the payloads to.
 
 	rc := NewOutputTarStream(fgp, sup)
diff --git a/tar/asm/doc.go b/tar/asm/doc.go
index 43748a0..4367b90 100644
--- a/tar/asm/doc.go
+++ b/tar/asm/doc.go
@@ -1,5 +1,6 @@
 /*
-asm provides the API for streaming assembly and disassembly of tar archives.
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
 
 Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the
 metadata for a stream, as well as an implementation of Getting/Putting the file
diff --git a/tar/storage/doc.go b/tar/storage/doc.go
index 6f67880..57b61bc 100644
--- a/tar/storage/doc.go
+++ b/tar/storage/doc.go
@@ -1,5 +1,5 @@
 /*
-storage is for metadata of a tar archive.
+Package storage is for metadata of a tar archive.
 
 Packing and unpacking the Entries of the stream. The types of streams are
 either segments of raw bytes (for the raw headers and various padding) and for
diff --git a/tar/storage/entry.go b/tar/storage/entry.go
index 77dc320..961af49 100644
--- a/tar/storage/entry.go
+++ b/tar/storage/entry.go
@@ -7,6 +7,7 @@ func (e Entries) Len() int { return len(e) }
 func (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
 func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
 
+// Type of an Entry, either a SegmentType or a FileType
 type Type int
 
 const (
diff --git a/tar/storage/getter.go b/tar/storage/getter.go
index 907198b..5d46e6a 100644
--- a/tar/storage/getter.go
+++ b/tar/storage/getter.go
@@ -10,22 +10,31 @@ import (
 	"path"
 )
 
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filepath. Presumably, the names will be scoped to
+// relative file paths.
 type FileGetter interface {
 	// Get returns a stream for the provided file path
-	Get(string) (io.ReadCloser, error)
+	Get(filepath string) (output io.ReadCloser, err error)
 }
 
+// FilePutter is the interface for storing a stream of a file payload,
+// addressed by name/filepath.
 type FilePutter interface {
-	// Put returns the crc64 checksum for the provided file
-	Put(string, io.Reader) (int64, []byte, error)
+	// Put returns the size of the stream received, and the crc64 checksum
+	// for the provided stream
+	Put(filepath string, input io.Reader) (size int64, checksum []byte, err error)
 }
 
+// FileGetPutter is the interface that groups both Getting and Putting file
+// payloads.
 type FileGetPutter interface {
 	FileGetter
 	FilePutter
 }
 
-// NewPathFileGetter returns a FileGetter that is for files relative to path relpath.
+// NewPathFileGetter returns a FileGetter that is for files relative to path
+// relpath.
 func NewPathFileGetter(relpath string) FileGetter {
 	return &pathFileGetter{root: relpath}
 }
diff --git a/tar/storage/packer.go b/tar/storage/packer.go
index 410dbd4..6c4364b 100644
--- a/tar/storage/packer.go
+++ b/tar/storage/packer.go
@@ -8,9 +8,9 @@ import (
 	"path"
 )
 
-var (
-	ErrDuplicatePath = errors.New("duplicates of file paths not supported")
-)
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
 
 // Packer describes the methods to pack Entries to a storage destination
 type Packer interface {
@@ -71,11 +71,11 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
 	return &e, err
 }
 
-// NewJsonUnpacker provides an Unpacker that reads Entries (SegmentType and
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
 // FileType) as a json document.
 //
 // Each Entry read are expected to be delimited by new line.
-func NewJsonUnpacker(r io.Reader) Unpacker {
+func NewJSONUnpacker(r io.Reader) Unpacker {
 	return &jsonUnpacker{
 		r: r,
 		b: bufio.NewReader(r),
@@ -117,11 +117,11 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 	return e.Position, nil
 }
 
-// NewJsonPacker provides an Packer that writes each Entry (SegmentType and
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
 // FileType) as a json document.
 //
 // The Entries are delimited by new line.
-func NewJsonPacker(w io.Writer) Packer {
+func NewJSONPacker(w io.Writer) Packer {
 	return &jsonPacker{
 		w: w,
 		e: json.NewEncoder(w),
diff --git a/tar/storage/packer_test.go b/tar/storage/packer_test.go
index fd58840..1c6101f 100644
--- a/tar/storage/packer_test.go
+++ b/tar/storage/packer_test.go
@@ -28,7 +28,7 @@ func TestDuplicateFail(t *testing.T) {
 
 	buf := []byte{}
 	b := bytes.NewBuffer(buf)
-	jp := NewJsonPacker(b)
+	jp := NewJSONPacker(b)
 	if _, err := jp.AddEntry(e[0]); err != nil {
 		t.Error(err)
 	}
@@ -40,7 +40,7 @@ func TestDuplicateFail(t *testing.T) {
 	}
 }
 
-func TestJsonPackerUnpacker(t *testing.T) {
+func TestJSONPackerUnpacker(t *testing.T) {
 	e := []Entry{
 		Entry{
 			Type: SegmentType,
@@ -65,7 +65,7 @@ func TestJsonPackerUnpacker(t *testing.T) {
 	b := bytes.NewBuffer(buf)
 
 	func() {
-		jp := NewJsonPacker(b)
+		jp := NewJSONPacker(b)
 		for i := range e {
 			if _, err := jp.AddEntry(e[i]); err != nil {
 				t.Error(err)
@@ -79,7 +79,7 @@ func TestJsonPackerUnpacker(t *testing.T) {
 	b = bytes.NewBuffer(b.Bytes())
 	entries := Entries{}
 	func() {
-		jup := NewJsonUnpacker(b)
+		jup := NewJSONUnpacker(b)
 		for {
 			entry, err := jup.Next()
 			if err != nil {
@@ -125,7 +125,7 @@ func TestGzip(t *testing.T) {
 	buf := []byte{}
 	b := bytes.NewBuffer(buf)
 	gzW := gzip.NewWriter(b)
-	jp := NewJsonPacker(gzW)
+	jp := NewJSONPacker(gzW)
 	for i := range e {
 		if _, err := jp.AddEntry(e[i]); err != nil {
 			t.Error(err)
@@ -143,7 +143,7 @@ func TestGzip(t *testing.T) {
 	}
 	entries := Entries{}
 	func() {
-		jup := NewJsonUnpacker(gzR)
+		jup := NewJSONUnpacker(gzR)
 		for {
 			entry, err := jup.Next()
 			if err != nil {
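
For reviewers who want to exercise the renamed API end to end, a minimal
sketch follows (not part of the patch). It mirrors the flow of checksize.go
and tar/asm/assemble_test.go after this change; the "archive.tar" fixture
path is a placeholder, and NewOutputTarStream is assumed to return an
io.ReadCloser, as the `rc` name in the test suggests.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// "archive.tar" is a placeholder fixture, not part of this patch.
	fh, err := os.Open("archive.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()

	// Pack the stream metadata as newline-delimited JSON documents, and
	// buffer the file payloads in memory.
	w := bytes.NewBuffer([]byte{})
	sp := storage.NewJSONPacker(w)
	fgp := storage.NewBufferFileGetPutter()

	// Disassemble: reading the returned stream is what drives AddEntry
	// calls into the Packer and Put calls into the FilePutter.
	dissam, err := asm.NewInputTarStream(fh, sp, fgp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, dissam); err != nil {
		log.Fatal(err)
	}

	// Reassemble the original tar stream from the packed metadata and
	// the payloads buffered in fgp.
	sup := storage.NewJSONUnpacker(bytes.NewBuffer(w.Bytes()))
	rc := asm.NewOutputTarStream(fgp, sup)
	defer rc.Close()
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		log.Fatal(err)
	}
}

Note that the disassembly stream must be read to completion before
unpacking, since the side effects of packing and putting happen only as
bytes flow through it; the io.Copy to ioutil.Discard is not optional.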