From d8ebf3c0a7586be16383513bc0b38dfd1dfc81e9 Mon Sep 17 00:00:00 2001
From: Vincent Batts
Date: Mon, 9 Mar 2015 13:20:26 -0400
Subject: [PATCH] tar: mv the Getter to tar/storage

---
 tar/asm/assemble.go                 |  6 +++---
 tar/asm/assemble_test.go            |  4 ++--
 tar/asm/disassemble.go              | 14 +++++++-------
 tar/{asm => storage}/getter.go      |  9 +++++----
 tar/{asm => storage}/getter_test.go |  2 +-
 5 files changed, 18 insertions(+), 17 deletions(-)
 rename tar/{asm => storage}/getter.go (92%)
 rename tar/{asm => storage}/getter_test.go (98%)

diff --git a/tar/asm/assemble.go b/tar/asm/assemble.go
index 610eebd..ec15612 100644
--- a/tar/asm/assemble.go
+++ b/tar/asm/assemble.go
@@ -12,11 +12,11 @@ import (
 // NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive
 // stream.
 //
-// It takes a FileGetter, for mapping the file payloads that are to be read in,
+// It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
 // and a storage.Unpacker, which has access to the rawbytes and file order
 // metadata. With the combination of these two items, a precise assembled Tar
 // archive is possible.
-func NewOutputTarStream(fg FileGetter, up storage.Unpacker) io.ReadCloser {
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
 	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
 	if fg == nil || up == nil {
 		return nil
@@ -45,7 +45,7 @@ func NewOutputTarStream(fg FileGetter, up storage.Unpacker) io.ReadCloser {
 					break
 				}
 				defer fh.Close()
-				c := crc64.New(crcTable)
+				c := crc64.New(storage.CRCTable)
 				tRdr := io.TeeReader(fh, c)
 				if _, err := io.Copy(pw, tRdr); err != nil {
 					pw.CloseWithError(err)
diff --git a/tar/asm/assemble_test.go b/tar/asm/assemble_test.go
index 4ae3f69..a164f0e 100644
--- a/tar/asm/assemble_test.go
+++ b/tar/asm/assemble_test.go
@@ -39,7 +39,7 @@ var entries = []struct {
 }
 
 func TestTarStreamOld(t *testing.T) {
-	fgp := NewBufferFileGetPutter()
+	fgp := storage.NewBufferFileGetPutter()
 
 	// first lets prep a GetPutter and Packer
 	for i := range entries {
@@ -88,7 +88,7 @@ func TestTarStream(t *testing.T) {
 	// Setup where we'll store the metadata
 	w := bytes.NewBuffer([]byte{})
 	sp := storage.NewJsonPacker(w)
-	fgp := NewBufferFileGetPutter()
+	fgp := storage.NewBufferFileGetPutter()
 
 	// wrap the disassembly stream
 	tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
diff --git a/tar/asm/disassemble.go b/tar/asm/disassemble.go
index 27f828c..b5d8564 100644
--- a/tar/asm/disassemble.go
+++ b/tar/asm/disassemble.go
@@ -14,19 +14,19 @@ import (
 // In the middle it will pack the segments and file metadata to storage.Packer
 // `p`.
 //
-// The the FilePutter is where payload of files in the stream are stashed. If
-// this stashing is not needed, fp can be nil or use NewDiscardFilePutter.
-func NewInputTarStream(r io.Reader, p storage.Packer, fp FilePutter) (io.Reader, error) {
+// The storage.FilePutter is where payloads of files in the stream are stashed. If
+// this stashing is not needed, fp can be nil or use storage.NewDiscardFilePutter.
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
 	// What to do here... folks will want their own access to the Reader that is
 	// their tar archive stream, but we'll need that same stream to use our
 	// forked 'archive/tar'.
 	// Perhaps do an io.TeeReader that hand back an io.Reader for them to read
 	// from, and we'll mitm the stream to store metadata.
-	// We'll need a FilePutter too ...
+	// We'll need a storage.FilePutter too ...
 
-	// Another concern, whether to do any FilePutter operations, such that we
+	// Another concern, whether to do any storage.FilePutter operations, such that we
 	// don't extract any amount of the archive. But then again, we're not making
-	// files/directories, hardlinks, etc. Just writing the io to the FilePutter.
+	// files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
 	// Perhaps we have a DiscardFilePutter that is a bit bucket.
 
 	// we'll return the pipe reader, since TeeReader does not buffer and will
@@ -38,7 +38,7 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp FilePutter) (io.Reader,
 
 	// we need a putter that will generate the crc64 sums of file payloads
 	if fp == nil {
-		fp = NewDiscardFilePutter()
+		fp = storage.NewDiscardFilePutter()
 	}
 
 	go func() {
diff --git a/tar/asm/getter.go b/tar/storage/getter.go
similarity index 92%
rename from tar/asm/getter.go
rename to tar/storage/getter.go
index a275dca..907198b 100644
--- a/tar/asm/getter.go
+++ b/tar/storage/getter.go
@@ -1,4 +1,4 @@
-package asm
+package storage
 
 import (
 	"bytes"
@@ -52,7 +52,7 @@ func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
 }
 
 func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
-	c := crc64.New(crcTable)
+	c := crc64.New(CRCTable)
 	tRdr := io.TeeReader(r, c)
 	b := bytes.NewBuffer([]byte{})
 	i, err := io.Copy(b, tRdr)
@@ -88,10 +88,11 @@ type bitBucketFilePutter struct {
 }
 
 func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
-	c := crc64.New(crcTable)
+	c := crc64.New(CRCTable)
 	tRdr := io.TeeReader(r, c)
 	i, err := io.Copy(ioutil.Discard, tRdr)
 	return i, c.Sum(nil), err
 }
 
-var crcTable = crc64.MakeTable(crc64.ISO)
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
diff --git a/tar/asm/getter_test.go b/tar/storage/getter_test.go
similarity index 98%
rename from tar/asm/getter_test.go
rename to tar/storage/getter_test.go
index 47e5e14..5a6fcc7 100644
--- a/tar/asm/getter_test.go
+++ b/tar/storage/getter_test.go
@@ -1,4 +1,4 @@
-package asm
+package storage
 
 import (
 	"bytes"
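
Not part of the patch above: a minimal usage sketch of how the relocated pieces fit together after this move, disassembling a tar archive and then reassembling it. It sticks to identifiers visible in the diff (storage.NewJsonPacker, storage.NewBufferFileGetPutter, asm.NewInputTarStream, asm.NewOutputTarStream); the import paths under github.com/vbatts/tar-split and the storage.NewJsonUnpacker counterpart to NewJsonPacker are assumptions not shown in this patch.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"

	"github.com/vbatts/tar-split/tar/asm"     // assumed import path
	"github.com/vbatts/tar-split/tar/storage" // assumed import path
)

func main() {
	// A tar archive to disassemble.
	fh, err := os.Open("archive.tar")
	if err != nil {
		panic(err)
	}
	defer fh.Close()

	// Segment and file-order metadata is packed to this buffer as JSON.
	metadata := bytes.NewBuffer([]byte{})
	packer := storage.NewJsonPacker(metadata)

	// File payloads are stashed in the GetPutter that now lives in tar/storage.
	fgp := storage.NewBufferFileGetPutter()

	// Wrap the input stream; reading it all the way through drives the disassembly.
	rdr, err := asm.NewInputTarStream(fh, packer, fgp)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(ioutil.Discard, rdr); err != nil {
		panic(err)
	}

	// Reassemble the archive from the stored metadata and payloads.
	// NewJsonUnpacker is assumed here as the counterpart to NewJsonPacker;
	// it is not part of this patch.
	out := asm.NewOutputTarStream(fgp, storage.NewJsonUnpacker(metadata))
	defer out.Close()
	if _, err := io.Copy(ioutil.Discard, out); err != nil {
		panic(err)
	}
}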