forked from mirrors/tar-split

tar: mv the Getter to tar/storage

Vincent Batts 2015-03-09 13:20:26 -04:00
parent e045daf0b0
commit d8ebf3c0a7
5 changed files with 18 additions and 17 deletions
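Before the per-file hunks, a quick orientation: this commit moves the getter/putter interfaces out of tar/asm and into tar/storage. The sketch below shows their shape as implied by the signatures in the hunks; the doc comments and the combined FileGetPutter name are inferred, not copied verbatim from the file.

package storage

import "io"

// FileGetter maps a name to a readable stream of that file's payload.
type FileGetter interface {
	Get(name string) (io.ReadCloser, error)
}

// FilePutter stores a payload by name and reports the size written, a
// crc64 checksum of the bytes, and any error.
type FilePutter interface {
	Put(name string, r io.Reader) (int64, []byte, error)
}

// FileGetPutter is both; NewBufferFileGetPutter returns one.
type FileGetPutter interface {
	FileGetter
	FilePutter
}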

View file

@@ -12,11 +12,11 @@ import (
 // NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive
 // stream.
 //
-// It takes a FileGetter, for mapping the file payloads that are to be read in,
+// It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
 // and a storage.Unpacker, which has access to the rawbytes and file order
 // metadata. With the combination of these two items, a precise assembled Tar
 // archive is possible.
-func NewOutputTarStream(fg FileGetter, up storage.Unpacker) io.ReadCloser {
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
 	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
 	if fg == nil || up == nil {
 		return nil
@@ -45,7 +45,7 @@ func NewOutputTarStream(fg FileGetter, up storage.Unpacker) io.ReadCloser {
 			break
 		}
 		defer fh.Close()
-		c := crc64.New(crcTable)
+		c := crc64.New(storage.CRCTable)
 		tRdr := io.TeeReader(fh, c)
 		if _, err := io.Copy(pw, tRdr); err != nil {
 			pw.CloseWithError(err)
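The hunk above only changes the parameter type, but it changes how callers spell things. A minimal caller-side sketch of reassembly with the relocated type; the import paths and storage.NewJsonUnpacker are assumptions based on the upstream project layout, not shown in this diff.

package example

import (
	"errors"
	"io"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// reassemble writes the rebuilt tar archive to stdout. fg is any
// storage.FileGetter; metadata is the stream a storage.Packer wrote earlier.
func reassemble(fg storage.FileGetter, metadata io.Reader) error {
	ts := asm.NewOutputTarStream(fg, storage.NewJsonUnpacker(metadata))
	if ts == nil {
		return errors.New("nil FileGetter or Unpacker")
	}
	defer ts.Close()
	_, err := io.Copy(os.Stdout, ts)
	return err
}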

View file

@@ -39,7 +39,7 @@ var entries = []struct {
 }
 
 func TestTarStreamOld(t *testing.T) {
-	fgp := NewBufferFileGetPutter()
+	fgp := storage.NewBufferFileGetPutter()
 
 	// first lets prep a GetPutter and Packer
 	for i := range entries {
@@ -88,7 +88,7 @@ func TestTarStream(t *testing.T) {
 	// Setup where we'll store the metadata
 	w := bytes.NewBuffer([]byte{})
 	sp := storage.NewJsonPacker(w)
-	fgp := NewBufferFileGetPutter()
+	fgp := storage.NewBufferFileGetPutter()
 
 	// wrap the disassembly stream
 	tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
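One detail the test flow above relies on: NewInputTarStream returns a pipe-backed reader (see the TeeReader comment in the next file), so the caller has to keep reading it for packing to proceed. A hedged sketch of the read-through that would follow, reusing the test's variable names:

// The returned tarStream must be drained; the goroutine feeding the pipe
// only makes progress as the caller reads. Discarding is fine when only
// the packed metadata and stored payloads are wanted.
if _, err := io.Copy(ioutil.Discard, tarStream); err != nil {
	t.Fatal(err)
}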

View file

@@ -14,19 +14,19 @@ import (
 // In the middle it will pack the segments and file metadata to storage.Packer
 // `p`.
 //
-// The the FilePutter is where payload of files in the stream are stashed. If
-// this stashing is not needed, fp can be nil or use NewDiscardFilePutter.
-func NewInputTarStream(r io.Reader, p storage.Packer, fp FilePutter) (io.Reader, error) {
+// The the storage.FilePutter is where payload of files in the stream are stashed. If
+// this stashing is not needed, fp can be nil or use storage.NewDiscardFilePutter.
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
 	// What to do here... folks will want their own access to the Reader that is
 	// their tar archive stream, but we'll need that same stream to use our
 	// forked 'archive/tar'.
 	// Perhaps do an io.TeeReader that hand back an io.Reader for them to read
 	// from, and we'll mitm the stream to store metadata.
-	// We'll need a FilePutter too ...
-	// Another concern, whether to do any FilePutter operations, such that we
+	// We'll need a storage.FilePutter too ...
+	// Another concern, whether to do any storage.FilePutter operations, such that we
 	// don't extract any amount of the archive. But then again, we're not making
-	// files/directories, hardlinks, etc. Just writing the io to the FilePutter.
+	// files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
 	// Perhaps we have a DiscardFilePutter that is a bit bucket.
 
 	// we'll return the pipe reader, since TeeReader does not buffer and will
@@ -38,7 +38,7 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp FilePutter) (io.Reader,
 
 	// we need a putter that will generate the crc64 sums of file payloads
 	if fp == nil {
-		fp = NewDiscardFilePutter()
+		fp = storage.NewDiscardFilePutter()
 	}
 
 	go func() {
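A sketch of the corresponding caller-side change for disassembly: the putter argument is now typed as storage.FilePutter, and nil still falls back to the discard putter as the hunk above shows. The import paths and the helper name are assumptions for illustration.

package example

import (
	"io"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// disassemble packs segment and file metadata as JSON into metadataOut and
// returns a reader that replays the original archive byte-for-byte.
// Passing nil for the putter checksums payloads but discards their bytes.
func disassemble(archive io.Reader, metadataOut io.Writer) (io.Reader, error) {
	return asm.NewInputTarStream(archive, storage.NewJsonPacker(metadataOut), nil)
}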

View file

@@ -1,4 +1,4 @@
-package asm
+package storage
 
 import (
 	"bytes"
@@ -52,7 +52,7 @@ func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
 }
 
 func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
-	c := crc64.New(crcTable)
+	c := crc64.New(CRCTable)
 	tRdr := io.TeeReader(r, c)
 	b := bytes.NewBuffer([]byte{})
 	i, err := io.Copy(b, tRdr)
@@ -88,10 +88,11 @@ type bitBucketFilePutter struct {
 }
 
 func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
-	c := crc64.New(crcTable)
+	c := crc64.New(CRCTable)
 	tRdr := io.TeeReader(r, c)
 	i, err := io.Copy(ioutil.Discard, tRdr)
 	return i, c.Sum(nil), err
 }
 
-var crcTable = crc64.MakeTable(crc64.ISO)
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
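Exporting CRCTable means code outside tar/storage can reproduce the same crc64 sums the putters compute. A minimal sketch; the helper name and import path are made up for illustration.

package example

import (
	"hash/crc64"
	"io"

	"github.com/vbatts/tar-split/tar/storage"
)

// payloadChecksum mirrors what the putters above do: stream the payload
// through a crc64 hash built from the now-exported storage.CRCTable and
// return the byte count and the checksum.
func payloadChecksum(r io.Reader) (int64, []byte, error) {
	c := crc64.New(storage.CRCTable)
	n, err := io.Copy(c, r)
	return n, c.Sum(nil), err
}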

View file

@@ -1,4 +1,4 @@
-package asm
+package storage
 
 import (
 	"bytes"