
tar/asm: DiscardFilePutter and stub disassemble

Add a bit-bucket FilePutter, for when the file payloads do not matter.

Beginning thoughts on disassembly; open questions remain, captured mostly
as comments in the function for now.
Vincent Batts 2015-03-02 15:25:03 -05:00
parent ccf6fa61a6
commit 4e27d04b0b
5 changed files with 83 additions and 1 deletion


@@ -6,7 +6,18 @@ import (
     "github.com/vbatts/tar-split/tar/storage"
 )
 
-func NewTarStream(fg FileGetter, up storage.Unpacker) io.ReadCloser {
+// NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive
+// stream.
+//
+// It takes a FileGetter, for mapping the file payloads that are to be read in,
+// and a storage.Unpacker, which has access to the rawbytes and file order
+// metadata. With the combination of these two items, a precise assembled Tar
+// archive is possible.
+func NewOutputTarStream(fg FileGetter, up storage.Unpacker) io.ReadCloser {
+    // ... Since these are interfaces, this is possible, so let's not have a nil pointer
+    if fg == nil || up == nil {
+        return nil
+    }
     pr, pw := io.Pipe()
     go func() {
         for {
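As a rough illustration of how the renamed function is meant to be called, the sketch below wires a BufferFileGetPutter and the storage package's JSON unpacker into NewOutputTarStream. This is only a sketch: the "tar-data.json" path is hypothetical, storage.NewJsonUnpacker is assumed from the tar/storage package, and in practice the getter would need the file payloads Put into it before assembly.

    package main

    import (
        "io"
        "os"

        "github.com/vbatts/tar-split/tar/asm"
        "github.com/vbatts/tar-split/tar/storage"
    )

    func main() {
        // metadata previously recorded by a storage.Packer (hypothetical path)
        mf, err := os.Open("tar-data.json")
        if err != nil {
            panic(err)
        }
        defer mf.Close()

        // payloads would normally be Put() into the getter before assembling
        fgp := asm.NewBufferFileGetPutter()

        ts := asm.NewOutputTarStream(fgp, storage.NewJsonUnpacker(mf))
        defer ts.Close()

        // the assembled archive streams out as a plain tar
        if _, err := io.Copy(os.Stdout, ts); err != nil {
            panic(err)
        }
    }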

tar/asm/assemble_test.go (new file)

@@ -0,0 +1,9 @@
+package asm
+
+import "testing"
+
+func TestNewOutputTarStream(t *testing.T) {
+    // TODO disassembly
+    fgp := NewBufferFileGetPutter()
+    _ = NewOutputTarStream(fgp, nil)
+}

tar/asm/disassemble.go (new file)

@@ -0,0 +1,34 @@
+package asm
+
+import (
+    "io"
+
+    "github.com/vbatts/tar-split/archive/tar"
+    "github.com/vbatts/tar-split/tar/storage"
+)
+
+func NewInputTarStream(r io.Reader, fp FilePutter, p storage.Packer) (io.Reader, error) {
+    // What to do here... folks will want their own access to the Reader that is
+    // their tar archive stream, but we'll need that same stream to use our
+    // forked 'archive/tar'.
+    // Perhaps do an io.TeeReader that hand back an io.Reader for them to read
+    // from, and we'll mitm the stream to store metadata.
+    // We'll need a FilePutter too ...
+
+    // Another concern, whether to do any FilePutter operations, such that we
+    // don't extract any amount of the archive. But then again, we're not making
+    // files/directories, hardlinks, etc. Just writing the io to the FilePutter.
+    // Perhaps we have a DiscardFilePutter that is a bit bucket.
+
+    // we'll return the pipe reader, since TeeReader does not buffer and will
+    // only read what the outputRdr Read's. Since Tar archive's have padding on
+    // the end, we want to be the one reading the padding, even if the user's
+    // `archive/tar` doesn't care.
+    pR, pW := io.Pipe()
+    outputRdr := io.TeeReader(r, pW)
+    tr := tar.NewReader(outputRdr)
+    tr.RawAccounting = true
+
+    return pR, nil
+}
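The pipe-and-TeeReader idea described in those comments can be shown with the standard library alone: a tar reader walks a tee'd copy of the stream and throws the payloads away (much like the bit-bucket FilePutter), while the caller reads the untouched bytes back out of the pipe. This is only a sketch of the pattern, using stdlib archive/tar rather than the fork, so RawAccounting is not available; error checks on the tar writer are abbreviated.

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
        "io"
        "io/ioutil"
        "log"
    )

    func main() {
        // build a tiny in-memory tar archive to stand in for the input stream
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        body := []byte("hello tar-split")
        tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body))})
        tw.Write(body)
        tw.Close()

        // tee the input: whatever the tar reader consumes is also written to
        // the pipe, so the caller still sees the original bytes
        pR, pW := io.Pipe()
        tee := io.TeeReader(&buf, pW)

        go func() {
            tr := tar.NewReader(tee)
            for {
                hdr, err := tr.Next()
                if err == io.EOF {
                    break
                }
                if err != nil {
                    pW.CloseWithError(err)
                    return
                }
                fmt.Println("saw entry:", hdr.Name)
                // drain the payload, as a bit-bucket FilePutter would
                if _, err := io.Copy(ioutil.Discard, tr); err != nil {
                    pW.CloseWithError(err)
                    return
                }
            }
            // consume any trailing padding the tar reader left behind
            io.Copy(ioutil.Discard, tee)
            pW.Close()
        }()

        // the caller reads the untouched stream back from the pipe
        n, err := io.Copy(ioutil.Discard, pR)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("passed through", n, "bytes")
    }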


@@ -4,6 +4,7 @@ import (
     "bytes"
     "errors"
     "io"
+    "io/ioutil"
     "os"
     "path"
 )
@@ -73,3 +74,16 @@ func NewBufferFileGetPutter() FileGetPutter {
         files: map[string][]byte{},
     }
 }
+
+// NewDiscardFilePutter is a bit bucket FilePutter
+func NewDiscardFilePutter() FilePutter {
+    return &bitBucketFilePutter{}
+}
+
+type bitBucketFilePutter struct {
+}
+
+func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) error {
+    _, err := io.Copy(ioutil.Discard, r)
+    return err
+}


@@ -31,3 +31,17 @@ func TestGetter(t *testing.T) {
         }
     }
 }
+func TestPutter(t *testing.T) {
+    fp := NewDiscardFilePutter()
+    files := map[string][]byte{
+        "file1.txt": []byte("foo"),
+        "file2.txt": []byte("bar"),
+        "file3.txt": []byte("baz"),
+        "file4.txt": []byte("bif"),
+    }
+    for n, b := range files {
+        if err := fp.Put(n, bytes.NewBuffer(b)); err != nil {
+            t.Error(err)
+        }
+    }
+}