
*: golint and docs

Vincent Batts 2015-03-09 14:11:11 -04:00
parent f7b9a6caee
commit 46840c585a
8 changed files with 34 additions and 23 deletions

View file

@@ -44,8 +44,8 @@ func main() {
 		defer os.Remove(packFh.Name())
 	}
 
-	sp := storage.NewJsonPacker(packFh)
-	fp := asm.NewDiscardFilePutter()
+	sp := storage.NewJSONPacker(packFh)
+	fp := storage.NewDiscardFilePutter()
 	dissam, err := asm.NewInputTarStream(fh, sp, fp)
 	if err != nil {
 		log.Fatal(err)
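
For context, the hunk above sits in the command's disassembly path. Below is a minimal, self-contained sketch of that flow with the renamed identifiers; the input name archive.tar and the surrounding structure are illustrative assumptions, not this file's actual contents.

    package main

    import (
        "io"
        "io/ioutil"
        "log"
        "os"

        "github.com/vbatts/tar-split/tar/asm"
        "github.com/vbatts/tar-split/tar/storage"
    )

    func main() {
        // "archive.tar" is a hypothetical input path.
        fh, err := os.Open("archive.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer fh.Close()

        // Metadata destination; removed afterwards, as in the hunk above.
        packFh, err := ioutil.TempFile("", "pack.")
        if err != nil {
            log.Fatal(err)
        }
        defer os.Remove(packFh.Name())

        sp := storage.NewJSONPacker(packFh)  // renamed from NewJsonPacker
        fp := storage.NewDiscardFilePutter() // moved here from the asm package
        dissam, err := asm.NewInputTarStream(fh, sp, fp)
        if err != nil {
            log.Fatal(err)
        }
        // Draining the stream drives the packer; metadata lands in packFh.
        if _, err := io.Copy(ioutil.Discard, dissam); err != nil {
            log.Fatal(err)
        }
    }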

View file

@@ -87,7 +87,7 @@ func TestTarStream(t *testing.T) {
 	// Setup where we'll store the metadata
 	w := bytes.NewBuffer([]byte{})
-	sp := storage.NewJsonPacker(w)
+	sp := storage.NewJSONPacker(w)
 	fgp := storage.NewBufferFileGetPutter()
 
 	// wrap the disassembly stream
@@ -118,7 +118,7 @@ func TestTarStream(t *testing.T) {
 	// If we've made it this far, then we'll turn it around and create a tar
 	// stream from the packed metadata and buffered file contents.
 	r := bytes.NewBuffer(w.Bytes())
-	sup := storage.NewJsonUnpacker(r)
+	sup := storage.NewJSONUnpacker(r)
 	// and reuse the fgp that we Put the payloads to.
 	rc := NewOutputTarStream(fgp, sup)
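
The reassembly half that this test drives is symmetric to disassembly. A minimal sketch, assuming the packed metadata bytes and a FileGetter holding the payloads that were Put during disassembly; the helper name reproduce is hypothetical:

    package example

    import (
        "bytes"
        "io"

        "github.com/vbatts/tar-split/tar/asm"
        "github.com/vbatts/tar-split/tar/storage"
    )

    // reproduce streams a byte-for-byte reconstruction of the original tar
    // archive to dst, given the packed metadata and the stored file payloads.
    func reproduce(dst io.Writer, packed []byte, fgp storage.FileGetter) error {
        sup := storage.NewJSONUnpacker(bytes.NewBuffer(packed))
        rc := asm.NewOutputTarStream(fgp, sup) // package-qualified outside tar/asm
        defer rc.Close()
        _, err := io.Copy(dst, rc)
        return err
    }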

View file

@@ -1,5 +1,6 @@
 /*
-asm provides the API for streaming assembly and disassembly of tar archives.
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
 
 Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the
 metadata for a stream, as well as an implementation of Getting/Putting the file

View file

@@ -1,5 +1,5 @@
 /*
-storage is for metadata of a tar archive.
+Package storage is for metadata of a tar archive.
 
 Packing and unpacking the Entries of the stream. The types of streams are
 either segments of raw bytes (for the raw headers and various padding) and for

View file

@@ -7,6 +7,7 @@ func (e Entries) Len() int { return len(e) }
 func (e Entries) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
 func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
 
+// Type of Entry
 type Type int
 
 const (
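
Since Entries carries Len, Swap, and Less, it satisfies sort.Interface ordered by Position, so entries gathered out of order can be restored to stream order. A minimal sketch with made-up positions:

    package main

    import (
        "fmt"
        "sort"

        "github.com/vbatts/tar-split/tar/storage"
    )

    func main() {
        // Hypothetical entries, collected out of stream order.
        entries := storage.Entries{
            {Type: storage.SegmentType, Position: 2},
            {Type: storage.FileType, Position: 1},
            {Type: storage.SegmentType, Position: 0},
        }
        // Entries satisfies sort.Interface; Less orders by Position.
        sort.Sort(entries)
        for _, e := range entries {
            fmt.Println(e.Position, e.Type)
        }
    }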

View file

@@ -10,22 +10,31 @@ import (
 	"path"
 )
 
+// FileGetter is the interface for getting a stream of a file payload, address
+// by name/filepath. Presumably, the names will be scoped to relative file
+// paths.
 type FileGetter interface {
 	// Get returns a stream for the provided file path
-	Get(string) (io.ReadCloser, error)
+	Get(filepath string) (output io.ReadCloser, err error)
 }
 
+// FilePutter is the interface for storing a stream of a file payload,
+// addressed by name/filepath.
 type FilePutter interface {
-	// Put returns the crc64 checksum for the provided file
-	Put(string, io.Reader) (int64, []byte, error)
+	// Put returns the size of the stream received, and the crc64 checksum for
+	// the provided stream
+	Put(filepath string, input io.Reader) (size int64, checksum []byte, err error)
 }
 
+// FileGetPutter is the interface that groups both Getting and Putting file
+// payloads.
 type FileGetPutter interface {
 	FileGetter
 	FilePutter
 }
 
-// NewPathFileGetter returns a FileGetter that is for files relative to path relpath.
+// NewPathFileGetter returns a FileGetter that is for files relative to path
+// relpath.
 func NewPathFileGetter(relpath string) FileGetter {
 	return &pathFileGetter{root: relpath}
 }
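
Both sides of these interfaces appear in this commit: NewPathFileGetter for the Get side, and NewDiscardFilePutter (relocated from asm, per the first hunk of the commit) for the Put side. A minimal sketch, assuming a hypothetical directory ./unpacked of previously extracted files and that the discard putter still reports size and crc64 as the Put contract describes:

    package main

    import (
        "log"

        "github.com/vbatts/tar-split/tar/storage"
    )

    func main() {
        // Get side: paths are resolved relative to "./unpacked" (hypothetical).
        fg := storage.NewPathFileGetter("./unpacked")
        rc, err := fg.Get("etc/hosts") // a relative file path, per FileGetter
        if err != nil {
            log.Fatal(err)
        }
        defer rc.Close()

        // Put side: the discard putter consumes the stream without storing it.
        fp := storage.NewDiscardFilePutter()
        size, checksum, err := fp.Put("etc/hosts", rc)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("put %d bytes, crc64 %x", size, checksum)
    }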

View file

@@ -8,9 +8,9 @@ import (
 	"path"
 )
 
-var (
-	ErrDuplicatePath = errors.New("duplicates of file paths not supported")
-)
+// ErrDuplicatePath is occured when a tar archive has more than one entry for
+// the same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
 
 // Packer describes the methods to pack Entries to a storage destination
 type Packer interface {
@@ -71,11 +71,11 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
 	return &e, err
 }
 
-// NewJsonUnpacker provides an Unpacker that reads Entries (SegmentType and
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
 // FileType) as a json document.
 //
 // Each Entry read are expected to be delimited by new line.
-func NewJsonUnpacker(r io.Reader) Unpacker {
+func NewJSONUnpacker(r io.Reader) Unpacker {
 	return &jsonUnpacker{
 		r: r,
 		b: bufio.NewReader(r),
@@ -117,11 +117,11 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 	return e.Position, nil
 }
 
-// NewJsonPacker provides an Packer that writes each Entry (SegmentType and
+// NewJSONPacker provides an Packer that writes each Entry (SegmentType and
 // FileType) as a json document.
 //
 // The Entries are delimited by new line.
-func NewJsonPacker(w io.Writer) Packer {
+func NewJSONPacker(w io.Writer) Packer {
 	return &jsonPacker{
 		w: w,
 		e: json.NewEncoder(w),
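
Distilled from the test file that follows, a round trip through the renamed constructors; the bare Entry literal and the io.EOF check on Next are assumptions of this sketch:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "log"

        "github.com/vbatts/tar-split/tar/storage"
    )

    func main() {
        buf := bytes.NewBuffer(nil)

        // Pack: each AddEntry appends one newline-delimited json document.
        jp := storage.NewJSONPacker(buf)
        if _, err := jp.AddEntry(storage.Entry{Type: storage.SegmentType}); err != nil {
            log.Fatal(err)
        }

        // Unpack: Next yields Entries until the stream is exhausted.
        jup := storage.NewJSONUnpacker(bytes.NewBuffer(buf.Bytes()))
        for {
            entry, err := jup.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(entry.Type, entry.Position)
        }
    }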

View file

@@ -28,7 +28,7 @@ func TestDuplicateFail(t *testing.T) {
 	buf := []byte{}
 	b := bytes.NewBuffer(buf)
 
-	jp := NewJsonPacker(b)
+	jp := NewJSONPacker(b)
 	if _, err := jp.AddEntry(e[0]); err != nil {
 		t.Error(err)
 	}
@@ -40,7 +40,7 @@ func TestDuplicateFail(t *testing.T) {
 	}
 }
 
-func TestJsonPackerUnpacker(t *testing.T) {
+func TestJSONPackerUnpacker(t *testing.T) {
 	e := []Entry{
 		Entry{
 			Type: SegmentType,
@@ -65,7 +65,7 @@ func TestJsonPackerUnpacker(t *testing.T) {
 	b := bytes.NewBuffer(buf)
 
 	func() {
-		jp := NewJsonPacker(b)
+		jp := NewJSONPacker(b)
 		for i := range e {
 			if _, err := jp.AddEntry(e[i]); err != nil {
 				t.Error(err)
@@ -79,7 +79,7 @@ func TestJsonPackerUnpacker(t *testing.T) {
 	b = bytes.NewBuffer(b.Bytes())
 	entries := Entries{}
 	func() {
-		jup := NewJsonUnpacker(b)
+		jup := NewJSONUnpacker(b)
 		for {
 			entry, err := jup.Next()
 			if err != nil {
@@ -125,7 +125,7 @@ func TestGzip(t *testing.T) {
 	buf := []byte{}
 	b := bytes.NewBuffer(buf)
 	gzW := gzip.NewWriter(b)
-	jp := NewJsonPacker(gzW)
+	jp := NewJSONPacker(gzW)
 	for i := range e {
 		if _, err := jp.AddEntry(e[i]); err != nil {
 			t.Error(err)
@@ -143,7 +143,7 @@ func TestGzip(t *testing.T) {
 	}
 	entries := Entries{}
 	func() {
-		jup := NewJsonUnpacker(gzR)
+		jup := NewJSONUnpacker(gzR)
 		for {
 			entry, err := jup.Next()
 			if err != nil {