forked from mirrors/tar-split
*: golint and docs
parent f7b9a6caee
commit 46840c585a
8 changed files with 34 additions and 23 deletions

@@ -44,8 +44,8 @@ func main() {
 		defer os.Remove(packFh.Name())
 	}
 
-	sp := storage.NewJsonPacker(packFh)
-	fp := asm.NewDiscardFilePutter()
+	sp := storage.NewJSONPacker(packFh)
+	fp := storage.NewDiscardFilePutter()
 	dissam, err := asm.NewInputTarStream(fh, sp, fp)
 	if err != nil {
 		log.Fatal(err)

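For readers skimming the rename above, here is a minimal, self-contained sketch (not part of the commit) of the same disassembly pipeline. The file paths "archive.tar" and "tar-data.jsonl" are placeholders chosen for this sketch only.

package main

import (
	"io"
	"log"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Placeholder paths, used only for this sketch.
	fh, err := os.Open("archive.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()

	packFh, err := os.Create("tar-data.jsonl")
	if err != nil {
		log.Fatal(err)
	}
	defer packFh.Close()

	// Metadata is packed as JSON documents; file payloads are checksummed and
	// then dropped, since only the metadata is being captured here.
	sp := storage.NewJSONPacker(packFh)
	fp := storage.NewDiscardFilePutter()

	dissam, err := asm.NewInputTarStream(fh, sp, fp)
	if err != nil {
		log.Fatal(err)
	}

	// The returned reader replays the original tar bytes; draining it is what
	// drives the packing and putting.
	if _, err := io.Copy(io.Discard, dissam); err != nil {
		log.Fatal(err)
	}
}

The discarding FilePutter suits this use because the original file payloads remain available elsewhere; only their checksums and ordering need to be recorded.
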
@@ -87,7 +87,7 @@ func TestTarStream(t *testing.T) {
 
 	// Setup where we'll store the metadata
 	w := bytes.NewBuffer([]byte{})
-	sp := storage.NewJsonPacker(w)
+	sp := storage.NewJSONPacker(w)
 	fgp := storage.NewBufferFileGetPutter()
 
 	// wrap the disassembly stream

@@ -118,7 +118,7 @@ func TestTarStream(t *testing.T) {
 	// If we've made it this far, then we'll turn it around and create a tar
 	// stream from the packed metadata and buffered file contents.
 	r := bytes.NewBuffer(w.Bytes())
-	sup := storage.NewJsonUnpacker(r)
+	sup := storage.NewJSONUnpacker(r)
 	// and reuse the fgp that we Put the payloads to.
 
 	rc := NewOutputTarStream(fgp, sup)

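The two hunks above are the two halves of the same round trip. A hedged sketch of that flow as a standalone helper, using only the exported API (the test itself lives inside package asm, hence the unqualified NewOutputTarStream); the function name roundTrip and the package name tarsplitexample are invented for illustration.

package tarsplitexample

import (
	"bytes"
	"io"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// roundTrip disassembles a tar stream into JSON metadata plus buffered file
// payloads, then reassembles an equivalent tar stream from those two pieces.
func roundTrip(tarStream io.Reader) (io.ReadCloser, error) {
	// Pack metadata into an in-memory buffer; keep payloads in memory too.
	w := bytes.NewBuffer([]byte{})
	sp := storage.NewJSONPacker(w)
	fgp := storage.NewBufferFileGetPutter()

	rdr, err := asm.NewInputTarStream(tarStream, sp, fgp)
	if err != nil {
		return nil, err
	}
	// Drain the wrapped reader so every Entry is packed and every payload Put.
	if _, err := io.Copy(io.Discard, rdr); err != nil {
		return nil, err
	}

	// Turn it around: unpack the same metadata and reuse the payloads we Put.
	sup := storage.NewJSONUnpacker(bytes.NewBuffer(w.Bytes()))
	return asm.NewOutputTarStream(fgp, sup), nil
}
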
@@ -1,5 +1,6 @@
 /*
-asm provides the API for streaming assembly and disassembly of tar archives.
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
 
 Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the
 metadata for a stream, as well as an implementation of Getting/Putting the file

@@ -1,5 +1,5 @@
 /*
-storage is for metadata of a tar archive.
+Package storage is for metadata of a tar archive.
 
 Packing and unpacking the Entries of the stream. The types of streams are
 either segments of raw bytes (for the raw headers and various padding) and for

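To make the distinction in that package doc concrete (raw-byte segments versus file entries whose contents live with a FilePutter), a small sketch with invented field values:

package main

import (
	"fmt"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// A segment carries raw archive bytes (headers, padding) verbatim.
	seg := storage.Entry{
		Type:     storage.SegmentType,
		Payload:  []byte("\x00raw tar header and padding bytes\x00"),
		Position: 0,
	}

	// A file entry records the path and size; the file's contents are handed
	// to a FilePutter rather than stored in the metadata stream.
	file := storage.Entry{
		Type:     storage.FileType,
		Name:     "./hurr.txt",
		Size:     26,
		Position: 1,
	}

	fmt.Println(seg.Position, file.Position, file.Name, file.Size)
}
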
@@ -7,6 +7,7 @@ func (e Entries) Len() int { return len(e) }
 func (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
 func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
 
+// Type of Entry
 type Type int
 
 const (

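Since Entries implements sort.Interface with Less ordering on Position (as shown in the hunk above), sorting restores archive order. A brief sketch with made-up entries:

package main

import (
	"fmt"
	"sort"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	entries := storage.Entries{
		{Type: storage.FileType, Name: "./b.txt", Position: 2},
		{Type: storage.SegmentType, Position: 0},
		{Type: storage.FileType, Name: "./a.txt", Position: 1},
	}

	// Entries.Less compares Position, so this yields 0, 1, 2.
	sort.Sort(entries)

	for _, e := range entries {
		fmt.Println(e.Position, e.Name)
	}
}
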
@@ -10,22 +10,31 @@ import (
 	"path"
 )
 
+// FileGetter is the interface for getting a stream of a file payload, address
+// by name/filepath. Presumably, the names will be scoped to relative file
+// paths.
 type FileGetter interface {
 	// Get returns a stream for the provided file path
-	Get(string) (io.ReadCloser, error)
+	Get(filepath string) (output io.ReadCloser, err error)
 }
 
+// FilePutter is the interface for storing a stream of a file payload,
+// addressed by name/filepath.
 type FilePutter interface {
-	// Put returns the crc64 checksum for the provided file
-	Put(string, io.Reader) (int64, []byte, error)
+	// Put returns the size of the stream received, and the crc64 checksum for
+	// the provided stream
+	Put(filepath string, input io.Reader) (size int64, checksum []byte, err error)
 }
 
+// FileGetPutter is the interface that groups both Getting and Putting file
+// payloads.
 type FileGetPutter interface {
 	FileGetter
 	FilePutter
 }
 
-// NewPathFileGetter returns a FileGetter that is for files relative to path relpath.
+// NewPathFileGetter returns a FileGetter that is for files relative to path
+// relpath.
 func NewPathFileGetter(relpath string) FileGetter {
 	return &pathFileGetter{root: relpath}
 }

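A short sketch of the Get/Put contract documented above, using the in-memory FileGetPutter; the path and payload are invented for illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	fgp := storage.NewBufferFileGetPutter()

	// Put reports the size of the stream received and its crc64 checksum.
	size, checksum, err := fgp.Put("./hello.txt", bytes.NewBufferString("hello, tar-split"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored %d bytes, crc64 %x\n", size, checksum)

	// Get returns a stream for the same relative path.
	rc, err := fgp.Get("./hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	payload, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read back: %s\n", payload)
}
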
@@ -8,9 +8,9 @@ import (
 	"path"
 )
 
-var (
-	ErrDuplicatePath = errors.New("duplicates of file paths not supported")
-)
+// ErrDuplicatePath is occured when a tar archive has more than one entry for
+// the same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
 
 // Packer describes the methods to pack Entries to a storage destination
 type Packer interface {

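The newly documented ErrDuplicatePath is the error a Packer surfaces when two FileType entries share a path (see TestDuplicateFail further down). A hedged sketch with invented entries:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"log"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	jp := storage.NewJSONPacker(bytes.NewBuffer([]byte{}))

	first := storage.Entry{Type: storage.FileType, Name: "./dup.txt", Size: 4}
	if _, err := jp.AddEntry(first); err != nil {
		log.Fatal(err)
	}

	// Adding a second entry for the same path is rejected.
	dup := storage.Entry{Type: storage.FileType, Name: "./dup.txt", Size: 4}
	if _, err := jp.AddEntry(dup); errors.Is(err, storage.ErrDuplicatePath) {
		fmt.Println("duplicate path rejected:", err)
	}
}
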
@@ -71,11 +71,11 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
 	return &e, err
 }
 
-// NewJsonUnpacker provides an Unpacker that reads Entries (SegmentType and
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
 // FileType) as a json document.
 //
 // Each Entry read are expected to be delimited by new line.
-func NewJsonUnpacker(r io.Reader) Unpacker {
+func NewJSONUnpacker(r io.Reader) Unpacker {
 	return &jsonUnpacker{
 		r: r,
 		b: bufio.NewReader(r),

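Usage of the renamed unpacker follows the same pattern as the tests later in this diff: call Next until it reports io.EOF. A minimal sketch; the helper name readEntries and the package name are invented, and the packed metadata is assumed to already be in r.

package tarsplitexample

import (
	"io"

	"github.com/vbatts/tar-split/tar/storage"
)

// readEntries drains an Unpacker produced by storage.NewJSONUnpacker,
// returning the Entries in the order they were packed.
func readEntries(r io.Reader) (storage.Entries, error) {
	jup := storage.NewJSONUnpacker(r)
	entries := storage.Entries{}
	for {
		entry, err := jup.Next()
		if err == io.EOF {
			return entries, nil
		}
		if err != nil {
			return nil, err
		}
		entries = append(entries, *entry)
	}
}
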
@@ -117,11 +117,11 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 	return e.Position, nil
 }
 
-// NewJsonPacker provides an Packer that writes each Entry (SegmentType and
+// NewJSONPacker provides an Packer that writes each Entry (SegmentType and
 // FileType) as a json document.
 //
 // The Entries are delimited by new line.
-func NewJsonPacker(w io.Writer) Packer {
+func NewJSONPacker(w io.Writer) Packer {
 	return &jsonPacker{
 		w: w,
 		e: json.NewEncoder(w),

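And the packer side: each AddEntry writes one newline-delimited JSON document and returns the Entry's position in the stream. A small sketch with invented entries:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	buf := bytes.NewBuffer([]byte{})
	jp := storage.NewJSONPacker(buf)

	entries := []storage.Entry{
		{Type: storage.SegmentType, Payload: []byte("invented raw bytes")},
		{Type: storage.FileType, Name: "./hello.txt", Size: 5},
	}
	for _, e := range entries {
		pos, err := jp.AddEntry(e)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("packed entry at position", pos)
	}

	// One newline-delimited JSON document per Entry.
	fmt.Print(buf.String())
}
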
@@ -28,7 +28,7 @@ func TestDuplicateFail(t *testing.T) {
 	buf := []byte{}
 	b := bytes.NewBuffer(buf)
 
-	jp := NewJsonPacker(b)
+	jp := NewJSONPacker(b)
 	if _, err := jp.AddEntry(e[0]); err != nil {
 		t.Error(err)
 	}

@@ -40,7 +40,7 @@ func TestDuplicateFail(t *testing.T) {
 	}
 }
 
-func TestJsonPackerUnpacker(t *testing.T) {
+func TestJSONPackerUnpacker(t *testing.T) {
 	e := []Entry{
 		Entry{
 			Type: SegmentType,

@@ -65,7 +65,7 @@ func TestJsonPackerUnpacker(t *testing.T) {
 	b := bytes.NewBuffer(buf)
 
 	func() {
-		jp := NewJsonPacker(b)
+		jp := NewJSONPacker(b)
 		for i := range e {
 			if _, err := jp.AddEntry(e[i]); err != nil {
 				t.Error(err)

@@ -79,7 +79,7 @@ func TestJsonPackerUnpacker(t *testing.T) {
 	b = bytes.NewBuffer(b.Bytes())
 	entries := Entries{}
 	func() {
-		jup := NewJsonUnpacker(b)
+		jup := NewJSONUnpacker(b)
 		for {
 			entry, err := jup.Next()
 			if err != nil {

@@ -125,7 +125,7 @@ func TestGzip(t *testing.T) {
 	buf := []byte{}
 	b := bytes.NewBuffer(buf)
 	gzW := gzip.NewWriter(b)
-	jp := NewJsonPacker(gzW)
+	jp := NewJSONPacker(gzW)
 	for i := range e {
 		if _, err := jp.AddEntry(e[i]); err != nil {
 			t.Error(err)

@@ -143,7 +143,7 @@ func TestGzip(t *testing.T) {
 	}
 	entries := Entries{}
 	func() {
-		jup := NewJsonUnpacker(gzR)
+		jup := NewJSONUnpacker(gzR)
 		for {
 			entry, err := jup.Next()
 			if err != nil {