Mirror of https://github.com/vbatts/tar-split.git, synced 2024-11-15 04:58:36 +00:00
*: clean up assorted spelling/grammar issues
Various minor fixes noticed while walking through
parent e0e9886972
commit 002d19f0b0
6 changed files with 15 additions and 15 deletions
@@ -9,7 +9,7 @@ import (
 	"github.com/vbatts/tar-split/tar/storage"
 )
 
-// NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
 // stream.
 //
 // It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
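
For context on the assembly side, here is a minimal usage sketch of NewOutputTarStream. It assumes the function's second parameter is a storage.Unpacker and that storage.NewJSONUnpacker and storage.NewBufferFileGetPutter exist in the upstream package (only the storage.FileGetter parameter is visible in this hunk); the metadata file name is purely illustrative.

package main

import (
	"io"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Newline-delimited JSON Entries recorded earlier by a Packer
	// (file name is illustrative only).
	meta, err := os.Open("tar-data.jsonl")
	if err != nil {
		panic(err)
	}
	defer meta.Close()

	// The FileGetter maps the file payloads to be read back in; an
	// in-memory FileGetPutter stands in for whatever store was used.
	fg := storage.NewBufferFileGetPutter()

	// Assemble the tar archive stream and copy it out.
	ts := asm.NewOutputTarStream(fg, storage.NewJSONUnpacker(meta))
	defer ts.Close()

	if _, err := io.Copy(os.Stdout, ts); err != nil {
		panic(err)
	}
}
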
@@ -22,8 +22,8 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 	// What to do here... folks will want their own access to the Reader that is
 	// their tar archive stream, but we'll need that same stream to use our
 	// forked 'archive/tar'.
-	// Perhaps do an io.TeeReader that hand back an io.Reader for them to read
-	// from, and we'll mitm the stream to store metadata.
+	// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+	// from, and we'll MITM the stream to store metadata.
 	// We'll need a storage.FilePutter too ...
 
 	// Another concern, whether to do any storage.FilePutter operations, such that we
@@ -32,7 +32,7 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 	// Perhaps we have a DiscardFilePutter that is a bit bucket.
 
 	// we'll return the pipe reader, since TeeReader does not buffer and will
-	// only read what the outputRdr Read's. Since Tar archive's have padding on
+	// only read what the outputRdr Read's. Since Tar archives have padding on
 	// the end, we want to be the one reading the padding, even if the user's
 	// `archive/tar` doesn't care.
 	pR, pW := io.Pipe()
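
The io.TeeReader-into-io.Pipe idea described in these comments can be sketched generically: the caller reads untouched bytes from the pipe reader, while this side drives the reads (including any trailing padding) and is free to record metadata along the way. This is a standalone illustration of the pattern, not the function's actual body.

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("the original tar stream would go here")

	// Hand the caller pR; everything we read from tee is mirrored into pW.
	pR, pW := io.Pipe()
	tee := io.TeeReader(src, pW)

	go func() {
		// We drive the reads ourselves, so we also consume any trailing
		// padding the caller's own reader might not bother with.
		_, err := io.Copy(io.Discard, tee)
		pW.CloseWithError(err)
	}()

	out, err := io.ReadAll(pR)
	if err != nil {
		panic(err)
	}
	fmt.Printf("caller received %d bytes unchanged\n", len(out))
}
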
@@ -5,7 +5,7 @@ Packing and unpacking the Entries of the stream. The types of streams are
 either segments of raw bytes (for the raw headers and various padding) and for
 an entry marking a file payload.
 
-The raw bytes are stored precisely in the packed (marshalled) Entry. Where as
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
 the file payload marker include the name of the file, size, and crc64 checksum
 (for basic file integrity).
 */
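
As a rough illustration of the two kinds of Entry this doc comment describes, the sketch below builds one raw-bytes segment and one file marker whose payload is a crc64 checksum. The field and constant names match the Entry struct shown further down; the choice of crc64.ISO as the polynomial is an assumption, not something stated in this diff.

package main

import (
	"fmt"
	"hash/crc64"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// A raw-bytes segment: header bytes and padding are stored verbatim.
	seg := storage.Entry{
		Type:    storage.SegmentType,
		Payload: []byte("\x00\x00raw tar header bytes and padding\x00\x00"),
	}

	// A file marker: name, size, and a crc64 checksum of the payload
	// (ISO polynomial assumed here) instead of the file contents.
	body := []byte("hello world\n")
	crc := crc64.New(crc64.MakeTable(crc64.ISO))
	crc.Write(body)

	file := storage.Entry{
		Type:    storage.FileType,
		Name:    "./hello.txt",
		Size:    int64(len(body)),
		Payload: crc.Sum(nil),
	}

	fmt.Printf("segment entry: %d raw bytes\n", len(seg.Payload))
	fmt.Printf("file entry: %s (%d bytes), crc64=%x\n", file.Name, file.Size, file.Payload)
}
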
@@ -19,11 +19,11 @@ const (
 	// SegmentType represents a raw bytes segment from the archive stream. These raw
 	// byte segments consist of the raw headers and various padding.
 	//
-	// It's payload is to be marshalled base64 encoded.
+	// Its payload is to be marshalled base64 encoded.
 	SegmentType
 )
 
-// Entry is a the structure for packing and unpacking the information read from
+// Entry is the structure for packing and unpacking the information read from
 // the Tar archive.
 //
 // FileType Payload checksum is using `hash/crc64` for basic file integrity,
@@ -34,6 +34,6 @@ type Entry struct {
 	Type     Type   `json:"type"`
 	Name     string `json:"name",omitempty`
 	Size     int64  `json:"size",omitempty`
-	Payload  []byte `json:"payload"` // SegmentType store payload here; FileType store crc64 checksum here;
+	Payload  []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
 	Position int    `json:"position"`
 }
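
Because Payload is a []byte, Go's encoding/json marshals it base64-encoded, which is what the SegmentType comment above is referring to. A small sketch using only the fields visible in this hunk:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	e := storage.Entry{
		Type:     storage.SegmentType,
		Payload:  []byte("raw header bytes"),
		Position: 0,
	}

	// encoding/json encodes []byte fields as base64 strings, so the raw
	// segment bytes survive the round trip exactly.
	b, err := json.Marshal(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var back storage.Entry
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped payload: %q\n", back.Payload)
}
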
@@ -10,9 +10,9 @@ import (
 	"path/filepath"
 )
 
-// FileGetter is the interface for getting a stream of a file payload, address
-// by name/filename. Presumably, the names will be scoped to relative file
-// paths.
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to relative
+// file paths.
 type FileGetter interface {
 	// Get returns a stream for the provided file path
 	Get(filename string) (output io.ReadCloser, err error)
@@ -77,7 +77,7 @@ type readCloserWrapper struct {
 
 func (w *readCloserWrapper) Close() error { return nil }
 
-// NewBufferFileGetPutter is simple in memory FileGetPutter
+// NewBufferFileGetPutter is a simple in-memory FileGetPutter
 //
 // Implication is this is memory intensive...
 // Probably best for testing or light weight cases.
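
A small usage sketch for the in-memory FileGetPutter. The Get side matches the FileGetter interface shown above; the Put signature used here (name and reader in, size, crc64 checksum, and error out) is an assumption about the upstream FilePutter interface, which this hunk does not show.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	fgp := storage.NewBufferFileGetPutter()

	// Put stores the payload in memory, keyed by its relative path.
	// (The size/checksum/error returns are assumed, see note above.)
	size, csum, err := fgp.Put("./hello.txt", bytes.NewBufferString("hello world\n"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored %d bytes, crc64=%x\n", size, csum)

	// Get hands the payload back as a stream, per the FileGetter interface.
	rc, err := fgp.Get("./hello.txt")
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	body, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read back: %q\n", string(body))
}
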
@@ -8,8 +8,8 @@ import (
 	"path/filepath"
 )
 
-// ErrDuplicatePath is occured when a tar archive has more than one entry for
-// the same file path
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
 var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
 
 // Packer describes the methods to pack Entries to a storage destination
@@ -117,7 +117,7 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 	return e.Position, nil
 }
 
-// NewJSONPacker provides an Packer that writes each Entry (SegmentType and
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
 // FileType) as a json document.
 //
 // The Entries are delimited by new line.
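
To tie the pieces together, a sketch of the newline-delimited JSON output NewJSONPacker is documented to produce. The AddEntry signature comes from the hunk header above; NewJSONPacker taking an io.Writer is an assumption about the upstream package.

package main

import (
	"fmt"
	"os"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Each AddEntry call writes one JSON document, delimited by a newline.
	p := storage.NewJSONPacker(os.Stdout)

	entries := []storage.Entry{
		{Type: storage.SegmentType, Payload: []byte("raw header bytes")},
		{Type: storage.FileType, Name: "./hello.txt", Size: 12},
	}

	for _, e := range entries {
		// AddEntry returns the position assigned to the entry.
		pos, err := p.AddEntry(e)
		if err != nil {
			panic(err)
		}
		fmt.Fprintf(os.Stderr, "packed entry at position %d\n", pos)
	}
}
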