Mirror of https://github.com/vbatts/tar-split.git
tar/storage: do not accept duplicate paths
parent 891685f740
commit cfd32ecbc4
2 changed files with 82 additions and 10 deletions
@@ -3,7 +3,13 @@ package storage
 import (
 	"bufio"
 	"encoding/json"
+	"errors"
 	"io"
+	"path"
 )
 
+var (
+	ErrDuplicatePath = errors.New("duplicates of file paths not supported")
+)
+
 // Packer describes the methods to pack Entries to a storage destination
@@ -29,6 +35,7 @@ type jsonUnpacker struct {
 	r     io.Reader
 	b     *bufio.Reader
 	isEOF bool
+	seen  seenNames
 }
 
 func (jup *jsonUnpacker) Next() (*Entry, error) {
@@ -45,11 +52,22 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
 	} else if err == io.EOF {
 		jup.isEOF = true
 	}
 
 	err = json.Unmarshal(line, &e)
 	if err != nil && jup.isEOF {
 		// if the remainder actually _wasn't_ a remaining json structure, then just EOF
 		return nil, io.EOF
 	}
+
+	// check for dup name
+	if e.Type == FileType {
+		cName := path.Clean(e.Name)
+		if _, ok := jup.seen[cName]; ok {
+			return nil, ErrDuplicatePath
+		}
+		jup.seen[cName] = emptyByte
+	}
+
 	return &e, err
 }
@@ -61,6 +79,7 @@ func NewJsonUnpacker(r io.Reader) Unpacker {
 	return &jsonUnpacker{
 		r: r,
 		b: bufio.NewReader(r),
+		seen: seenNames{},
 	}
 }
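For orientation, here is a minimal round-trip sketch against the API as it appears in this diff: entries are recorded with the packer (NewJsonPacker, shown further down in the same file) and replayed with NewJsonUnpacker until io.EOF. The import path is assumed from the mirror URL above and the Entry field names are taken from the test at the bottom of this page; this is an illustration, not part of the commit.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	buf := bytes.NewBuffer(nil)

	// Record two distinct file entries; a repeated path would be rejected here.
	jp := storage.NewJsonPacker(buf)
	for _, name := range []string{"a.txt", "b.txt"} {
		if _, err := jp.AddEntry(storage.Entry{
			Type:    storage.FileType,
			Name:    name,
			Payload: []byte("payload"),
		}); err != nil {
			panic(err)
		}
	}

	// Replay the stream. With this commit, a duplicate file path in the
	// stream surfaces as storage.ErrDuplicatePath from Next instead of a
	// second entry for the same file.
	jup := storage.NewJsonUnpacker(buf)
	for {
		e, err := jup.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(e.Position, e.Name)
	}
}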
@@ -68,15 +87,34 @@ type jsonPacker struct {
 	w io.Writer
 	e *json.Encoder
 	pos int
+	seen seenNames
 }
 
+type seenNames map[string]byte
+
+// used in the seenNames map. byte is a uint8, and we'll re-use the same one
+// for minimalism.
+const emptyByte byte = 0
+
 func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+	// check early for dup name
+	if e.Type == FileType {
+		cName := path.Clean(e.Name)
+		if _, ok := jp.seen[cName]; ok {
+			return -1, ErrDuplicatePath
+		}
+		jp.seen[cName] = emptyByte
+	}
+
 	e.Position = jp.pos
 	err := jp.e.Encode(e)
-	if err == nil {
-		jp.pos++
+	if err != nil {
+		return -1, err
 	}
-	return e.Position, err
+
+	// made it this far, increment now
+	jp.pos++
+	return e.Position, nil
 }
 
 // NewJsonPacker provides an Packer that writes each Entry (SegmentType and
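Both the packer-side and unpacker-side checks key their seen map on path.Clean(e.Name), which is why the test below expects "./hurr.txt" and "hurr.txt" to collide. A standalone, standard-library-only sketch of that normalization:

package main

import (
	"fmt"
	"path"
)

func main() {
	// All three spellings clean to the same map key, so only the first
	// one can be recorded; the rest are reported as duplicates.
	fmt.Println(path.Clean("./hurr.txt"))      // hurr.txt
	fmt.Println(path.Clean("hurr.txt"))        // hurr.txt
	fmt.Println(path.Clean("./a/../hurr.txt")) // hurr.txt
}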
@@ -87,6 +125,7 @@ func NewJsonPacker(w io.Writer) Packer {
 	return &jsonPacker{
 		w: w,
 		e: json.NewEncoder(w),
+		seen: seenNames{},
 	}
 }
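Because ErrDuplicatePath is a plain package-level sentinel, callers can compare against it directly, as TestDuplicateFail below does. A hypothetical caller-side wrapper (addOnce is not part of the library, and the import path is again assumed from the mirror URL):

package main

import (
	"bytes"
	"fmt"

	"github.com/vbatts/tar-split/tar/storage"
)

// addOnce is a hypothetical helper: it records an entry and turns the
// duplicate-path sentinel into a more descriptive error for the caller.
func addOnce(jp storage.Packer, e storage.Entry) (int, error) {
	pos, err := jp.AddEntry(e)
	if err == storage.ErrDuplicatePath {
		return -1, fmt.Errorf("refusing to record %q twice: %v", e.Name, err)
	}
	return pos, err
}

func main() {
	jp := storage.NewJsonPacker(bytes.NewBuffer(nil))
	e := storage.Entry{Type: storage.FileType, Name: "./hurr.txt", Payload: []byte("abcde")}

	if pos, err := addOnce(jp, e); err == nil {
		fmt.Println("recorded at position", pos)
	}
	if _, err := addOnce(jp, e); err != nil {
		fmt.Println(err) // duplicate path rejected
	}
}

Note that in the duplicate case AddEntry also returns -1 for the position, so the returned position should only be used when the error is nil.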
@@ -7,6 +7,39 @@ import (
 	"testing"
 )
 
+func TestDuplicateFail(t *testing.T) {
+	e := []Entry{
+		Entry{
+			Type:    FileType,
+			Name:    "./hurr.txt",
+			Payload: []byte("abcde"),
+		},
+		Entry{
+			Type:    FileType,
+			Name:    "./hurr.txt",
+			Payload: []byte("deadbeef"),
+		},
+		Entry{
+			Type:    FileType,
+			Name:    "hurr.txt", // slightly different path, same file though
+			Payload: []byte("deadbeef"),
+		},
+	}
+	buf := []byte{}
+	b := bytes.NewBuffer(buf)
+
+	jp := NewJsonPacker(b)
+	if _, err := jp.AddEntry(e[0]); err != nil {
+		t.Error(err)
+	}
+	if _, err := jp.AddEntry(e[1]); err != ErrDuplicatePath {
+		t.Errorf("expected failure on duplicate path")
+	}
+	if _, err := jp.AddEntry(e[2]); err != ErrDuplicatePath {
+		t.Errorf("expected failure on duplicate path")
+	}
+}
+
 func TestJsonPackerUnpacker(t *testing.T) {
 	e := []Entry{
 		Entry{