tar-split/tar/storage/packer_test.go

package storage

import (
	"bytes"
	"compress/gzip"
	"io"
	"io/ioutil"
	"os"
	"testing"
)
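
// TestDuplicateFail verifies that adding a second entry for a path already in
// the packer (even spelled slightly differently, e.g. "hurr.txt" vs
// "./hurr.txt") returns ErrDuplicatePath.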
func TestDuplicateFail(t *testing.T) {
	e := []Entry{
		Entry{
			Type:    FileType,
			Name:    "./hurr.txt",
			Payload: []byte("abcde"),
		},
		Entry{
			Type:    FileType,
			Name:    "./hurr.txt",
			Payload: []byte("deadbeef"),
		},
		Entry{
			Type:    FileType,
			Name:    "hurr.txt", // slightly different path, same file though
			Payload: []byte("deadbeef"),
		},
	}
	buf := []byte{}
	b := bytes.NewBuffer(buf)
	jp := NewJSONPacker(b)
	if _, err := jp.AddEntry(e[0]); err != nil {
		t.Error(err)
	}
	if _, err := jp.AddEntry(e[1]); err != ErrDuplicatePath {
		t.Errorf("expected failure on duplicate path")
	}
	if _, err := jp.AddEntry(e[2]); err != ErrDuplicatePath {
		t.Errorf("expected failure on duplicate path")
	}
}
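
// TestJSONPackerUnpacker packs a mix of segment and file entries into a
// bytes.Buffer with NewJSONPacker, reads them back with NewJSONUnpacker, and
// checks that the same number of entries comes back out.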
func TestJSONPackerUnpacker(t *testing.T) {
	e := []Entry{
		Entry{
			Type:    SegmentType,
			Payload: []byte("how"),
		},
		Entry{
			Type:    SegmentType,
			Payload: []byte("y'all"),
		},
		Entry{
			Type:    FileType,
			Name:    "./hurr.txt",
			Payload: []byte("deadbeef"),
		},
		Entry{
			Type:    SegmentType,
			Payload: []byte("doin"),
		},
	}
	buf := []byte{}
	b := bytes.NewBuffer(buf)
	func() {
		jp := NewJSONPacker(b)
		for i := range e {
			if _, err := jp.AddEntry(e[i]); err != nil {
				t.Error(err)
			}
		}
	}()
	// >> packer_test.go:43: uncompressed: 266
	//t.Errorf("uncompressed: %d", len(b.Bytes()))
	b = bytes.NewBuffer(b.Bytes())
	entries := Entries{}
	func() {
		jup := NewJSONUnpacker(b)
		for {
			entry, err := jup.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Error(err)
			}
			entries = append(entries, *entry)
			t.Logf("got %#v", entry)
		}
	}()
	if len(entries) != len(e) {
		t.Errorf("expected %d entries, got %d", len(e), len(entries))
	}
}

// You can wrap the packer and unpacker in a compress Writer/Reader and make a
// nice saving.
//
// For these two tests, which use the same set of entries, the difference is
// 266 bytes uncompressed vs 138 bytes compressed.
func TestGzip(t *testing.T) {
	e := []Entry{
		Entry{
			Type:    SegmentType,
			Payload: []byte("how"),
		},
		Entry{
			Type:    SegmentType,
			Payload: []byte("y'all"),
		},
		Entry{
			Type:    FileType,
			Name:    "./hurr.txt",
			Payload: []byte("deadbeef"),
		},
		Entry{
			Type:    SegmentType,
			Payload: []byte("doin"),
		},
	}
	buf := []byte{}
	b := bytes.NewBuffer(buf)
	gzW := gzip.NewWriter(b)
	jp := NewJSONPacker(gzW)
	for i := range e {
		if _, err := jp.AddEntry(e[i]); err != nil {
			t.Error(err)
		}
	}
	gzW.Close()
	// >> packer_test.go:99: compressed: 138
	//t.Errorf("compressed: %d", len(b.Bytes()))
	b = bytes.NewBuffer(b.Bytes())
	gzR, err := gzip.NewReader(b)
	if err != nil {
		t.Fatal(err)
	}
	entries := Entries{}
	func() {
		jup := NewJSONUnpacker(gzR)
		for {
			entry, err := jup.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Error(err)
			}
			entries = append(entries, *entry)
			t.Logf("got %#v", entry)
		}
	}()
	if len(entries) != len(e) {
		t.Errorf("expected %d entries, got %d", len(e), len(entries))
	}
}
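
// BenchmarkGetPut measures a full pack/unpack round trip of the entry set
// through a temporary file.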
func BenchmarkGetPut(b *testing.B) {
	e := []Entry{
		Entry{
			Type:    SegmentType,
			Payload: []byte("how"),
		},
		Entry{
			Type:    SegmentType,
			Payload: []byte("y'all"),
		},
		Entry{
			Type:    FileType,
			Name:    "./hurr.txt",
			Payload: []byte("deadbeef"),
		},
		Entry{
			Type:    SegmentType,
			Payload: []byte("doin"),
		},
	}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			func() {
				fh, err := ioutil.TempFile("", "tar-split.")
				if err != nil {
					b.Fatal(err)
				}
				defer os.Remove(fh.Name())
				defer fh.Close()
				jp := NewJSONPacker(fh)
				for i := range e {
					if _, err := jp.AddEntry(e[i]); err != nil {
						b.Fatal(err)
					}
				}
				fh.Sync()
				// Rewind to the start of the file so the unpacker reads back
				// the entries that were just packed; otherwise Next() hits EOF
				// immediately and only the packing half is measured.
				if _, err := fh.Seek(0, 0); err != nil {
					b.Fatal(err)
				}
				up := NewJSONUnpacker(fh)
				for {
					_, err := up.Next()
					if err != nil {
						if err == io.EOF {
							break
						}
						b.Fatal(err)
					}
				}
			}()
		}
	})
}