1
0
Fork 0
forked from mirrors/tar-split

tar/asm: additional GNU LongLink testcase

Add a minimal test case for GNU @LongLink archives.
Verified that it fails on v0.9.5 and passes on v0.9.6 and master.
This commit is contained in:
Vincent Batts 2015-08-14 07:55:18 -04:00
parent 44d93178df
commit c76e42010e
2 changed files with 63 additions and 124 deletions

View file

@ -113,137 +113,76 @@ func TestTarStreamMangledGetterPutter(t *testing.T) {
} }
func TestTarStream(t *testing.T) { func TestTarStream(t *testing.T) {
var ( testCases := []struct {
expectedSHA1Sum = "1eb237ff69bca6e22789ecb05b45d35ca307adbd" path string
expectedSize int64 = 10240 expectedSHA1Sum string
) expectedSize int64
}{
fh, err := os.Open("./testdata/t.tar.gz") {"./testdata/t.tar.gz", "1eb237ff69bca6e22789ecb05b45d35ca307adbd", 10240},
if err != nil { {"./testdata/longlink.tar.gz", "d9f6babe107b7247953dff6b5b5ae31a3a880add", 20480},
t.Fatal(err) {"./testdata/fatlonglink.tar.gz", "8537f03f89aeef537382f8b0bb065d93e03b0be8", 26234880},
}
defer fh.Close()
gzRdr, err := gzip.NewReader(fh)
if err != nil {
t.Fatal(err)
}
defer gzRdr.Close()
// Setup where we'll store the metadata
w := bytes.NewBuffer([]byte{})
sp := storage.NewJSONPacker(w)
fgp := storage.NewBufferFileGetPutter()
// wrap the disassembly stream
tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
if err != nil {
t.Fatal(err)
} }
// get a sum of the stream after it has passed through to ensure it's the same. for _, tc := range testCases {
h0 := sha1.New() fh, err := os.Open(tc.path)
tRdr0 := io.TeeReader(tarStream, h0) if err != nil {
t.Fatal(err)
}
defer fh.Close()
gzRdr, err := gzip.NewReader(fh)
if err != nil {
t.Fatal(err)
}
defer gzRdr.Close()
// read it all to the bit bucket // Setup where we'll store the metadata
i, err := io.Copy(ioutil.Discard, tRdr0) w := bytes.NewBuffer([]byte{})
if err != nil { sp := storage.NewJSONPacker(w)
t.Fatal(err) fgp := storage.NewBufferFileGetPutter()
}
if i != expectedSize { // wrap the disassembly stream
t.Errorf("size of tar: expected %d; got %d", expectedSize, i) tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
} if err != nil {
if fmt.Sprintf("%x", h0.Sum(nil)) != expectedSHA1Sum { t.Fatal(err)
t.Fatalf("checksum of tar: expected %s; got %x", expectedSHA1Sum, h0.Sum(nil)) }
}
t.Logf("%s", w.String()) // if we fail, then show the packed info // get a sum of the stream after it has passed through to ensure it's the same.
h0 := sha1.New()
tRdr0 := io.TeeReader(tarStream, h0)
// If we've made it this far, then we'll turn it around and create a tar // read it all to the bit bucket
// stream from the packed metadata and buffered file contents. i, err := io.Copy(ioutil.Discard, tRdr0)
r := bytes.NewBuffer(w.Bytes()) if err != nil {
sup := storage.NewJSONUnpacker(r) t.Fatal(err)
// and reuse the fgp that we Put the payloads to. }
rc := NewOutputTarStream(fgp, sup) if i != tc.expectedSize {
h1 := sha1.New() t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
i, err = io.Copy(h1, rc) }
if err != nil { if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
t.Fatal(err) t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
} }
if i != expectedSize { t.Logf("%s", w.String()) // if we fail, then show the packed info
t.Errorf("size of output tar: expected %d; got %d", expectedSize, i)
} // If we've made it this far, then we'll turn it around and create a tar
if fmt.Sprintf("%x", h1.Sum(nil)) != expectedSHA1Sum { // stream from the packed metadata and buffered file contents.
t.Fatalf("checksum of output tar: expected %s; got %x", expectedSHA1Sum, h1.Sum(nil)) r := bytes.NewBuffer(w.Bytes())
} sup := storage.NewJSONUnpacker(r)
} // and reuse the fgp that we Put the payloads to.
func TestTarGNUTar(t *testing.T) { rc := NewOutputTarStream(fgp, sup)
var ( h1 := sha1.New()
expectedSHA1Sum = "d9f6babe107b7247953dff6b5b5ae31a3a880add" i, err = io.Copy(h1, rc)
expectedSize int64 = 20480 if err != nil {
) t.Fatal(err)
}
fh, err := os.Open("./testdata/longlink.tar.gz")
if err != nil { if i != tc.expectedSize {
t.Fatal(err) t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
} }
defer fh.Close() if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
gzRdr, err := gzip.NewReader(fh) t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
if err != nil { }
t.Fatal(err)
}
defer gzRdr.Close()
// Setup where we'll store the metadata
w := bytes.NewBuffer([]byte{})
sp := storage.NewJSONPacker(w)
fgp := storage.NewBufferFileGetPutter()
// wrap the disassembly stream
tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
if err != nil {
t.Fatal(err)
}
// get a sum of the stream after it has passed through to ensure it's the same.
h0 := sha1.New()
tRdr0 := io.TeeReader(tarStream, h0)
// read it all to the bit bucket
i, err := io.Copy(ioutil.Discard, tRdr0)
if err != nil {
t.Fatal(err)
}
if i != expectedSize {
t.Errorf("size of tar: expected %d; got %d", expectedSize, i)
}
if fmt.Sprintf("%x", h0.Sum(nil)) != expectedSHA1Sum {
t.Fatalf("checksum of tar: expected %s; got %x", expectedSHA1Sum, h0.Sum(nil))
}
t.Logf("%s", w.String()) // if we fail, then show the packed info
// If we've made it this far, then we'll turn it around and create a tar
// stream from the packed metadata and buffered file contents.
r := bytes.NewBuffer(w.Bytes())
sup := storage.NewJSONUnpacker(r)
// and reuse the fgp that we Put the payloads to.
rc := NewOutputTarStream(fgp, sup)
h1 := sha1.New()
i, err = io.Copy(h1, rc)
if err != nil {
t.Fatal(err)
}
if i != expectedSize {
t.Errorf("size of output tar: expected %d; got %d", expectedSize, i)
}
if fmt.Sprintf("%x", h1.Sum(nil)) != expectedSHA1Sum {
t.Fatalf("checksum of output tar: expected %s; got %x", expectedSHA1Sum, h1.Sum(nil))
} }
} }

BIN
tar/asm/testdata/fatlonglink.tar.gz vendored Normal file

Binary file not shown.