From d97b8009bb4442f341048e978a7c37489f3b8548 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 27 Apr 2023 14:01:09 -0400 Subject: [PATCH] archive/tar: beginning effort to prune the vendored archive/tar NOTE: I'm not sure this is really the route I want to go here, but it would need benchmarking to show if it's actually beneficial. It would still be nicer to get something like this upstreamed instead. trim down anything not used directly by tar-split. Signed-off-by: Vincent Batts --- archive/tar/common.go | 30 +- archive/tar/example_test.go | 71 -- archive/tar/reader.go | 1 + archive/tar/tar_test.go | 45 +- archive/tar/writer.go | 653 ----------------- archive/tar/writer_test.go | 1311 ----------------------------------- 6 files changed, 58 insertions(+), 2053 deletions(-) delete mode 100644 archive/tar/example_test.go delete mode 100644 archive/tar/writer.go delete mode 100644 archive/tar/writer_test.go diff --git a/archive/tar/common.go b/archive/tar/common.go index dee9e47..b8cd0a8 100644 --- a/archive/tar/common.go +++ b/archive/tar/common.go @@ -221,9 +221,11 @@ func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length } // that the file has no data in it, which is rather odd. // // As an example, if the underlying raw file contains the 10-byte data: +// // var compactFile = "abcdefgh" // // And the sparse map has the following entries: +// // var spd sparseDatas = []sparseEntry{ // {Offset: 2, Length: 5}, // Data fragment for 2..6 // {Offset: 18, Length: 3}, // Data fragment for 18..20 @@ -235,6 +237,7 @@ func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length } // } // // Then the content of the resulting sparse file with a Header.Size of 25 is: +// // var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 type ( sparseDatas []sparseEntry @@ -293,9 +296,9 @@ func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry { // The input must have been already validated. // // This function mutates src and returns a normalized map where: -// * adjacent fragments are coalesced together -// * only the last fragment may be empty -// * the endOffset of the last fragment is the total size +// - adjacent fragments are coalesced together +// - only the last fragment may be empty +// - the endOffset of the last fragment is the total size func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry { dst := src[:0] var pre sparseEntry @@ -721,3 +724,24 @@ func min(a, b int64) int64 { } return b } + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= nameSize || !isASCII(name) { + return "", "", false + } else if length > prefixSize+1 { + length = prefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} diff --git a/archive/tar/example_test.go b/archive/tar/example_test.go deleted file mode 100644 index a2474b9..0000000 --- a/archive/tar/example_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tar_test - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "log" - "os" -) - -func Example_minimal() { - // Create and add some files to the archive. - var buf bytes.Buffer - tw := tar.NewWriter(&buf) - var files = []struct { - Name, Body string - }{ - {"readme.txt", "This archive contains some text files."}, - {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, - {"todo.txt", "Get animal handling license."}, - } - for _, file := range files { - hdr := &tar.Header{ - Name: file.Name, - Mode: 0600, - Size: int64(len(file.Body)), - } - if err := tw.WriteHeader(hdr); err != nil { - log.Fatal(err) - } - if _, err := tw.Write([]byte(file.Body)); err != nil { - log.Fatal(err) - } - } - if err := tw.Close(); err != nil { - log.Fatal(err) - } - - // Open and iterate through the files in the archive. - tr := tar.NewReader(&buf) - for { - hdr, err := tr.Next() - if err == io.EOF { - break // End of archive - } - if err != nil { - log.Fatal(err) - } - fmt.Printf("Contents of %s:\n", hdr.Name) - if _, err := io.Copy(os.Stdout, tr); err != nil { - log.Fatal(err) - } - fmt.Println() - } - - // Output: - // Contents of readme.txt: - // This archive contains some text files. - // Contents of gopher.txt: - // Gopher names: - // George - // Geoffrey - // Gonzo - // Contents of todo.txt: - // Get animal handling license. -} diff --git a/archive/tar/reader.go b/archive/tar/reader.go index fcf3215..d051f7b 100644 --- a/archive/tar/reader.go +++ b/archive/tar/reader.go @@ -404,6 +404,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) { if err != nil { return nil, nil, err // EOF is okay here; exactly 1 block of zeros read } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read } diff --git a/archive/tar/tar_test.go b/archive/tar/tar_test.go index f1ce7fb..53b0e67 100644 --- a/archive/tar/tar_test.go +++ b/archive/tar/tar_test.go @@ -5,7 +5,9 @@ package tar import ( + realtar "archive/tar" // the stdlib one! 
"bytes" + "encoding/gob" "errors" "fmt" "io" @@ -300,15 +302,15 @@ func TestRoundTrip(t *testing.T) { data := []byte("some file contents") var b bytes.Buffer - tw := NewWriter(&b) - hdr := &Header{ + tw := realtar.NewWriter(&b) + hdr := &realtar.Header{ Name: "file.txt", Uid: 1 << 21, // Too big for 8 octal digits Size: int64(len(data)), ModTime: time.Now().Round(time.Second), PAXRecords: map[string]string{"uid": "2097152"}, - Format: FormatPAX, - Typeflag: TypeReg, + Format: realtar.FormatPAX, + Typeflag: realtar.TypeReg, } if err := tw.WriteHeader(hdr); err != nil { t.Fatalf("tw.WriteHeader: %v", err) @@ -326,8 +328,21 @@ func TestRoundTrip(t *testing.T) { if err != nil { t.Fatalf("tr.Next: %v", err) } - if !reflect.DeepEqual(rHdr, hdr) { - t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) + + // HACK let's marshal this from ours to realtar to another + buf := bytes.NewBuffer(nil) + enc := gob.NewEncoder(buf) + if err := enc.Encode(hdr); err != nil { + t.Fatalf("failed to encode header: %s", err) + } + dec := gob.NewDecoder(buf) + var nHdr Header + if err := dec.Decode(&nHdr); err != nil { + t.Fatalf("failed to encode header: %s", err) + } + + if !reflect.DeepEqual(rHdr, &nHdr) { + t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, &nHdr) } rData, err := ioutil.ReadAll(tr) if err != nil { @@ -765,7 +780,7 @@ func TestHeaderAllowedFormats(t *testing.T) { func Benchmark(b *testing.B) { type file struct { - hdr *Header + hdr *realtar.Header body []byte } @@ -775,28 +790,28 @@ func Benchmark(b *testing.B) { }{{ "USTAR", []file{{ - &Header{Name: "bar", Mode: 0640, Size: int64(3)}, + &realtar.Header{Name: "bar", Mode: 0640, Size: int64(3)}, []byte("foo"), }, { - &Header{Name: "world", Mode: 0640, Size: int64(5)}, + &realtar.Header{Name: "world", Mode: 0640, Size: int64(5)}, []byte("hello"), }}, }, { "GNU", []file{{ - &Header{Name: "bar", Mode: 0640, Size: int64(3), Devmajor: -1}, + &realtar.Header{Name: "bar", Mode: 0640, Size: int64(3), Devmajor: -1}, []byte("foo"), }, { - &Header{Name: "world", Mode: 0640, Size: int64(5), Devmajor: -1}, + &realtar.Header{Name: "world", Mode: 0640, Size: int64(5), Devmajor: -1}, []byte("hello"), }}, }, { "PAX", []file{{ - &Header{Name: "bar", Mode: 0640, Size: int64(3), Xattrs: map[string]string{"foo": "bar"}}, + &realtar.Header{Name: "bar", Mode: 0640, Size: int64(3), Xattrs: map[string]string{"foo": "bar"}}, []byte("foo"), }, { - &Header{Name: "world", Mode: 0640, Size: int64(5), Xattrs: map[string]string{"foo": "bar"}}, + &realtar.Header{Name: "world", Mode: 0640, Size: int64(5), Xattrs: map[string]string{"foo": "bar"}}, []byte("hello"), }}, }} @@ -808,7 +823,7 @@ func Benchmark(b *testing.B) { for i := 0; i < b.N; i++ { // Writing to ioutil.Discard because we want to // test purely the writer code and not bring in disk performance into this. - tw := NewWriter(ioutil.Discard) + tw := realtar.NewWriter(ioutil.Discard) for _, file := range v.files { if err := tw.WriteHeader(file.hdr); err != nil { b.Errorf("unexpected WriteHeader error: %v", err) @@ -831,7 +846,7 @@ func Benchmark(b *testing.B) { var r bytes.Reader // Write the archive to a byte buffer. - tw := NewWriter(&buf) + tw := realtar.NewWriter(&buf) for _, file := range v.files { _ = tw.WriteHeader(file.hdr) _, _ = tw.Write(file.body) diff --git a/archive/tar/writer.go b/archive/tar/writer.go deleted file mode 100644 index e80498d..0000000 --- a/archive/tar/writer.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -import ( - "fmt" - "io" - "path" - "sort" - "strings" - "time" -) - -// Writer provides sequential writing of a tar archive. -// Write.WriteHeader begins a new file with the provided Header, -// and then Writer can be treated as an io.Writer to supply that file's data. -type Writer struct { - w io.Writer - pad int64 // Amount of padding to write after current file entry - curr fileWriter // Writer for current file entry - hdr Header // Shallow copy of Header that is safe for mutations - blk block // Buffer to use as temporary local storage - - // err is a persistent error. - // It is only the responsibility of every exported method of Writer to - // ensure that this error is sticky. - err error -} - -// NewWriter creates a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { - return &Writer{w: w, curr: ®FileWriter{w, 0}} -} - -type fileWriter interface { - io.Writer - fileState - - ReadFrom(io.Reader) (int64, error) -} - -// Flush finishes writing the current file's block padding. -// The current file must be fully written before Flush can be called. -// -// This is unnecessary as the next call to WriteHeader or Close -// will implicitly flush out the file's padding. -func (tw *Writer) Flush() error { - if tw.err != nil { - return tw.err - } - if nb := tw.curr.LogicalRemaining(); nb > 0 { - return fmt.Errorf("archive/tar: missed writing %d bytes", nb) - } - if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil { - return tw.err - } - tw.pad = 0 - return nil -} - -// WriteHeader writes hdr and prepares to accept the file's contents. -// The Header.Size determines how many bytes can be written for the next file. -// If the current file is not fully written, then this returns an error. -// This implicitly flushes any padding necessary before writing the header. -func (tw *Writer) WriteHeader(hdr *Header) error { - if err := tw.Flush(); err != nil { - return err - } - tw.hdr = *hdr // Shallow copy of Header - - // Avoid usage of the legacy TypeRegA flag, and automatically promote - // it to use TypeReg or TypeDir. - if tw.hdr.Typeflag == TypeRegA { - if strings.HasSuffix(tw.hdr.Name, "/") { - tw.hdr.Typeflag = TypeDir - } else { - tw.hdr.Typeflag = TypeReg - } - } - - // Round ModTime and ignore AccessTime and ChangeTime unless - // the format is explicitly chosen. - // This ensures nominal usage of WriteHeader (without specifying the format) - // does not always result in the PAX format being chosen, which - // causes a 1KiB increase to every header. - if tw.hdr.Format == FormatUnknown { - tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second) - tw.hdr.AccessTime = time.Time{} - tw.hdr.ChangeTime = time.Time{} - } - - allowedFormats, paxHdrs, err := tw.hdr.allowedFormats() - switch { - case allowedFormats.has(FormatUSTAR): - tw.err = tw.writeUSTARHeader(&tw.hdr) - return tw.err - case allowedFormats.has(FormatPAX): - tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs) - return tw.err - case allowedFormats.has(FormatGNU): - tw.err = tw.writeGNUHeader(&tw.hdr) - return tw.err - default: - return err // Non-fatal error - } -} - -func (tw *Writer) writeUSTARHeader(hdr *Header) error { - // Check if we can use USTAR prefix/suffix splitting. - var namePrefix string - if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok { - namePrefix, hdr.Name = prefix, suffix - } - - // Pack the main header. 
- var f formatter - blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal) - f.formatString(blk.USTAR().Prefix(), namePrefix) - blk.SetFormat(FormatUSTAR) - if f.err != nil { - return f.err // Should never happen since header is validated - } - return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag) -} - -func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { - realName, realSize := hdr.Name, hdr.Size - - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - // Handle sparse files. - var spd sparseDatas - var spb []byte - if len(hdr.SparseHoles) > 0 { - sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map - sph = alignSparseEntries(sph, hdr.Size) - spd = invertSparseEntries(sph, hdr.Size) - - // Format the sparse map. - hdr.Size = 0 // Replace with encoded size - spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') - for _, s := range spd { - hdr.Size += s.Length - spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') - spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') - } - pad := blockPadding(int64(len(spb))) - spb = append(spb, zeroBlock[:pad]...) - hdr.Size += int64(len(spb)) // Accounts for encoded sparse map - - // Add and modify appropriate PAX records. - dir, file := path.Split(realName) - hdr.Name = path.Join(dir, "GNUSparseFile.0", file) - paxHdrs[paxGNUSparseMajor] = "1" - paxHdrs[paxGNUSparseMinor] = "0" - paxHdrs[paxGNUSparseName] = realName - paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) - paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) - delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName - } - */ - _ = realSize - - // Write PAX records to the output. - isGlobal := hdr.Typeflag == TypeXGlobalHeader - if len(paxHdrs) > 0 || isGlobal { - // Sort keys for deterministic ordering. - var keys []string - for k := range paxHdrs { - keys = append(keys, k) - } - sort.Strings(keys) - - // Write each record to a buffer. - var buf strings.Builder - for _, k := range keys { - rec, err := formatPAXRecord(k, paxHdrs[k]) - if err != nil { - return err - } - buf.WriteString(rec) - } - - // Write the extended header file. - var name string - var flag byte - if isGlobal { - name = realName - if name == "" { - name = "GlobalHead.0.0" - } - flag = TypeXGlobalHeader - } else { - dir, file := path.Split(realName) - name = path.Join(dir, "PaxHeaders.0", file) - flag = TypeXHeader - } - data := buf.String() - if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { - return err // Global headers return here - } - } - - // Pack the main header. - var f formatter // Ignore errors since they are expected - fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } - blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) - blk.SetFormat(FormatPAX) - if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { - return err - } - - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - // Write the sparse map and setup the sparse writer if necessary. - if len(spd) > 0 { - // Use tw.curr since the sparse map is accounted for in hdr.Size. - if _, err := tw.curr.Write(spb); err != nil { - return err - } - tw.curr = &sparseFileWriter{tw.curr, spd, 0} - } - */ - return nil -} - -func (tw *Writer) writeGNUHeader(hdr *Header) error { - // Use long-link files if Name or Linkname exceeds the field size. 
- const longName = "././@LongLink" - if len(hdr.Name) > nameSize { - data := hdr.Name + "\x00" - if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { - return err - } - } - if len(hdr.Linkname) > nameSize { - data := hdr.Linkname + "\x00" - if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { - return err - } - } - - // Pack the main header. - var f formatter // Ignore errors since they are expected - var spd sparseDatas - var spb []byte - blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) - if !hdr.AccessTime.IsZero() { - f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) - } - if !hdr.ChangeTime.IsZero() { - f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) - } - // TODO(dsnet): Re-enable this when adding sparse support. - // See https://golang.org/issue/22735 - /* - if hdr.Typeflag == TypeGNUSparse { - sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map - sph = alignSparseEntries(sph, hdr.Size) - spd = invertSparseEntries(sph, hdr.Size) - - // Format the sparse map. - formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { - for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { - f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) - f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) - sp = sp[1:] - } - if len(sp) > 0 { - sa.IsExtended()[0] = 1 - } - return sp - } - sp2 := formatSPD(spd, blk.GNU().Sparse()) - for len(sp2) > 0 { - var spHdr block - sp2 = formatSPD(sp2, spHdr.Sparse()) - spb = append(spb, spHdr[:]...) - } - - // Update size fields in the header block. - realSize := hdr.Size - hdr.Size = 0 // Encoded size; does not account for encoded sparse map - for _, s := range spd { - hdr.Size += s.Length - } - copy(blk.V7().Size(), zeroBlock[:]) // Reset field - f.formatNumeric(blk.V7().Size(), hdr.Size) - f.formatNumeric(blk.GNU().RealSize(), realSize) - } - */ - blk.SetFormat(FormatGNU) - if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { - return err - } - - // Write the extended sparse map and setup the sparse writer if necessary. - if len(spd) > 0 { - // Use tw.w since the sparse map is not accounted for in hdr.Size. - if _, err := tw.w.Write(spb); err != nil { - return err - } - tw.curr = &sparseFileWriter{tw.curr, spd, 0} - } - return nil -} - -type ( - stringFormatter func([]byte, string) - numberFormatter func([]byte, int64) -) - -// templateV7Plus fills out the V7 fields of a block using values from hdr. -// It also fills out fields (uname, gname, devmajor, devminor) that are -// shared in the USTAR, PAX, and GNU formats using the provided formatters. -// -// The block returned is only valid until the next call to -// templateV7Plus or writeRawFile. -func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { - tw.blk.Reset() - - modTime := hdr.ModTime - if modTime.IsZero() { - modTime = time.Unix(0, 0) - } - - v7 := tw.blk.V7() - v7.TypeFlag()[0] = hdr.Typeflag - fmtStr(v7.Name(), hdr.Name) - fmtStr(v7.LinkName(), hdr.Linkname) - fmtNum(v7.Mode(), hdr.Mode) - fmtNum(v7.UID(), int64(hdr.Uid)) - fmtNum(v7.GID(), int64(hdr.Gid)) - fmtNum(v7.Size(), hdr.Size) - fmtNum(v7.ModTime(), modTime.Unix()) - - ustar := tw.blk.USTAR() - fmtStr(ustar.UserName(), hdr.Uname) - fmtStr(ustar.GroupName(), hdr.Gname) - fmtNum(ustar.DevMajor(), hdr.Devmajor) - fmtNum(ustar.DevMinor(), hdr.Devminor) - - return &tw.blk -} - -// writeRawFile writes a minimal file with the given name and flag type. 
-// It uses format to encode the header format and will write data as the body. -// It uses default values for all of the other fields (as BSD and GNU tar does). -func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { - tw.blk.Reset() - - // Best effort for the filename. - name = toASCII(name) - if len(name) > nameSize { - name = name[:nameSize] - } - name = strings.TrimRight(name, "/") - - var f formatter - v7 := tw.blk.V7() - v7.TypeFlag()[0] = flag - f.formatString(v7.Name(), name) - f.formatOctal(v7.Mode(), 0) - f.formatOctal(v7.UID(), 0) - f.formatOctal(v7.GID(), 0) - f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB - f.formatOctal(v7.ModTime(), 0) - tw.blk.SetFormat(format) - if f.err != nil { - return f.err // Only occurs if size condition is violated - } - - // Write the header and data. - if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { - return err - } - _, err := io.WriteString(tw, data) - return err -} - -// writeRawHeader writes the value of blk, regardless of its value. -// It sets up the Writer such that it can accept a file of the given size. -// If the flag is a special header-only flag, then the size is treated as zero. -func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error { - if err := tw.Flush(); err != nil { - return err - } - if _, err := tw.w.Write(blk[:]); err != nil { - return err - } - if isHeaderOnlyType(flag) { - size = 0 - } - tw.curr = ®FileWriter{tw.w, size} - tw.pad = blockPadding(size) - return nil -} - -// splitUSTARPath splits a path according to USTAR prefix and suffix rules. -// If the path is not splittable, then it will return ("", "", false). -func splitUSTARPath(name string) (prefix, suffix string, ok bool) { - length := len(name) - if length <= nameSize || !isASCII(name) { - return "", "", false - } else if length > prefixSize+1 { - length = prefixSize + 1 - } else if name[length-1] == '/' { - length-- - } - - i := strings.LastIndex(name[:length], "/") - nlen := len(name) - i - 1 // nlen is length of suffix - plen := i // plen is length of prefix - if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { - return "", "", false - } - return name[:i], name[i+1:], true -} - -// Write writes to the current file in the tar archive. -// Write returns the error ErrWriteTooLong if more than -// Header.Size bytes are written after WriteHeader. -// -// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless -// of what the Header.Size claims. -func (tw *Writer) Write(b []byte) (int, error) { - if tw.err != nil { - return 0, tw.err - } - n, err := tw.curr.Write(b) - if err != nil && err != ErrWriteTooLong { - tw.err = err - } - return n, err -} - -// readFrom populates the content of the current file by reading from r. -// The bytes read must match the number of remaining bytes in the current file. -// -// If the current file is sparse and r is an io.ReadSeeker, -// then readFrom uses Seek to skip past holes defined in Header.SparseHoles, -// assuming that skipped regions are all NULs. -// This always reads the last byte to ensure r is the right size. -// -// TODO(dsnet): Re-export this when adding sparse file support. 
-// See https://golang.org/issue/22735 -func (tw *Writer) readFrom(r io.Reader) (int64, error) { - if tw.err != nil { - return 0, tw.err - } - n, err := tw.curr.ReadFrom(r) - if err != nil && err != ErrWriteTooLong { - tw.err = err - } - return n, err -} - -// Close closes the tar archive by flushing the padding, and writing the footer. -// If the current file (from a prior call to WriteHeader) is not fully written, -// then this returns an error. -func (tw *Writer) Close() error { - if tw.err == ErrWriteAfterClose { - return nil - } - if tw.err != nil { - return tw.err - } - - // Trailer: two zero blocks. - err := tw.Flush() - for i := 0; i < 2 && err == nil; i++ { - _, err = tw.w.Write(zeroBlock[:]) - } - - // Ensure all future actions are invalid. - tw.err = ErrWriteAfterClose - return err // Report IO errors -} - -// regFileWriter is a fileWriter for writing data to a regular file entry. -type regFileWriter struct { - w io.Writer // Underlying Writer - nb int64 // Number of remaining bytes to write -} - -func (fw *regFileWriter) Write(b []byte) (n int, err error) { - overwrite := int64(len(b)) > fw.nb - if overwrite { - b = b[:fw.nb] - } - if len(b) > 0 { - n, err = fw.w.Write(b) - fw.nb -= int64(n) - } - switch { - case err != nil: - return n, err - case overwrite: - return n, ErrWriteTooLong - default: - return n, nil - } -} - -func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { - return io.Copy(struct{ io.Writer }{fw}, r) -} - -func (fw regFileWriter) LogicalRemaining() int64 { - return fw.nb -} -func (fw regFileWriter) PhysicalRemaining() int64 { - return fw.nb -} - -// sparseFileWriter is a fileWriter for writing data to a sparse file entry. -type sparseFileWriter struct { - fw fileWriter // Underlying fileWriter - sp sparseDatas // Normalized list of data fragments - pos int64 // Current position in sparse file -} - -func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { - overwrite := int64(len(b)) > sw.LogicalRemaining() - if overwrite { - b = b[:sw.LogicalRemaining()] - } - - b0 := b - endPos := sw.pos + int64(len(b)) - for endPos > sw.pos && err == nil { - var nf int // Bytes written in fragment - dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() - if sw.pos < dataStart { // In a hole fragment - bf := b[:min(int64(len(b)), dataStart-sw.pos)] - nf, err = zeroWriter{}.Write(bf) - } else { // In a data fragment - bf := b[:min(int64(len(b)), dataEnd-sw.pos)] - nf, err = sw.fw.Write(bf) - } - b = b[nf:] - sw.pos += int64(nf) - if sw.pos >= dataEnd && len(sw.sp) > 1 { - sw.sp = sw.sp[1:] // Ensure last fragment always remains - } - } - - n = len(b0) - len(b) - switch { - case err == ErrWriteTooLong: - return n, errMissData // Not possible; implies bug in validation logic - case err != nil: - return n, err - case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: - return n, errUnrefData // Not possible; implies bug in validation logic - case overwrite: - return n, ErrWriteTooLong - default: - return n, nil - } -} - -func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { - rs, ok := r.(io.ReadSeeker) - if ok { - if _, err := rs.Seek(0, io.SeekCurrent); err != nil { - ok = false // Not all io.Seeker can really seek - } - } - if !ok { - return io.Copy(struct{ io.Writer }{sw}, r) - } - - var readLastByte bool - pos0 := sw.pos - for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { - var nf int64 // Size of fragment - dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() - if sw.pos < dataStart { // In a hole 
fragment - nf = dataStart - sw.pos - if sw.PhysicalRemaining() == 0 { - readLastByte = true - nf-- - } - _, err = rs.Seek(nf, io.SeekCurrent) - } else { // In a data fragment - nf = dataEnd - sw.pos - nf, err = io.CopyN(sw.fw, rs, nf) - } - sw.pos += nf - if sw.pos >= dataEnd && len(sw.sp) > 1 { - sw.sp = sw.sp[1:] // Ensure last fragment always remains - } - } - - // If the last fragment is a hole, then seek to 1-byte before EOF, and - // read a single byte to ensure the file is the right size. - if readLastByte && err == nil { - _, err = mustReadFull(rs, []byte{0}) - sw.pos++ - } - - n = sw.pos - pos0 - switch { - case err == io.EOF: - return n, io.ErrUnexpectedEOF - case err == ErrWriteTooLong: - return n, errMissData // Not possible; implies bug in validation logic - case err != nil: - return n, err - case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: - return n, errUnrefData // Not possible; implies bug in validation logic - default: - return n, ensureEOF(rs) - } -} - -func (sw sparseFileWriter) LogicalRemaining() int64 { - return sw.sp[len(sw.sp)-1].endOffset() - sw.pos -} -func (sw sparseFileWriter) PhysicalRemaining() int64 { - return sw.fw.PhysicalRemaining() -} - -// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. -type zeroWriter struct{} - -func (zeroWriter) Write(b []byte) (int, error) { - for i, c := range b { - if c != 0 { - return i, errWriteHole - } - } - return len(b), nil -} - -// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. -func ensureEOF(r io.Reader) error { - n, err := tryReadFull(r, []byte{0}) - switch { - case n > 0: - return ErrWriteTooLong - case err == io.EOF: - return nil - default: - return err - } -} diff --git a/archive/tar/writer_test.go b/archive/tar/writer_test.go deleted file mode 100644 index 30556d2..0000000 --- a/archive/tar/writer_test.go +++ /dev/null @@ -1,1311 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tar - -import ( - "bytes" - "encoding/hex" - "errors" - "io" - "io/ioutil" - "os" - "path" - "reflect" - "sort" - "strings" - "testing" - "testing/iotest" - "time" -) - -func bytediff(a, b []byte) string { - const ( - uniqueA = "- " - uniqueB = "+ " - identity = " " - ) - var ss []string - sa := strings.Split(strings.TrimSpace(hex.Dump(a)), "\n") - sb := strings.Split(strings.TrimSpace(hex.Dump(b)), "\n") - for len(sa) > 0 && len(sb) > 0 { - if sa[0] == sb[0] { - ss = append(ss, identity+sa[0]) - } else { - ss = append(ss, uniqueA+sa[0]) - ss = append(ss, uniqueB+sb[0]) - } - sa, sb = sa[1:], sb[1:] - } - for len(sa) > 0 { - ss = append(ss, uniqueA+sa[0]) - sa = sa[1:] - } - for len(sb) > 0 { - ss = append(ss, uniqueB+sb[0]) - sb = sb[1:] - } - return strings.Join(ss, "\n") -} - -func TestWriter(t *testing.T) { - type ( - testHeader struct { // WriteHeader(hdr) == wantErr - hdr Header - wantErr error - } - testWrite struct { // Write(str) == (wantCnt, wantErr) - str string - wantCnt int - wantErr error - } - testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr) - ops fileOps - wantCnt int64 - wantErr error - } - testClose struct { // Close() == wantErr - wantErr error - } - testFnc interface{} // testHeader | testWrite | testReadFrom | testClose - ) - - vectors := []struct { - file string // Optional filename of expected output - tests []testFnc - }{{ - // The writer test file was produced with this command: - // tar (GNU tar) 1.26 - // ln -s small.txt link.txt - // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt - file: "testdata/writer.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "small.txt", - Size: 5, - Mode: 0640, - Uid: 73025, - Gid: 5000, - Uname: "dsymonds", - Gname: "eng", - ModTime: time.Unix(1246508266, 0), - }, nil}, - testWrite{"Kilts", 5, nil}, - - testHeader{Header{ - Typeflag: TypeReg, - Name: "small2.txt", - Size: 11, - Mode: 0640, - Uid: 73025, - Uname: "dsymonds", - Gname: "eng", - Gid: 5000, - ModTime: time.Unix(1245217492, 0), - }, nil}, - testWrite{"Google.com\n", 11, nil}, - - testHeader{Header{ - Typeflag: TypeSymlink, - Name: "link.txt", - Linkname: "small.txt", - Mode: 0777, - Uid: 1000, - Gid: 1000, - Uname: "strings", - Gname: "strings", - ModTime: time.Unix(1314603082, 0), - }, nil}, - testWrite{"", 0, nil}, - - testClose{nil}, - }, - }, { - // The truncated test file was produced using these commands: - // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt - // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar - file: "testdata/writer-big.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "tmp/16gig.txt", - Size: 16 << 30, - Mode: 0640, - Uid: 73025, - Gid: 5000, - Uname: "dsymonds", - Gname: "eng", - ModTime: time.Unix(1254699560, 0), - Format: FormatGNU, - }, nil}, - }, - }, { - // This truncated file was produced using this library. - // It was verified to work with GNU tar 1.27.1 and BSD tar 3.1.2. - // dd if=/dev/zero bs=1G count=16 >> writer-big-long.tar - // gnutar -xvf writer-big-long.tar - // bsdtar -xvf writer-big-long.tar - // - // This file is in PAX format. - file: "testdata/writer-big-long.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: strings.Repeat("longname/", 15) + "16gig.txt", - Size: 16 << 30, - Mode: 0644, - Uid: 1000, - Gid: 1000, - Uname: "guillaume", - Gname: "guillaume", - ModTime: time.Unix(1399583047, 0), - }, nil}, - }, - }, { - // This file was produced using GNU tar v1.17. 
- // gnutar -b 4 --format=ustar (longname/)*15 + file.txt - file: "testdata/ustar.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: strings.Repeat("longname/", 15) + "file.txt", - Size: 6, - Mode: 0644, - Uid: 501, - Gid: 20, - Uname: "shane", - Gname: "staff", - ModTime: time.Unix(1360135598, 0), - }, nil}, - testWrite{"hello\n", 6, nil}, - testClose{nil}, - }, - }, { - // This file was produced using GNU tar v1.26: - // echo "Slartibartfast" > file.txt - // ln file.txt hard.txt - // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt - file: "testdata/hardlink.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "file.txt", - Size: 15, - Mode: 0644, - Uid: 1000, - Gid: 100, - Uname: "vbatts", - Gname: "users", - ModTime: time.Unix(1425484303, 0), - }, nil}, - testWrite{"Slartibartfast\n", 15, nil}, - - testHeader{Header{ - Typeflag: TypeLink, - Name: "hard.txt", - Linkname: "file.txt", - Mode: 0644, - Uid: 1000, - Gid: 100, - Uname: "vbatts", - Gname: "users", - ModTime: time.Unix(1425484303, 0), - }, nil}, - testWrite{"", 0, nil}, - - testClose{nil}, - }, - }, { - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "bad-null.txt", - Xattrs: map[string]string{"null\x00null\x00": "fizzbuzz"}, - }, headerError{}}, - }, - }, { - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "null\x00.txt", - }, headerError{}}, - }, - }, { - file: "testdata/pax-records.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "file", - Uname: strings.Repeat("long", 10), - PAXRecords: map[string]string{ - "path": "FILE", // Should be ignored - "GNU.sparse.map": "0,0", // Should be ignored - "comment": "Hello, 世界", - "GOLANG.pkg": "tar", - }, - }, nil}, - testClose{nil}, - }, - }, { - // Craft a theoretically valid PAX archive with global headers. - // The GNU and BSD tar tools do not parse these the same way. - // - // BSD tar v3.1.2 parses and ignores all global headers; - // the behavior is verified by researching the source code. - // - // $ bsdtar -tvf pax-global-records.tar - // ---------- 0 0 0 0 Dec 31 1969 file1 - // ---------- 0 0 0 0 Dec 31 1969 file2 - // ---------- 0 0 0 0 Dec 31 1969 file3 - // ---------- 0 0 0 0 May 13 2014 file4 - // - // GNU tar v1.27.1 applies global headers to subsequent records, - // but does not do the following properly: - // * It does not treat an empty record as deletion. - // * It does not use subsequent global headers to update previous ones. - // - // $ gnutar -tvf pax-global-records.tar - // ---------- 0/0 0 2017-07-13 19:40 global1 - // ---------- 0/0 0 2017-07-13 19:40 file2 - // gnutar: Substituting `.' for empty member name - // ---------- 0/0 0 1969-12-31 16:00 - // gnutar: Substituting `.' 
for empty member name - // ---------- 0/0 0 2014-05-13 09:53 - // - // According to the PAX specification, this should have been the result: - // ---------- 0/0 0 2017-07-13 19:40 global1 - // ---------- 0/0 0 2017-07-13 19:40 file2 - // ---------- 0/0 0 2017-07-13 19:40 file3 - // ---------- 0/0 0 2014-05-13 09:53 file4 - file: "testdata/pax-global-records.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeXGlobalHeader, - PAXRecords: map[string]string{"path": "global1", "mtime": "1500000000.0"}, - }, nil}, - testHeader{Header{ - Typeflag: TypeReg, Name: "file1", - }, nil}, - testHeader{Header{ - Typeflag: TypeReg, - Name: "file2", - PAXRecords: map[string]string{"path": "file2"}, - }, nil}, - testHeader{Header{ - Typeflag: TypeXGlobalHeader, - PAXRecords: map[string]string{"path": ""}, // Should delete "path", but keep "mtime" - }, nil}, - testHeader{Header{ - Typeflag: TypeReg, Name: "file3", - }, nil}, - testHeader{Header{ - Typeflag: TypeReg, - Name: "file4", - ModTime: time.Unix(1400000000, 0), - PAXRecords: map[string]string{"mtime": "1400000000"}, - }, nil}, - testClose{nil}, - }, - }, { - file: "testdata/gnu-utf8.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹", - Mode: 0644, - Uid: 1000, Gid: 1000, - Uname: "☺", - Gname: "⚹", - ModTime: time.Unix(0, 0), - Format: FormatGNU, - }, nil}, - testClose{nil}, - }, - }, { - file: "testdata/gnu-not-utf8.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "hi\x80\x81\x82\x83bye", - Mode: 0644, - Uid: 1000, - Gid: 1000, - Uname: "rawr", - Gname: "dsnet", - ModTime: time.Unix(0, 0), - Format: FormatGNU, - }, nil}, - testClose{nil}, - }, - // TODO(dsnet): Re-enable this test when adding sparse support. 
- // See https://golang.org/issue/22735 - /* - }, { - file: "testdata/gnu-nil-sparse-data.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeGNUSparse, - Name: "sparse.db", - Size: 1000, - SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}}, - }, nil}, - testWrite{strings.Repeat("0123456789", 100), 1000, nil}, - testClose{}, - }, - }, { - file: "testdata/gnu-nil-sparse-hole.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeGNUSparse, - Name: "sparse.db", - Size: 1000, - SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}}, - }, nil}, - testWrite{strings.Repeat("\x00", 1000), 1000, nil}, - testClose{}, - }, - }, { - file: "testdata/pax-nil-sparse-data.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "sparse.db", - Size: 1000, - SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}}, - }, nil}, - testWrite{strings.Repeat("0123456789", 100), 1000, nil}, - testClose{}, - }, - }, { - file: "testdata/pax-nil-sparse-hole.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "sparse.db", - Size: 1000, - SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}}, - }, nil}, - testWrite{strings.Repeat("\x00", 1000), 1000, nil}, - testClose{}, - }, - }, { - file: "testdata/gnu-sparse-big.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeGNUSparse, - Name: "gnu-sparse", - Size: 6e10, - SparseHoles: []sparseEntry{ - {Offset: 0e10, Length: 1e10 - 100}, - {Offset: 1e10, Length: 1e10 - 100}, - {Offset: 2e10, Length: 1e10 - 100}, - {Offset: 3e10, Length: 1e10 - 100}, - {Offset: 4e10, Length: 1e10 - 100}, - {Offset: 5e10, Length: 1e10 - 100}, - }, - }, nil}, - testReadFrom{fileOps{ - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - }, 6e10, nil}, - testClose{nil}, - }, - }, { - file: "testdata/pax-sparse-big.tar", - tests: []testFnc{ - testHeader{Header{ - Typeflag: TypeReg, - Name: "pax-sparse", - Size: 6e10, - SparseHoles: []sparseEntry{ - {Offset: 0e10, Length: 1e10 - 100}, - {Offset: 1e10, Length: 1e10 - 100}, - {Offset: 2e10, Length: 1e10 - 100}, - {Offset: 3e10, Length: 1e10 - 100}, - {Offset: 4e10, Length: 1e10 - 100}, - {Offset: 5e10, Length: 1e10 - 100}, - }, - }, nil}, - testReadFrom{fileOps{ - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - int64(1e10 - blockSize), - strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), - }, 6e10, nil}, - testClose{nil}, - }, - */ - }, { - file: "testdata/trailing-slash.tar", - tests: []testFnc{ - testHeader{Header{Name: 
strings.Repeat("123456789/", 30)}, nil}, - testClose{nil}, - }, - }, { - // Automatically promote zero value of Typeflag depending on the name. - file: "testdata/file-and-dir.tar", - tests: []testFnc{ - testHeader{Header{Name: "small.txt", Size: 5}, nil}, - testWrite{"Kilts", 5, nil}, - testHeader{Header{Name: "dir/"}, nil}, - testClose{nil}, - }, - }} - - equalError := func(x, y error) bool { - _, ok1 := x.(headerError) - _, ok2 := y.(headerError) - if ok1 || ok2 { - return ok1 && ok2 - } - return x == y - } - for _, v := range vectors { - t.Run(path.Base(v.file), func(t *testing.T) { - const maxSize = 10 << 10 // 10KiB - buf := new(bytes.Buffer) - tw := NewWriter(iotest.TruncateWriter(buf, maxSize)) - - for i, tf := range v.tests { - switch tf := tf.(type) { - case testHeader: - err := tw.WriteHeader(&tf.hdr) - if !equalError(err, tf.wantErr) { - t.Fatalf("test %d, WriteHeader() = %v, want %v", i, err, tf.wantErr) - } - case testWrite: - got, err := tw.Write([]byte(tf.str)) - if got != tf.wantCnt || !equalError(err, tf.wantErr) { - t.Fatalf("test %d, Write() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr) - } - case testReadFrom: - f := &testFile{ops: tf.ops} - got, err := tw.readFrom(f) - if _, ok := err.(testError); ok { - t.Errorf("test %d, ReadFrom(): %v", i, err) - } else if got != tf.wantCnt || !equalError(err, tf.wantErr) { - t.Errorf("test %d, ReadFrom() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr) - } - if len(f.ops) > 0 { - t.Errorf("test %d, expected %d more operations", i, len(f.ops)) - } - case testClose: - err := tw.Close() - if !equalError(err, tf.wantErr) { - t.Fatalf("test %d, Close() = %v, want %v", i, err, tf.wantErr) - } - default: - t.Fatalf("test %d, unknown test operation: %T", i, tf) - } - } - - if v.file != "" { - want, err := ioutil.ReadFile(v.file) - if err != nil { - t.Fatalf("ReadFile() = %v, want nil", err) - } - got := buf.Bytes() - if !bytes.Equal(want, got) { - t.Fatalf("incorrect result: (-got +want)\n%v", bytediff(got, want)) - } - } - }) - } -} - -func TestPax(t *testing.T) { - // Create an archive with a large name - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat: %v", err) - } - // Force a PAX long name to be written - longName := strings.Repeat("ab", 100) - contents := strings.Repeat(" ", int(hdr.Size)) - hdr.Name = longName - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { - t.Fatal("Expected at least one PAX header to be written.") - } - // Test that we can get a long name back out of the archive. 
- reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Name != longName { - t.Fatal("Couldn't recover long file name") - } -} - -func TestPaxSymlink(t *testing.T) { - // Create an archive with a large linkname - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - hdr.Typeflag = TypeSymlink - if err != nil { - t.Fatalf("os.Stat:1 %v", err) - } - // Force a PAX long linkname to be written - longLinkname := strings.Repeat("1234567890/1234567890", 10) - hdr.Linkname = longLinkname - - hdr.Size = 0 - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { - t.Fatal("Expected at least one PAX header to be written.") - } - // Test that we can get a long name back out of the archive. - reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Linkname != longLinkname { - t.Fatal("Couldn't recover long link name") - } -} - -func TestPaxNonAscii(t *testing.T) { - // Create an archive with non ascii. These should trigger a pax header - // because pax headers have a defined utf-8 encoding. - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat:1 %v", err) - } - - // some sample data - chineseFilename := "文件名" - chineseGroupname := "組" - chineseUsername := "用戶名" - - hdr.Name = chineseFilename - hdr.Gname = chineseGroupname - hdr.Uname = chineseUsername - - contents := strings.Repeat(" ", int(hdr.Size)) - - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { - t.Fatal("Expected at least one PAX header to be written.") - } - // Test that we can get a long name back out of the archive. - reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Name != chineseFilename { - t.Fatal("Couldn't recover unicode name") - } - if hdr.Gname != chineseGroupname { - t.Fatal("Couldn't recover unicode group") - } - if hdr.Uname != chineseUsername { - t.Fatal("Couldn't recover unicode user") - } -} - -func TestPaxXattrs(t *testing.T) { - xattrs := map[string]string{ - "user.key": "value", - } - - // Create an archive with an xattr - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat: %v", err) - } - contents := "Kilts" - hdr.Xattrs = xattrs - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Test that we can get the xattrs back out of the archive. 
- reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(hdr.Xattrs, xattrs) { - t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", - hdr.Xattrs, xattrs) - } -} - -func TestPaxHeadersSorted(t *testing.T) { - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - if err != nil { - t.Fatalf("os.Stat: %v", err) - } - contents := strings.Repeat(" ", int(hdr.Size)) - - hdr.Xattrs = map[string]string{ - "foo": "foo", - "bar": "bar", - "baz": "baz", - "qux": "qux", - } - - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err = writer.Write([]byte(contents)); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Simple test to make sure PAX extensions are in effect - if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { - t.Fatal("Expected at least one PAX header to be written.") - } - - // xattr bar should always appear before others - indices := []int{ - bytes.Index(buf.Bytes(), []byte("bar=bar")), - bytes.Index(buf.Bytes(), []byte("baz=baz")), - bytes.Index(buf.Bytes(), []byte("foo=foo")), - bytes.Index(buf.Bytes(), []byte("qux=qux")), - } - if !sort.IntsAreSorted(indices) { - t.Fatal("PAX headers are not sorted") - } -} - -func TestUSTARLongName(t *testing.T) { - // Create an archive with a path that failed to split with USTAR extension in previous versions. - fileinfo, err := os.Stat("testdata/small.txt") - if err != nil { - t.Fatal(err) - } - hdr, err := FileInfoHeader(fileinfo, "") - hdr.Typeflag = TypeDir - if err != nil { - t.Fatalf("os.Stat:1 %v", err) - } - // Force a PAX long name to be written. The name was taken from a practical example - // that fails and replaced ever char through numbers to anonymize the sample. - longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" - hdr.Name = longName - - hdr.Size = 0 - var buf bytes.Buffer - writer := NewWriter(&buf) - if err := writer.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if err := writer.Close(); err != nil { - t.Fatal(err) - } - // Test that we can get a long name back out of the archive. - reader := NewReader(&buf) - hdr, err = reader.Next() - if err != nil { - t.Fatal(err) - } - if hdr.Name != longName { - t.Fatal("Couldn't recover long name") - } -} - -func TestValidTypeflagWithPAXHeader(t *testing.T) { - var buffer bytes.Buffer - tw := NewWriter(&buffer) - - fileName := strings.Repeat("ab", 100) - - hdr := &Header{ - Name: fileName, - Size: 4, - Typeflag: 0, - } - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("Failed to write header: %s", err) - } - if _, err := tw.Write([]byte("fooo")); err != nil { - t.Fatalf("Failed to write the file's data: %s", err) - } - tw.Close() - - tr := NewReader(&buffer) - - for { - header, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatalf("Failed to read header: %s", err) - } - if header.Typeflag != TypeReg { - t.Fatalf("Typeflag should've been %d, found %d", TypeReg, header.Typeflag) - } - } -} - -// failOnceWriter fails exactly once and then always reports success. 
-type failOnceWriter bool - -func (w *failOnceWriter) Write(b []byte) (int, error) { - if !*w { - return 0, io.ErrShortWrite - } - *w = true - return len(b), nil -} - -func TestWriterErrors(t *testing.T) { - t.Run("HeaderOnly", func(t *testing.T) { - tw := NewWriter(new(bytes.Buffer)) - hdr := &Header{Name: "dir/", Typeflag: TypeDir} - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("WriteHeader() = %v, want nil", err) - } - if _, err := tw.Write([]byte{0x00}); err != ErrWriteTooLong { - t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong) - } - }) - - t.Run("NegativeSize", func(t *testing.T) { - tw := NewWriter(new(bytes.Buffer)) - hdr := &Header{Name: "small.txt", Size: -1} - if err := tw.WriteHeader(hdr); err == nil { - t.Fatalf("WriteHeader() = nil, want non-nil error") - } - }) - - t.Run("BeforeHeader", func(t *testing.T) { - tw := NewWriter(new(bytes.Buffer)) - if _, err := tw.Write([]byte("Kilts")); err != ErrWriteTooLong { - t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong) - } - }) - - t.Run("AfterClose", func(t *testing.T) { - tw := NewWriter(new(bytes.Buffer)) - hdr := &Header{Name: "small.txt"} - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("WriteHeader() = %v, want nil", err) - } - if err := tw.Close(); err != nil { - t.Fatalf("Close() = %v, want nil", err) - } - if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose { - t.Fatalf("Write() = %v, want %v", err, ErrWriteAfterClose) - } - if err := tw.Flush(); err != ErrWriteAfterClose { - t.Fatalf("Flush() = %v, want %v", err, ErrWriteAfterClose) - } - if err := tw.Close(); err != nil { - t.Fatalf("Close() = %v, want nil", err) - } - }) - - t.Run("PrematureFlush", func(t *testing.T) { - tw := NewWriter(new(bytes.Buffer)) - hdr := &Header{Name: "small.txt", Size: 5} - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("WriteHeader() = %v, want nil", err) - } - if err := tw.Flush(); err == nil { - t.Fatalf("Flush() = %v, want non-nil error", err) - } - }) - - t.Run("PrematureClose", func(t *testing.T) { - tw := NewWriter(new(bytes.Buffer)) - hdr := &Header{Name: "small.txt", Size: 5} - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("WriteHeader() = %v, want nil", err) - } - if err := tw.Close(); err == nil { - t.Fatalf("Close() = %v, want non-nil error", err) - } - }) - - t.Run("Persistence", func(t *testing.T) { - tw := NewWriter(new(failOnceWriter)) - if err := tw.WriteHeader(&Header{}); err != io.ErrShortWrite { - t.Fatalf("WriteHeader() = %v, want %v", err, io.ErrShortWrite) - } - if err := tw.WriteHeader(&Header{Name: "small.txt"}); err == nil { - t.Errorf("WriteHeader() = got %v, want non-nil error", err) - } - if _, err := tw.Write(nil); err == nil { - t.Errorf("Write() = %v, want non-nil error", err) - } - if err := tw.Flush(); err == nil { - t.Errorf("Flush() = %v, want non-nil error", err) - } - if err := tw.Close(); err == nil { - t.Errorf("Close() = %v, want non-nil error", err) - } - }) -} - -func TestSplitUSTARPath(t *testing.T) { - sr := strings.Repeat - - vectors := []struct { - input string // Input path - prefix string // Expected output prefix - suffix string // Expected output suffix - ok bool // Split success? 
- }{ - {"", "", "", false}, - {"abc", "", "", false}, - {"用戶名", "", "", false}, - {sr("a", nameSize), "", "", false}, - {sr("a", nameSize) + "/", "", "", false}, - {sr("a", nameSize) + "/a", sr("a", nameSize), "a", true}, - {sr("a", prefixSize) + "/", "", "", false}, - {sr("a", prefixSize) + "/a", sr("a", prefixSize), "a", true}, - {sr("a", nameSize+1), "", "", false}, - {sr("/", nameSize+1), sr("/", nameSize-1), "/", true}, - {sr("a", prefixSize) + "/" + sr("b", nameSize), - sr("a", prefixSize), sr("b", nameSize), true}, - {sr("a", prefixSize) + "//" + sr("b", nameSize), "", "", false}, - {sr("a/", nameSize), sr("a/", 77) + "a", sr("a/", 22), true}, - } - - for _, v := range vectors { - prefix, suffix, ok := splitUSTARPath(v.input) - if prefix != v.prefix || suffix != v.suffix || ok != v.ok { - t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)", - v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok) - } - } -} - -// TestIssue12594 tests that the Writer does not attempt to populate the prefix -// field when encoding a header in the GNU format. The prefix field is valid -// in USTAR and PAX, but not GNU. -func TestIssue12594(t *testing.T) { - names := []string{ - "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/file.txt", - "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/file.txt", - "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/333/file.txt", - "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/file.txt", - "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000/file.txt", - "/home/support/.openoffice.org/3/user/uno_packages/cache/registry/com.sun.star.comp.deployment.executable.PackageRegistryBackend", - } - - for i, name := range names { - var b bytes.Buffer - - tw := NewWriter(&b) - if err := tw.WriteHeader(&Header{ - Name: name, - Uid: 1 << 25, // Prevent USTAR format - }); err != nil { - t.Errorf("test %d, unexpected WriteHeader error: %v", i, err) - } - if err := tw.Close(); err != nil { - t.Errorf("test %d, unexpected Close error: %v", i, err) - } - - // The prefix field should never appear in the GNU format. - var blk block - copy(blk[:], b.Bytes()) - prefix := string(blk.USTAR().Prefix()) - if i := strings.IndexByte(prefix, 0); i >= 0 { - prefix = prefix[:i] // Truncate at the NUL terminator - } - if blk.GetFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) { - t.Errorf("test %d, found prefix in GNU format: %s", i, prefix) - } - - tr := NewReader(&b) - hdr, err := tr.Next() - if err != nil { - t.Errorf("test %d, unexpected Next error: %v", i, err) - } - if hdr.Name != name { - t.Errorf("test %d, hdr.Name = %s, want %s", i, hdr.Name, name) - } - } -} - -// testNonEmptyWriter wraps an io.Writer and ensures that -// Write is never called with an empty buffer. 
-type testNonEmptyWriter struct{ io.Writer } - -func (w testNonEmptyWriter) Write(b []byte) (int, error) { - if len(b) == 0 { - return 0, errors.New("unexpected empty Write call") - } - return w.Writer.Write(b) -} - -func TestFileWriter(t *testing.T) { - type ( - testWrite struct { // Write(str) == (wantCnt, wantErr) - str string - wantCnt int - wantErr error - } - testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr) - ops fileOps - wantCnt int64 - wantErr error - } - testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt - wantLCnt int64 - wantPCnt int64 - } - testFnc interface{} // testWrite | testReadFrom | testRemaining - ) - - type ( - makeReg struct { - size int64 - wantStr string - } - makeSparse struct { - makeReg makeReg - sph sparseHoles - size int64 - } - fileMaker interface{} // makeReg | makeSparse - ) - - vectors := []struct { - maker fileMaker - tests []testFnc - }{{ - maker: makeReg{0, ""}, - tests: []testFnc{ - testRemaining{0, 0}, - testWrite{"", 0, nil}, - testWrite{"a", 0, ErrWriteTooLong}, - testReadFrom{fileOps{""}, 0, nil}, - testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeReg{1, "a"}, - tests: []testFnc{ - testRemaining{1, 1}, - testWrite{"", 0, nil}, - testWrite{"a", 1, nil}, - testWrite{"bcde", 0, ErrWriteTooLong}, - testWrite{"", 0, nil}, - testReadFrom{fileOps{""}, 0, nil}, - testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeReg{5, "hello"}, - tests: []testFnc{ - testRemaining{5, 5}, - testWrite{"hello", 5, nil}, - testRemaining{0, 0}, - }, - }, { - maker: makeReg{5, "\x00\x00\x00\x00\x00"}, - tests: []testFnc{ - testRemaining{5, 5}, - testReadFrom{fileOps{"\x00\x00\x00\x00\x00"}, 5, nil}, - testRemaining{0, 0}, - }, - }, { - maker: makeReg{5, "\x00\x00\x00\x00\x00"}, - tests: []testFnc{ - testRemaining{5, 5}, - testReadFrom{fileOps{"\x00\x00\x00\x00\x00extra"}, 5, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeReg{5, "abc\x00\x00"}, - tests: []testFnc{ - testRemaining{5, 5}, - testWrite{"abc", 3, nil}, - testRemaining{2, 2}, - testReadFrom{fileOps{"\x00\x00"}, 2, nil}, - testRemaining{0, 0}, - }, - }, { - maker: makeReg{5, "\x00\x00abc"}, - tests: []testFnc{ - testRemaining{5, 5}, - testWrite{"\x00\x00", 2, nil}, - testRemaining{3, 3}, - testWrite{"abc", 3, nil}, - testReadFrom{fileOps{"z"}, 0, ErrWriteTooLong}, - testWrite{"z", 0, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, - tests: []testFnc{ - testRemaining{8, 5}, - testWrite{"ab\x00\x00\x00cde", 8, nil}, - testWrite{"a", 0, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, - tests: []testFnc{ - testWrite{"ab\x00\x00\x00cdez", 8, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, - tests: []testFnc{ - testWrite{"ab\x00", 3, nil}, - testRemaining{5, 3}, - testWrite{"\x00\x00cde", 5, nil}, - testWrite{"a", 0, ErrWriteTooLong}, - testRemaining{0, 0}, - }, - }, { - maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, - tests: []testFnc{ - testWrite{"ab", 2, nil}, - testRemaining{6, 3}, - testReadFrom{fileOps{int64(3), "cde"}, 6, nil}, - testRemaining{0, 0}, - }, - }, { - maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, - tests: []testFnc{ - testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, nil}, - testRemaining{0, 0}, 
- },
- }, {
- maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
- tests: []testFnc{
- testReadFrom{fileOps{"ab", int64(3), "cdeX"}, 8, ErrWriteTooLong},
- testRemaining{0, 0},
- },
- }, {
- maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
- tests: []testFnc{
- testReadFrom{fileOps{"ab", int64(3), "cd"}, 7, io.ErrUnexpectedEOF},
- testRemaining{1, 0},
- },
- }, {
- maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
- tests: []testFnc{
- testReadFrom{fileOps{"ab", int64(3), "cde"}, 7, errMissData},
- testRemaining{1, 0},
- },
- }, {
- maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8},
- tests: []testFnc{
- testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, errUnrefData},
- testRemaining{0, 1},
- },
- }, {
- maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
- tests: []testFnc{
- testWrite{"ab", 2, nil},
- testRemaining{6, 2},
- testWrite{"\x00\x00\x00", 3, nil},
- testRemaining{3, 2},
- testWrite{"cde", 2, errMissData},
- testRemaining{1, 0},
- },
- }, {
- maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8},
- tests: []testFnc{
- testWrite{"ab", 2, nil},
- testRemaining{6, 4},
- testWrite{"\x00\x00\x00", 3, nil},
- testRemaining{3, 4},
- testWrite{"cde", 3, errUnrefData},
- testRemaining{0, 1},
- },
- }, {
- maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testRemaining{7, 3},
- testWrite{"\x00\x00abc\x00\x00", 7, nil},
- testRemaining{0, 0},
- },
- }, {
- maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testRemaining{7, 3},
- testReadFrom{fileOps{int64(2), "abc", int64(1), "\x00"}, 7, nil},
- testRemaining{0, 0},
- },
- }, {
- maker: makeSparse{makeReg{3, ""}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testWrite{"abcdefg", 0, errWriteHole},
- },
- }, {
- maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testWrite{"\x00\x00abcde", 5, errWriteHole},
- },
- }, {
- maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testWrite{"\x00\x00abc\x00\x00z", 7, ErrWriteTooLong},
- testRemaining{0, 0},
- },
- }, {
- maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testWrite{"\x00\x00", 2, nil},
- testRemaining{5, 3},
- testWrite{"abc", 3, nil},
- testRemaining{2, 0},
- testWrite{"\x00\x00", 2, nil},
- testRemaining{0, 0},
- },
- }, {
- maker: makeSparse{makeReg{2, "ab"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testWrite{"\x00\x00", 2, nil},
- testWrite{"abc", 2, errMissData},
- testWrite{"\x00\x00", 0, errMissData},
- },
- }, {
- maker: makeSparse{makeReg{4, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
- tests: []testFnc{
- testWrite{"\x00\x00", 2, nil},
- testWrite{"abc", 3, nil},
- testWrite{"\x00\x00", 2, errUnrefData},
- },
- }}
- 
- for i, v := range vectors {
- var wantStr string
- bb := new(bytes.Buffer)
- w := testNonEmptyWriter{bb}
- var fw fileWriter
- switch maker := v.maker.(type) {
- case makeReg:
- fw = &regFileWriter{w, maker.size}
- wantStr = maker.wantStr
- case makeSparse:
- if !validateSparseEntries(maker.sph, maker.size) {
- t.Fatalf("invalid sparse map: %v", maker.sph)
- }
- spd := invertSparseEntries(maker.sph, maker.size)
- fw = &regFileWriter{w, maker.makeReg.size}
- fw = &sparseFileWriter{fw, spd, 0}
- wantStr = maker.makeReg.wantStr
- default:
- t.Fatalf("test %d, unknown make operation: %T", i, maker)
- }
- 
- for j, tf := range v.tests {
- switch tf := tf.(type) {
- case testWrite:
- got, err := fw.Write([]byte(tf.str))
- if got != tf.wantCnt || err != tf.wantErr {
- t.Errorf("test %d.%d, Write(%s):\ngot (%d, %v)\nwant (%d, %v)", i, j, tf.str, got, err, tf.wantCnt, tf.wantErr)
- }
- case testReadFrom:
- f := &testFile{ops: tf.ops}
- got, err := fw.ReadFrom(f)
- if _, ok := err.(testError); ok {
- t.Errorf("test %d.%d, ReadFrom(): %v", i, j, err)
- } else if got != tf.wantCnt || err != tf.wantErr {
- t.Errorf("test %d.%d, ReadFrom() = (%d, %v), want (%d, %v)", i, j, got, err, tf.wantCnt, tf.wantErr)
- }
- if len(f.ops) > 0 {
- t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
- }
- case testRemaining:
- if got := fw.LogicalRemaining(); got != tf.wantLCnt {
- t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
- }
- if got := fw.PhysicalRemaining(); got != tf.wantPCnt {
- t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
- }
- default:
- t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
- }
- }
- 
- if got := bb.String(); got != wantStr {
- t.Fatalf("test %d, String() = %q, want %q", i, got, wantStr)
- }
- }
-}