forked from mirrors/tar-split
archive/tar: adding from go as of a9dddb53f
This commit is contained in:
parent
70b9150cff
commit
64426b0aae
23 changed files with 3195 additions and 0 deletions
305
archive/tar/common.go
Normal file
305
archive/tar/common.go
Normal file
|
@ -0,0 +1,305 @@
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package tar implements access to tar archives.
|
||||||
|
// It aims to cover most of the variations, including those produced
|
||||||
|
// by GNU and BSD tars.
|
||||||
|
//
|
||||||
|
// References:
|
||||||
|
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
|
||||||
|
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
|
||||||
|
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// blockSize is the fixed size of a block in a tar stream, in bytes.
	blockSize = 512

	// Types: values for Header.Typeflag identifying the kind of entry.
	TypeReg           = '0'    // regular file
	TypeRegA          = '\x00' // regular file
	TypeLink          = '1'    // hard link
	TypeSymlink       = '2'    // symbolic link
	TypeChar          = '3'    // character device node
	TypeBlock         = '4'    // block device node
	TypeDir           = '5'    // directory
	TypeFifo          = '6'    // fifo node
	TypeCont          = '7'    // reserved
	TypeXHeader       = 'x'    // extended header
	TypeXGlobalHeader = 'g'    // global extended header
	TypeGNULongName   = 'L'    // Next file has a long name
	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
	TypeGNUSparse     = 'S'    // sparse file
)
|
||||||
|
|
||||||
|
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
	Name       string            // name of header file entry
	Mode       int64             // permission and mode bits
	Uid        int               // user id of owner
	Gid        int               // group id of owner
	Size       int64             // length in bytes
	ModTime    time.Time         // modified time
	Typeflag   byte              // type of header entry (one of the Type* constants)
	Linkname   string            // target name of link
	Uname      string            // user name of owner
	Gname      string            // group name of owner
	Devmajor   int64             // major number of character or block device
	Devminor   int64             // minor number of character or block device
	AccessTime time.Time         // access time
	ChangeTime time.Time         // status change time
	Xattrs     map[string]string // extended attributes (from PAX SCHILY.xattr.* records)
}
|
||||||
|
|
||||||
|
// File name constants from the tar spec.
const (
	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
|
||||||
|
|
||||||
|
// FileInfo returns an os.FileInfo for the Header.
// The returned value wraps the header; it does not touch the filesystem.
func (h *Header) FileInfo() os.FileInfo {
	return headerFileInfo{h}
}
|
||||||
|
|
||||||
|
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
	h *Header // the wrapped tar header all methods read from
}
|
||||||
|
|
||||||
|
// Size returns the length in bytes recorded in the header.
func (fi headerFileInfo) Size() int64 { return fi.h.Size }

// IsDir reports whether the header's mode bits describe a directory.
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }

// ModTime returns the modification time recorded in the header.
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }

// Sys returns the underlying *Header as the system-specific value.
func (fi headerFileInfo) Sys() interface{} { return fi.h }
|
||||||
|
|
||||||
|
// Name returns the base name of the file.
|
||||||
|
func (fi headerFileInfo) Name() string {
|
||||||
|
if fi.IsDir() {
|
||||||
|
return path.Base(path.Clean(fi.h.Name))
|
||||||
|
}
|
||||||
|
return path.Base(fi.h.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||||
|
func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||||
|
// Set file permission bits.
|
||||||
|
mode = os.FileMode(fi.h.Mode).Perm()
|
||||||
|
|
||||||
|
// Set setuid, setgid and sticky bits.
|
||||||
|
if fi.h.Mode&c_ISUID != 0 {
|
||||||
|
// setuid
|
||||||
|
mode |= os.ModeSetuid
|
||||||
|
}
|
||||||
|
if fi.h.Mode&c_ISGID != 0 {
|
||||||
|
// setgid
|
||||||
|
mode |= os.ModeSetgid
|
||||||
|
}
|
||||||
|
if fi.h.Mode&c_ISVTX != 0 {
|
||||||
|
// sticky
|
||||||
|
mode |= os.ModeSticky
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set file mode bits.
|
||||||
|
// clear perm, setuid, setgid and sticky bits.
|
||||||
|
m := os.FileMode(fi.h.Mode) &^ 07777
|
||||||
|
if m == c_ISDIR {
|
||||||
|
// directory
|
||||||
|
mode |= os.ModeDir
|
||||||
|
}
|
||||||
|
if m == c_ISFIFO {
|
||||||
|
// named pipe (FIFO)
|
||||||
|
mode |= os.ModeNamedPipe
|
||||||
|
}
|
||||||
|
if m == c_ISLNK {
|
||||||
|
// symbolic link
|
||||||
|
mode |= os.ModeSymlink
|
||||||
|
}
|
||||||
|
if m == c_ISBLK {
|
||||||
|
// device file
|
||||||
|
mode |= os.ModeDevice
|
||||||
|
}
|
||||||
|
if m == c_ISCHR {
|
||||||
|
// Unix character device
|
||||||
|
mode |= os.ModeDevice
|
||||||
|
mode |= os.ModeCharDevice
|
||||||
|
}
|
||||||
|
if m == c_ISSOCK {
|
||||||
|
// Unix domain socket
|
||||||
|
mode |= os.ModeSocket
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fi.h.Typeflag {
|
||||||
|
case TypeLink, TypeSymlink:
|
||||||
|
// hard link, symbolic link
|
||||||
|
mode |= os.ModeSymlink
|
||||||
|
case TypeChar:
|
||||||
|
// character device node
|
||||||
|
mode |= os.ModeDevice
|
||||||
|
mode |= os.ModeCharDevice
|
||||||
|
case TypeBlock:
|
||||||
|
// block device node
|
||||||
|
mode |= os.ModeDevice
|
||||||
|
case TypeDir:
|
||||||
|
// directory
|
||||||
|
mode |= os.ModeDir
|
||||||
|
case TypeFifo:
|
||||||
|
// fifo node
|
||||||
|
mode |= os.ModeNamedPipe
|
||||||
|
}
|
||||||
|
|
||||||
|
return mode
|
||||||
|
}
|
||||||
|
|
||||||
|
// sysStat, if non-nil, populates h from system-dependent fields of fi.
// NOTE(review): presumably assigned by platform-specific files elsewhere
// in the package — not visible in this chunk.
var sysStat func(fi os.FileInfo, h *Header) error
|
||||||
|
|
||||||
|
// Mode constants from the tar spec (octal, matching POSIX stat file-type
// and permission-modifier bits).
const (
	c_ISUID  = 04000   // Set uid
	c_ISGID  = 02000   // Set gid
	c_ISVTX  = 01000   // Save text (sticky bit)
	c_ISDIR  = 040000  // Directory
	c_ISFIFO = 010000  // FIFO
	c_ISREG  = 0100000 // Regular file
	c_ISLNK  = 0120000 // Symbolic link
	c_ISBLK  = 060000  // Block special file
	c_ISCHR  = 020000  // Character special file
	c_ISSOCK = 0140000 // Socket
)
|
||||||
|
|
||||||
|
// Keywords for the PAX Extended Header
const (
	paxAtime    = "atime"
	paxCharset  = "charset"
	paxComment  = "comment"
	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
	paxGid      = "gid"
	paxGname    = "gname"
	paxLinkpath = "linkpath"
	paxMtime    = "mtime"
	paxPath     = "path"
	paxSize     = "size"
	paxUid      = "uid"
	paxUname    = "uname"
	paxXattr    = "SCHILY.xattr." // prefix for extended-attribute records
	paxNone     = ""
)
|
||||||
|
|
||||||
|
// FileInfoHeader creates a partially-populated Header from fi.
|
||||||
|
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||||
|
// If fi describes a directory, a slash is appended to the name.
|
||||||
|
// Because os.FileInfo's Name method returns only the base name of
|
||||||
|
// the file it describes, it may be necessary to modify the Name field
|
||||||
|
// of the returned header to provide the full path name of the file.
|
||||||
|
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||||
|
if fi == nil {
|
||||||
|
return nil, errors.New("tar: FileInfo is nil")
|
||||||
|
}
|
||||||
|
fm := fi.Mode()
|
||||||
|
h := &Header{
|
||||||
|
Name: fi.Name(),
|
||||||
|
ModTime: fi.ModTime(),
|
||||||
|
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case fm.IsRegular():
|
||||||
|
h.Mode |= c_ISREG
|
||||||
|
h.Typeflag = TypeReg
|
||||||
|
h.Size = fi.Size()
|
||||||
|
case fi.IsDir():
|
||||||
|
h.Typeflag = TypeDir
|
||||||
|
h.Mode |= c_ISDIR
|
||||||
|
h.Name += "/"
|
||||||
|
case fm&os.ModeSymlink != 0:
|
||||||
|
h.Typeflag = TypeSymlink
|
||||||
|
h.Mode |= c_ISLNK
|
||||||
|
h.Linkname = link
|
||||||
|
case fm&os.ModeDevice != 0:
|
||||||
|
if fm&os.ModeCharDevice != 0 {
|
||||||
|
h.Mode |= c_ISCHR
|
||||||
|
h.Typeflag = TypeChar
|
||||||
|
} else {
|
||||||
|
h.Mode |= c_ISBLK
|
||||||
|
h.Typeflag = TypeBlock
|
||||||
|
}
|
||||||
|
case fm&os.ModeNamedPipe != 0:
|
||||||
|
h.Typeflag = TypeFifo
|
||||||
|
h.Mode |= c_ISFIFO
|
||||||
|
case fm&os.ModeSocket != 0:
|
||||||
|
h.Mode |= c_ISSOCK
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||||
|
}
|
||||||
|
if fm&os.ModeSetuid != 0 {
|
||||||
|
h.Mode |= c_ISUID
|
||||||
|
}
|
||||||
|
if fm&os.ModeSetgid != 0 {
|
||||||
|
h.Mode |= c_ISGID
|
||||||
|
}
|
||||||
|
if fm&os.ModeSticky != 0 {
|
||||||
|
h.Mode |= c_ISVTX
|
||||||
|
}
|
||||||
|
if sysStat != nil {
|
||||||
|
return h, sysStat(fi, h)
|
||||||
|
}
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// zeroBlock is an all-zero block used to detect the two-zero-block
// end-of-archive marker and to clear the header buffer.
var zeroBlock = make([]byte, blockSize)
|
||||||
|
|
||||||
|
// checksum sums the bytes of a 512-byte tar header block.
// POSIX specifies a sum of the unsigned byte values, but the Sun tar
// uses signed byte values, so both variants are computed and returned.
// The chksum field itself (header[148:156]) is counted as eight spaces.
func checksum(header []byte) (unsigned int64, signed int64) {
	for i := 0; i < len(header); i++ {
		if i == 148 {
			// Treat the 8-byte chksum field as all spaces.
			const spaces = ' ' * 8
			unsigned += spaces
			signed += spaces
			i += 7
			continue
		}
		b := header[i]
		unsigned += int64(b)
		signed += int64(int8(b))
	}
	return
}
|
||||||
|
|
||||||
|
// slicer is a cursor over a byte slice: each call to next consumes a
// fixed-size prefix and advances the slicer past it.
type slicer []byte

// next returns the first n bytes and re-points the slicer at the rest.
// It panics (like any slice expression) if fewer than n bytes remain.
func (sp *slicer) next(n int) (b []byte) {
	b = (*sp)[:n]
	*sp = (*sp)[n:]
	return b
}
|
||||||
|
|
||||||
|
// isASCII reports whether s consists entirely of 7-bit ASCII characters.
func isASCII(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] >= 0x80 {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
func toASCII(s string) string {
|
||||||
|
if isASCII(s) {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for _, c := range s {
|
||||||
|
if c < 0x80 {
|
||||||
|
buf.WriteByte(byte(c))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
80
archive/tar/example_test.go
Normal file
80
archive/tar/example_test.go
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Example demonstrates a full round trip: writing a few files into an
// in-memory tar archive, then reading them back and printing the contents.
func Example() {
	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)

	// Create a new tar archive.
	tw := tar.NewWriter(buf)

	// Add some files to the archive.
	var files = []struct {
		Name, Body string
	}{
		{"readme.txt", "This archive contains some text files."},
		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
		{"todo.txt", "Get animal handling licence."},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Mode: 0600,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			log.Fatalln(err)
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			log.Fatalln(err)
		}
	}
	// Make sure to check the error on Close.
	if err := tw.Close(); err != nil {
		log.Fatalln(err)
	}

	// Open the tar archive for reading.
	r := bytes.NewReader(buf.Bytes())
	tr := tar.NewReader(r)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Contents of %s:\n", hdr.Name)
		if _, err := io.Copy(os.Stdout, tr); err != nil {
			log.Fatalln(err)
		}
		fmt.Println()
	}

	// Output:
	// Contents of readme.txt:
	// This archive contains some text files.
	// Contents of gopher.txt:
	// Gopher names:
	// George
	// Geoffrey
	// Gonzo
	// Contents of todo.txt:
	// Get animal handling licence.
}
|
822
archive/tar/reader.go
Normal file
822
archive/tar/reader.go
Normal file
|
@ -0,0 +1,822 @@
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
// TODO(dsymonds):
|
||||||
|
// - pax extensions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrHeader is returned when a tar header block is malformed.
	ErrHeader = errors.New("archive/tar: invalid tar header")
)
|
||||||
|
|
||||||
|
// maxNanoSecondIntSize is the number of decimal digits in a nanosecond
// value; PAX sub-second fields are padded or truncated to this width.
const maxNanoSecondIntSize = 9
|
||||||
|
|
||||||
|
// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first),
// and then it can be treated as an io.Reader to access the file's data.
type Reader struct {
	r       io.Reader
	err     error           // sticky error; once set, subsequent operations return it
	pad     int64           // amount of padding (ignored) after current file entry
	curr    numBytesReader  // reader for current file entry
	hdrBuff [blockSize]byte // buffer to use in readHeader
}
|
||||||
|
|
||||||
|
// A numBytesReader is an io.Reader with a numBytes method, returning the number
// of bytes remaining in the underlying encoded data.
type numBytesReader interface {
	io.Reader
	numBytes() int64
}
|
||||||
|
|
||||||
|
// A regFileReader is a numBytesReader for reading file data from a tar archive.
type regFileReader struct {
	r  io.Reader // underlying reader
	nb int64     // number of unread bytes for current file entry
}
|
||||||
|
|
||||||
|
// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
type sparseFileReader struct {
	rfr *regFileReader // reads the sparse-encoded file data
	sp  []sparseEntry  // the sparse map for the file
	pos int64          // keeps track of file position
	tot int64          // total size of the file
}
|
||||||
|
|
||||||
|
// Keywords for GNU sparse files in a PAX extended header
const (
	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
	paxGNUSparseOffset    = "GNU.sparse.offset"
	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
	paxGNUSparseMap       = "GNU.sparse.map"
	paxGNUSparseName      = "GNU.sparse.name"
	paxGNUSparseMajor     = "GNU.sparse.major"
	paxGNUSparseMinor     = "GNU.sparse.minor"
	paxGNUSparseSize      = "GNU.sparse.size"
	paxGNUSparseRealSize  = "GNU.sparse.realsize"
)
|
||||||
|
|
||||||
|
// Keywords for old GNU sparse headers: byte offsets and entry counts
// within the 512-byte header blocks of the old GNU sparse format.
const (
	oldGNUSparseMainHeaderOffset               = 386 // offset of the sparse map in the main header
	oldGNUSparseMainHeaderIsExtendedOffset     = 482 // flag byte: more sparse entries follow
	oldGNUSparseMainHeaderNumEntries           = 4   // sparse entries in the main header
	oldGNUSparseExtendedHeaderIsExtendedOffset = 504 // flag byte in each extension block
	oldGNUSparseExtendedHeaderNumEntries       = 21  // sparse entries per extension block
	oldGNUSparseOffsetSize                     = 12  // octal digits in an offset field
	oldGNUSparseNumBytesSize                   = 12  // octal digits in a numBytes field
)
|
||||||
|
|
||||||
|
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
|
||||||
|
|
||||||
|
// Next advances to the next entry in the tar archive.
|
||||||
|
//
|
||||||
|
// io.EOF is returned at the end of the input.
|
||||||
|
func (tr *Reader) Next() (*Header, error) {
|
||||||
|
var hdr *Header
|
||||||
|
if tr.err == nil {
|
||||||
|
tr.skipUnread()
|
||||||
|
}
|
||||||
|
if tr.err != nil {
|
||||||
|
return hdr, tr.err
|
||||||
|
}
|
||||||
|
hdr = tr.readHeader()
|
||||||
|
if hdr == nil {
|
||||||
|
return hdr, tr.err
|
||||||
|
}
|
||||||
|
// Check for PAX/GNU header.
|
||||||
|
switch hdr.Typeflag {
|
||||||
|
case TypeXHeader:
|
||||||
|
// PAX extended header
|
||||||
|
headers, err := parsePAX(tr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// We actually read the whole file,
|
||||||
|
// but this skips alignment padding
|
||||||
|
tr.skipUnread()
|
||||||
|
hdr = tr.readHeader()
|
||||||
|
mergePAX(hdr, headers)
|
||||||
|
|
||||||
|
// Check for a PAX format sparse file
|
||||||
|
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
|
||||||
|
if err != nil {
|
||||||
|
tr.err = err
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if sp != nil {
|
||||||
|
// Current file is a PAX format GNU sparse file.
|
||||||
|
// Set the current file reader to a sparse file reader.
|
||||||
|
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
|
||||||
|
}
|
||||||
|
return hdr, nil
|
||||||
|
case TypeGNULongName:
|
||||||
|
// We have a GNU long name header. Its contents are the real file name.
|
||||||
|
realname, err := ioutil.ReadAll(tr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hdr, err := tr.Next()
|
||||||
|
hdr.Name = cString(realname)
|
||||||
|
return hdr, err
|
||||||
|
case TypeGNULongLink:
|
||||||
|
// We have a GNU long link header.
|
||||||
|
realname, err := ioutil.ReadAll(tr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
hdr, err := tr.Next()
|
||||||
|
hdr.Linkname = cString(realname)
|
||||||
|
return hdr, err
|
||||||
|
}
|
||||||
|
return hdr, tr.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
// be treated as a regular file.
func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
	var sparseFormat string

	// Check for sparse format indicators
	major, majorOk := headers[paxGNUSparseMajor]
	minor, minorOk := headers[paxGNUSparseMinor]
	sparseName, sparseNameOk := headers[paxGNUSparseName]
	_, sparseMapOk := headers[paxGNUSparseMap]
	sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
	sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]

	// Identify which, if any, sparse format applies from which PAX headers are set
	if majorOk && minorOk {
		// Explicit version keys, e.g. "1.0".
		sparseFormat = major + "." + minor
	} else if sparseNameOk && sparseMapOk {
		// A name plus an inline map identifies format 0.1.
		sparseFormat = "0.1"
	} else if sparseSizeOk {
		// A bare size key identifies format 0.0.
		sparseFormat = "0.0"
	} else {
		// Not a PAX format GNU sparse file.
		return nil, nil
	}

	// Check for unknown sparse format
	if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
		return nil, nil
	}

	// Update hdr from GNU sparse PAX headers
	if sparseNameOk {
		hdr.Name = sparseName
	}
	if sparseSizeOk {
		realSize, err := strconv.ParseInt(sparseSize, 10, 0)
		if err != nil {
			return nil, ErrHeader
		}
		hdr.Size = realSize
	} else if sparseRealSizeOk {
		realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
		if err != nil {
			return nil, ErrHeader
		}
		hdr.Size = realSize
	}

	// Set up the sparse map, according to the particular sparse format in use
	var sp []sparseEntry
	var err error
	switch sparseFormat {
	case "0.0", "0.1":
		// Map is carried in the PAX headers themselves.
		sp, err = readGNUSparseMap0x1(headers)
	case "1.0":
		// Map precedes the file data in the entry body.
		sp, err = readGNUSparseMap1x0(tr.curr)
	}
	return sp, err
}
|
||||||
|
|
||||||
|
// mergePAX merges well known headers according to PAX standard.
// In general headers with the same name as those found
// in the header struct overwrite those found in the header
// struct with higher precision or longer values. Esp. useful
// for name and linkname fields.
func mergePAX(hdr *Header, headers map[string]string) error {
	for k, v := range headers {
		switch k {
		case paxPath:
			hdr.Name = v
		case paxLinkpath:
			hdr.Linkname = v
		case paxGname:
			hdr.Gname = v
		case paxUname:
			hdr.Uname = v
		case paxUid:
			// Numeric fields are decimal; a bad value aborts the merge.
			uid, err := strconv.ParseInt(v, 10, 0)
			if err != nil {
				return err
			}
			hdr.Uid = int(uid)
		case paxGid:
			gid, err := strconv.ParseInt(v, 10, 0)
			if err != nil {
				return err
			}
			hdr.Gid = int(gid)
		case paxAtime:
			t, err := parsePAXTime(v)
			if err != nil {
				return err
			}
			hdr.AccessTime = t
		case paxMtime:
			t, err := parsePAXTime(v)
			if err != nil {
				return err
			}
			hdr.ModTime = t
		case paxCtime:
			t, err := parsePAXTime(v)
			if err != nil {
				return err
			}
			hdr.ChangeTime = t
		case paxSize:
			size, err := strconv.ParseInt(v, 10, 0)
			if err != nil {
				return err
			}
			hdr.Size = int64(size)
		default:
			// Keys with the SCHILY.xattr. prefix become entries in the
			// Xattrs map (created lazily); other keys are ignored.
			if strings.HasPrefix(k, paxXattr) {
				if hdr.Xattrs == nil {
					hdr.Xattrs = make(map[string]string)
				}
				hdr.Xattrs[k[len(paxXattr):]] = v
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// parsePAXTime takes a string of the form %d.%d as described in
|
||||||
|
// the PAX specification.
|
||||||
|
func parsePAXTime(t string) (time.Time, error) {
|
||||||
|
buf := []byte(t)
|
||||||
|
pos := bytes.IndexByte(buf, '.')
|
||||||
|
var seconds, nanoseconds int64
|
||||||
|
var err error
|
||||||
|
if pos == -1 {
|
||||||
|
seconds, err = strconv.ParseInt(t, 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
nano_buf := string(buf[pos+1:])
|
||||||
|
// Pad as needed before converting to a decimal.
|
||||||
|
// For example .030 -> .030000000 -> 30000000 nanoseconds
|
||||||
|
if len(nano_buf) < maxNanoSecondIntSize {
|
||||||
|
// Right pad
|
||||||
|
nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
|
||||||
|
} else if len(nano_buf) > maxNanoSecondIntSize {
|
||||||
|
// Right truncate
|
||||||
|
nano_buf = nano_buf[:maxNanoSecondIntSize]
|
||||||
|
}
|
||||||
|
nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ts := time.Unix(seconds, nanoseconds)
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsePAX parses PAX headers.
|
||||||
|
// If an extended header (type 'x') is invalid, ErrHeader is returned
|
||||||
|
func parsePAX(r io.Reader) (map[string]string, error) {
|
||||||
|
buf, err := ioutil.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// For GNU PAX sparse format 0.0 support.
|
||||||
|
// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
|
||||||
|
var sparseMap bytes.Buffer
|
||||||
|
|
||||||
|
headers := make(map[string]string)
|
||||||
|
// Each record is constructed as
|
||||||
|
// "%d %s=%s\n", length, keyword, value
|
||||||
|
for len(buf) > 0 {
|
||||||
|
// or the header was empty to start with.
|
||||||
|
var sp int
|
||||||
|
// The size field ends at the first space.
|
||||||
|
sp = bytes.IndexByte(buf, ' ')
|
||||||
|
if sp == -1 {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
// Parse the first token as a decimal integer.
|
||||||
|
n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
// Extract everything between the decimal and the n -1 on the
|
||||||
|
// beginning to eat the ' ', -1 on the end to skip the newline.
|
||||||
|
var record []byte
|
||||||
|
record, buf = buf[sp+1:n-1], buf[n:]
|
||||||
|
// The first equals is guaranteed to mark the end of the key.
|
||||||
|
// Everything else is value.
|
||||||
|
eq := bytes.IndexByte(record, '=')
|
||||||
|
if eq == -1 {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
key, value := record[:eq], record[eq+1:]
|
||||||
|
|
||||||
|
keyStr := string(key)
|
||||||
|
if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
|
||||||
|
// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
|
||||||
|
sparseMap.Write(value)
|
||||||
|
sparseMap.Write([]byte{','})
|
||||||
|
} else {
|
||||||
|
// Normal key. Set the value in the headers map.
|
||||||
|
headers[keyStr] = string(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if sparseMap.Len() != 0 {
|
||||||
|
// Add sparse info to headers, chopping off the extra comma
|
||||||
|
sparseMap.Truncate(sparseMap.Len() - 1)
|
||||||
|
headers[paxGNUSparseMap] = sparseMap.String()
|
||||||
|
}
|
||||||
|
return headers, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func cString(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		return string(b[:i])
	}
	return string(b)
}
|
||||||
|
|
||||||
|
func (tr *Reader) octal(b []byte) int64 {
|
||||||
|
// Check for binary format first.
|
||||||
|
if len(b) > 0 && b[0]&0x80 != 0 {
|
||||||
|
var x int64
|
||||||
|
for i, c := range b {
|
||||||
|
if i == 0 {
|
||||||
|
c &= 0x7f // ignore signal bit in first byte
|
||||||
|
}
|
||||||
|
x = x<<8 | int64(c)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// Because unused fields are filled with NULs, we need
|
||||||
|
// to skip leading NULs. Fields may also be padded with
|
||||||
|
// spaces or NULs.
|
||||||
|
// So we remove leading and trailing NULs and spaces to
|
||||||
|
// be sure.
|
||||||
|
b = bytes.Trim(b, " \x00")
|
||||||
|
|
||||||
|
if len(b) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
x, err := strconv.ParseUint(cString(b), 8, 64)
|
||||||
|
if err != nil {
|
||||||
|
tr.err = err
|
||||||
|
}
|
||||||
|
return int64(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
func (tr *Reader) skipUnread() {
	nr := tr.numBytes() + tr.pad // number of bytes to skip
	tr.curr, tr.pad = nil, 0
	// Prefer seeking past the bytes when the underlying reader supports
	// it; fall back to draining them if it doesn't (or the seek fails).
	if sr, ok := tr.r.(io.Seeker); ok {
		if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
			return
		}
	}
	_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
}
|
||||||
|
|
||||||
|
func (tr *Reader) verifyChecksum(header []byte) bool {
|
||||||
|
if tr.err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
given := tr.octal(header[148:156])
|
||||||
|
unsigned, signed := checksum(header)
|
||||||
|
return given == unsigned || given == signed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tr *Reader) readHeader() *Header {
|
||||||
|
header := tr.hdrBuff[:]
|
||||||
|
copy(header, zeroBlock)
|
||||||
|
|
||||||
|
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Two blocks of zero bytes marks the end of the archive.
|
||||||
|
if bytes.Equal(header, zeroBlock[0:blockSize]) {
|
||||||
|
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if bytes.Equal(header, zeroBlock[0:blockSize]) {
|
||||||
|
tr.err = io.EOF
|
||||||
|
} else {
|
||||||
|
tr.err = ErrHeader // zero block and then non-zero block
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !tr.verifyChecksum(header) {
|
||||||
|
tr.err = ErrHeader
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unpack
|
||||||
|
hdr := new(Header)
|
||||||
|
s := slicer(header)
|
||||||
|
|
||||||
|
hdr.Name = cString(s.next(100))
|
||||||
|
hdr.Mode = tr.octal(s.next(8))
|
||||||
|
hdr.Uid = int(tr.octal(s.next(8)))
|
||||||
|
hdr.Gid = int(tr.octal(s.next(8)))
|
||||||
|
hdr.Size = tr.octal(s.next(12))
|
||||||
|
hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
|
||||||
|
s.next(8) // chksum
|
||||||
|
hdr.Typeflag = s.next(1)[0]
|
||||||
|
hdr.Linkname = cString(s.next(100))
|
||||||
|
|
||||||
|
// The remainder of the header depends on the value of magic.
|
||||||
|
// The original (v7) version of tar had no explicit magic field,
|
||||||
|
// so its magic bytes, like the rest of the block, are NULs.
|
||||||
|
magic := string(s.next(8)) // contains version field as well.
|
||||||
|
var format string
|
||||||
|
switch {
|
||||||
|
case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
|
||||||
|
if string(header[508:512]) == "tar\x00" {
|
||||||
|
format = "star"
|
||||||
|
} else {
|
||||||
|
format = "posix"
|
||||||
|
}
|
||||||
|
case magic == "ustar \x00": // old GNU tar
|
||||||
|
format = "gnu"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch format {
|
||||||
|
case "posix", "gnu", "star":
|
||||||
|
hdr.Uname = cString(s.next(32))
|
||||||
|
hdr.Gname = cString(s.next(32))
|
||||||
|
devmajor := s.next(8)
|
||||||
|
devminor := s.next(8)
|
||||||
|
if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
|
||||||
|
hdr.Devmajor = tr.octal(devmajor)
|
||||||
|
hdr.Devminor = tr.octal(devminor)
|
||||||
|
}
|
||||||
|
var prefix string
|
||||||
|
switch format {
|
||||||
|
case "posix", "gnu":
|
||||||
|
prefix = cString(s.next(155))
|
||||||
|
case "star":
|
||||||
|
prefix = cString(s.next(131))
|
||||||
|
hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
|
||||||
|
hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
|
||||||
|
}
|
||||||
|
if len(prefix) > 0 {
|
||||||
|
hdr.Name = prefix + "/" + hdr.Name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tr.err != nil {
|
||||||
|
tr.err = ErrHeader
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Maximum value of hdr.Size is 64 GB (12 octal digits),
|
||||||
|
// so there's no risk of int64 overflowing.
|
||||||
|
nb := int64(hdr.Size)
|
||||||
|
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
|
||||||
|
|
||||||
|
// Set the current file reader.
|
||||||
|
tr.curr = ®FileReader{r: tr.r, nb: nb}
|
||||||
|
|
||||||
|
// Check for old GNU sparse format entry.
|
||||||
|
if hdr.Typeflag == TypeGNUSparse {
|
||||||
|
// Get the real size of the file.
|
||||||
|
hdr.Size = tr.octal(header[483:495])
|
||||||
|
|
||||||
|
// Read the sparse map.
|
||||||
|
sp := tr.readOldGNUSparseMap(header)
|
||||||
|
if tr.err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Current file is a GNU sparse file. Update the current file reader.
|
||||||
|
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
|
||||||
|
}
|
||||||
|
|
||||||
|
return hdr
|
||||||
|
}
|
||||||
|
|
||||||
|
// A sparseEntry holds a single entry in a sparse file's sparse map.
// A sparse entry indicates the offset and size in a sparse file of a
// block of data.
type sparseEntry struct {
	// offset is the position of the data block within the logical
	// (expanded) file, in bytes.
	offset int64
	// numBytes is the length of the data block, in bytes.
	numBytes int64
}
|
||||||
|
|
||||||
|
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
//
// On a malformed map, tr.err is set (to ErrHeader for parse failures) and nil
// is returned.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
	// A nonzero byte at this offset means the map continues in extension blocks.
	isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
	spCap := oldGNUSparseMainHeaderNumEntries
	if isExtended {
		spCap += oldGNUSparseExtendedHeaderNumEntries
	}
	sp := make([]sparseEntry, 0, spCap)
	s := slicer(header[oldGNUSparseMainHeaderOffset:])

	// Read the four entries from the main tar header
	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
		offset := tr.octal(s.next(oldGNUSparseOffsetSize))
		numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
		// tr.octal records failures in tr.err rather than returning them.
		if tr.err != nil {
			tr.err = ErrHeader
			return nil
		}
		// An all-zero pair terminates the map before the slots are full.
		if offset == 0 && numBytes == 0 {
			break
		}
		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}

	for isExtended {
		// There are more entries. Read an extension header and parse its entries.
		sparseHeader := make([]byte, blockSize)
		if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
			return nil
		}
		// Each extension block itself flags whether another block follows.
		isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
		s = slicer(sparseHeader)
		for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
			offset := tr.octal(s.next(oldGNUSparseOffsetSize))
			numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
			if tr.err != nil {
				tr.err = ErrHeader
				return nil
			}
			if offset == 0 && numBytes == 0 {
				break
			}
			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
		}
	}
	return sp
}
|
||||||
|
|
||||||
|
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
// The sparse map is stored just before the file data and padded out to the nearest block boundary.
//
// The map is a sequence of newline-terminated decimal numbers: first the
// entry count, then (offset, numBytes) pairs.
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
	// buf can hold two blocks: the current one plus one block of lookahead
	// for numbers that straddle a block boundary.
	buf := make([]byte, 2*blockSize)
	sparseHeader := buf[:blockSize]

	// readDecimal is a helper function to read a decimal integer from the sparse map
	// while making sure to read from the file in blocks of size blockSize
	readDecimal := func() (int64, error) {
		// Look for newline
		nl := bytes.IndexByte(sparseHeader, '\n')
		if nl == -1 {
			if len(sparseHeader) >= blockSize {
				// This is an error: a number cannot span more than one
				// additional block.
				return 0, ErrHeader
			}
			oldLen := len(sparseHeader)
			newLen := oldLen + blockSize
			if cap(sparseHeader) < newLen {
				// There's more header, but we need to make room for the next block
				copy(buf, sparseHeader)
				sparseHeader = buf[:newLen]
			} else {
				// There's more header, and we can just reslice
				sparseHeader = sparseHeader[:newLen]
			}

			// Now that sparseHeader is large enough, read next block
			if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
				return 0, err
			}

			// Look for a newline in the new data
			nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
			if nl == -1 {
				// This is an error
				return 0, ErrHeader
			}
			nl += oldLen // We want the position from the beginning
		}
		// Now that we've found a newline, read a number
		n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
		if err != nil {
			return 0, ErrHeader
		}

		// Update sparseHeader to consume this number
		sparseHeader = sparseHeader[nl+1:]
		return n, nil
	}

	// Read the first block
	if _, err := io.ReadFull(r, sparseHeader); err != nil {
		return nil, err
	}

	// The first line contains the number of entries
	numEntries, err := readDecimal()
	if err != nil {
		return nil, err
	}

	// Read all the entries
	sp := make([]sparseEntry, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		// Read the offset
		offset, err := readDecimal()
		if err != nil {
			return nil, err
		}
		// Read numBytes
		numBytes, err := readDecimal()
		if err != nil {
			return nil, err
		}

		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}

	return sp, nil
}
|
||||||
|
|
||||||
|
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
|
||||||
|
// The sparse map is stored in the PAX headers.
|
||||||
|
func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
|
||||||
|
// Get number of entries
|
||||||
|
numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
|
||||||
|
if !ok {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
|
||||||
|
|
||||||
|
// There should be two numbers in sparseMap for each entry
|
||||||
|
if int64(len(sparseMap)) != 2*numEntries {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loop through the entries in the sparse map
|
||||||
|
sp := make([]sparseEntry, 0, numEntries)
|
||||||
|
for i := int64(0); i < numEntries; i++ {
|
||||||
|
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ErrHeader
|
||||||
|
}
|
||||||
|
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
|
||||||
|
}
|
||||||
|
|
||||||
|
return sp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// numBytes returns the number of bytes left to read in the current file's entry
|
||||||
|
// in the tar archive, or 0 if there is no current file.
|
||||||
|
func (tr *Reader) numBytes() int64 {
|
||||||
|
if tr.curr == nil {
|
||||||
|
// No current file, so no bytes
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return tr.curr.numBytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads from the current entry in the tar archive.
|
||||||
|
// It returns 0, io.EOF when it reaches the end of that entry,
|
||||||
|
// until Next is called to advance to the next entry.
|
||||||
|
func (tr *Reader) Read(b []byte) (n int, err error) {
|
||||||
|
if tr.curr == nil {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n, err = tr.curr.Read(b)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
tr.err = err
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rfr *regFileReader) Read(b []byte) (n int, err error) {
|
||||||
|
if rfr.nb == 0 {
|
||||||
|
// file consumed
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
if int64(len(b)) > rfr.nb {
|
||||||
|
b = b[0:rfr.nb]
|
||||||
|
}
|
||||||
|
n, err = rfr.r.Read(b)
|
||||||
|
rfr.nb -= int64(n)
|
||||||
|
|
||||||
|
if err == io.EOF && rfr.nb > 0 {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// numBytes returns the number of bytes left to read in the file's data in the tar archive.
func (rfr *regFileReader) numBytes() int64 {
	return rfr.nb
}
|
||||||
|
|
||||||
|
// readHole reads a sparse file hole ending at offset toOffset
|
||||||
|
func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
|
||||||
|
n64 := toOffset - sfr.pos
|
||||||
|
if n64 > int64(len(b)) {
|
||||||
|
n64 = int64(len(b))
|
||||||
|
}
|
||||||
|
n := int(n64)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
b[i] = 0
|
||||||
|
}
|
||||||
|
sfr.pos += n64
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads the sparse file data in expanded form.
//
// The reader alternates between synthesizing zero-filled holes (via
// readHole) and forwarding to the underlying regFileReader for the data
// fragments listed in sfr.sp.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
	if len(sfr.sp) == 0 {
		// No more data fragments to read from.
		if sfr.pos < sfr.tot {
			// We're in the last hole
			n = sfr.readHole(b, sfr.tot)
			return
		}
		// Otherwise, we're at the end of the file
		return 0, io.EOF
	}
	if sfr.pos < sfr.sp[0].offset {
		// We're in a hole
		n = sfr.readHole(b, sfr.sp[0].offset)
		return
	}

	// We're not in a hole, so we'll read from the next data fragment
	posInFragment := sfr.pos - sfr.sp[0].offset
	bytesLeft := sfr.sp[0].numBytes - posInFragment
	// Clamp the read so it cannot cross into the following hole.
	if int64(len(b)) > bytesLeft {
		b = b[0:bytesLeft]
	}

	n, err = sfr.rfr.Read(b)
	sfr.pos += int64(n)

	if int64(n) == bytesLeft {
		// We're done with this fragment
		sfr.sp = sfr.sp[1:]
	}

	if err == io.EOF && sfr.pos < sfr.tot {
		// We reached the end of the last fragment's data, but there's a final hole
		err = nil
	}
	return
}
|
||||||
|
|
||||||
|
// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
//
// Note this counts the stored (compressed) bytes, not the expanded size.
func (sfr *sparseFileReader) numBytes() int64 {
	return sfr.rfr.nb
}
|
743
archive/tar/reader_test.go
Normal file
743
archive/tar/reader_test.go
Normal file
|
@ -0,0 +1,743 @@
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// untarTest describes one archive-reading test case: an input tar file,
// the headers expected from successive Next calls, and (optionally) the
// MD5 checksums of each entry's contents.
type untarTest struct {
	// file is the path of the testdata archive to read.
	file string
	// headers holds the expected header for each entry, in order.
	headers []*Header
	// cksums holds the expected MD5 hex digest of each entry's data.
	cksums []string
}
|
||||||
|
|
||||||
|
// gnuTarTest is the expected content of testdata/gnu.tar, an archive
// written in GNU tar format. It is shared by several tests.
var gnuTarTest = &untarTest{
	file: "testdata/gnu.tar",
	headers: []*Header{
		{
			Name:     "small.txt",
			Mode:     0640,
			Uid:      73025,
			Gid:      5000,
			Size:     5,
			ModTime:  time.Unix(1244428340, 0),
			Typeflag: '0',
			Uname:    "dsymonds",
			Gname:    "eng",
		},
		{
			Name:     "small2.txt",
			Mode:     0640,
			Uid:      73025,
			Gid:      5000,
			Size:     11,
			ModTime:  time.Unix(1244436044, 0),
			Typeflag: '0',
			Uname:    "dsymonds",
			Gname:    "eng",
		},
	},
	cksums: []string{
		"e38b27eaccb4391bdec553a7f3ae6b2f",
		"c65bd2e50a56a2138bf1716f2fd56fe9",
	},
}
|
||||||
|
|
||||||
|
// sparseTarTest is the expected content of testdata/sparse-formats.tar,
// which contains the same sparse file encoded in the old GNU format and
// in PAX sparse formats 0.0, 0.1 and 1.0, plus a small trailing file.
var sparseTarTest = &untarTest{
	file: "testdata/sparse-formats.tar",
	headers: []*Header{
		{
			Name:     "sparse-gnu",
			Mode:     420,
			Uid:      1000,
			Gid:      1000,
			Size:     200,
			ModTime:  time.Unix(1392395740, 0),
			Typeflag: 0x53,
			Linkname: "",
			Uname:    "david",
			Gname:    "david",
			Devmajor: 0,
			Devminor: 0,
		},
		{
			Name:     "sparse-posix-0.0",
			Mode:     420,
			Uid:      1000,
			Gid:      1000,
			Size:     200,
			ModTime:  time.Unix(1392342187, 0),
			Typeflag: 0x30,
			Linkname: "",
			Uname:    "david",
			Gname:    "david",
			Devmajor: 0,
			Devminor: 0,
		},
		{
			Name:     "sparse-posix-0.1",
			Mode:     420,
			Uid:      1000,
			Gid:      1000,
			Size:     200,
			ModTime:  time.Unix(1392340456, 0),
			Typeflag: 0x30,
			Linkname: "",
			Uname:    "david",
			Gname:    "david",
			Devmajor: 0,
			Devminor: 0,
		},
		{
			Name:     "sparse-posix-1.0",
			Mode:     420,
			Uid:      1000,
			Gid:      1000,
			Size:     200,
			ModTime:  time.Unix(1392337404, 0),
			Typeflag: 0x30,
			Linkname: "",
			Uname:    "david",
			Gname:    "david",
			Devmajor: 0,
			Devminor: 0,
		},
		{
			Name:     "end",
			Mode:     420,
			Uid:      1000,
			Gid:      1000,
			Size:     4,
			ModTime:  time.Unix(1392398319, 0),
			Typeflag: 0x30,
			Linkname: "",
			Uname:    "david",
			Gname:    "david",
			Devmajor: 0,
			Devminor: 0,
		},
	},
	// The four sparse encodings expand to identical contents, so their
	// checksums match; the last digest is for the "end" file.
	cksums: []string{
		"6f53234398c2449fe67c1812d993012f",
		"6f53234398c2449fe67c1812d993012f",
		"6f53234398c2449fe67c1812d993012f",
		"6f53234398c2449fe67c1812d993012f",
		"b0061974914468de549a2af8ced10316",
	},
}
|
||||||
|
|
||||||
|
// untarTests is the full table driven by TestReader, covering GNU, star,
// v7, PAX, and xattr-bearing archives.
var untarTests = []*untarTest{
	gnuTarTest,
	sparseTarTest,
	{
		file: "testdata/star.tar",
		headers: []*Header{
			{
				Name:       "small.txt",
				Mode:       0640,
				Uid:        73025,
				Gid:        5000,
				Size:       5,
				ModTime:    time.Unix(1244592783, 0),
				Typeflag:   '0',
				Uname:      "dsymonds",
				Gname:      "eng",
				AccessTime: time.Unix(1244592783, 0),
				ChangeTime: time.Unix(1244592783, 0),
			},
			{
				Name:       "small2.txt",
				Mode:       0640,
				Uid:        73025,
				Gid:        5000,
				Size:       11,
				ModTime:    time.Unix(1244592783, 0),
				Typeflag:   '0',
				Uname:      "dsymonds",
				Gname:      "eng",
				AccessTime: time.Unix(1244592783, 0),
				ChangeTime: time.Unix(1244592783, 0),
			},
		},
	},
	{
		file: "testdata/v7.tar",
		headers: []*Header{
			{
				Name:     "small.txt",
				Mode:     0444,
				Uid:      73025,
				Gid:      5000,
				Size:     5,
				ModTime:  time.Unix(1244593104, 0),
				Typeflag: '\x00',
			},
			{
				Name:     "small2.txt",
				Mode:     0444,
				Uid:      73025,
				Gid:      5000,
				Size:     11,
				ModTime:  time.Unix(1244593104, 0),
				Typeflag: '\x00',
			},
		},
	},
	{
		file: "testdata/pax.tar",
		headers: []*Header{
			{
				Name:       "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
				Mode:       0664,
				Uid:        1000,
				Gid:        1000,
				Uname:      "shane",
				Gname:      "shane",
				Size:       7,
				ModTime:    time.Unix(1350244992, 23960108),
				ChangeTime: time.Unix(1350244992, 23960108),
				AccessTime: time.Unix(1350244992, 23960108),
				Typeflag:   TypeReg,
			},
			{
				Name:       "a/b",
				Mode:       0777,
				Uid:        1000,
				Gid:        1000,
				Uname:      "shane",
				Gname:      "shane",
				Size:       0,
				ModTime:    time.Unix(1350266320, 910238425),
				ChangeTime: time.Unix(1350266320, 910238425),
				AccessTime: time.Unix(1350266320, 910238425),
				Typeflag:   TypeSymlink,
				Linkname:   "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
			},
		},
	},
	{
		file: "testdata/nil-uid.tar", // golang.org/issue/5290
		headers: []*Header{
			{
				Name:     "P1050238.JPG.log",
				Mode:     0664,
				Uid:      0,
				Gid:      0,
				Size:     14,
				ModTime:  time.Unix(1365454838, 0),
				Typeflag: TypeReg,
				Linkname: "",
				Uname:    "eyefi",
				Gname:    "eyefi",
				Devmajor: 0,
				Devminor: 0,
			},
		},
	},
	{
		file: "testdata/xattrs.tar",
		headers: []*Header{
			{
				Name:       "small.txt",
				Mode:       0644,
				Uid:        1000,
				Gid:        10,
				Size:       5,
				ModTime:    time.Unix(1386065770, 448252320),
				Typeflag:   '0',
				Uname:      "alex",
				Gname:      "wheel",
				AccessTime: time.Unix(1389782991, 419875220),
				ChangeTime: time.Unix(1389782956, 794414986),
				Xattrs: map[string]string{
					"user.key":  "value",
					"user.key2": "value2",
					// Interestingly, selinux encodes the terminating null inside the xattr
					"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
				},
			},
			{
				Name:       "small2.txt",
				Mode:       0644,
				Uid:        1000,
				Gid:        10,
				Size:       11,
				ModTime:    time.Unix(1386065770, 449252304),
				Typeflag:   '0',
				Uname:      "alex",
				Gname:      "wheel",
				AccessTime: time.Unix(1389782991, 419875220),
				ChangeTime: time.Unix(1386065770, 449252304),
				Xattrs: map[string]string{
					"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
				},
			},
		},
	},
}
|
||||||
|
|
||||||
|
// TestReader walks every archive in untarTests, checking that each entry's
// header matches the expected one and that the archive ends cleanly with EOF.
func TestReader(t *testing.T) {
testLoop:
	for i, test := range untarTests {
		f, err := os.Open(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}
		// NOTE(review): this defer accumulates across loop iterations and
		// only fires at function return; the explicit f.Close() on the
		// error path below compensates for the early continue.
		defer f.Close()
		tr := NewReader(f)
		for j, header := range test.headers {
			hdr, err := tr.Next()
			if err != nil || hdr == nil {
				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
				f.Close()
				continue testLoop
			}
			if !reflect.DeepEqual(*hdr, *header) {
				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
					i, j, *hdr, *header)
			}
		}
		// After the expected entries, the reader must report end of archive.
		hdr, err := tr.Next()
		if err == io.EOF {
			continue testLoop
		}
		if hdr != nil || err != nil {
			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
		}
	}
}
|
||||||
|
|
||||||
|
// TestPartialRead verifies that reading only part of an entry's data does
// not corrupt the position of the following entry: Next must skip the
// unread remainder.
func TestPartialRead(t *testing.T) {
	f, err := os.Open("testdata/gnu.tar")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer f.Close()

	tr := NewReader(f)

	// Read the first four bytes; Next() should skip the last byte.
	hdr, err := tr.Next()
	if err != nil || hdr == nil {
		t.Fatalf("Didn't get first file: %v", err)
	}
	buf := make([]byte, 4)
	if _, err := io.ReadFull(tr, buf); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
		t.Errorf("Contents = %v, want %v", buf, expected)
	}

	// Second file
	hdr, err = tr.Next()
	if err != nil || hdr == nil {
		t.Fatalf("Didn't get second file: %v", err)
	}
	buf = make([]byte, 6)
	if _, err := io.ReadFull(tr, buf); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if expected := []byte("Google"); !bytes.Equal(buf, expected) {
		t.Errorf("Contents = %v, want %v", buf, expected)
	}
}
|
||||||
|
|
||||||
|
// TestIncrementalRead reads each entry of the GNU test archive in 8-byte
// chunks, verifying headers and MD5 checksums of the reassembled contents.
func TestIncrementalRead(t *testing.T) {
	test := gnuTarTest
	f, err := os.Open(test.file)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer f.Close()

	tr := NewReader(f)

	headers := test.headers
	cksums := test.cksums
	nread := 0

	// loop over all files
	for ; ; nread++ {
		hdr, err := tr.Next()
		if hdr == nil || err == io.EOF {
			break
		}

		// check the header
		if !reflect.DeepEqual(*hdr, *headers[nread]) {
			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
				*hdr, headers[nread])
		}

		// read file contents in little chunks EOF,
		// checksumming all the way
		h := md5.New()
		rdbuf := make([]uint8, 8)
		for {
			nr, err := tr.Read(rdbuf)
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Errorf("Read: unexpected error %v\n", err)
				break
			}
			h.Write(rdbuf[0:nr])
		}
		// verify checksum
		have := fmt.Sprintf("%x", h.Sum(nil))
		want := cksums[nread]
		if want != have {
			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
		}
	}
	if nread != len(headers) {
		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
	}
}
|
||||||
|
|
||||||
|
// TestNonSeekable verifies the reader works on a stream that exposes only
// io.Reader (no Seek), by wrapping the test file to hide its other methods.
func TestNonSeekable(t *testing.T) {
	test := gnuTarTest
	f, err := os.Open(test.file)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer f.Close()

	// readerOnly hides *os.File's Seek method from the tar reader.
	type readerOnly struct {
		io.Reader
	}
	tr := NewReader(readerOnly{f})
	nread := 0

	for ; ; nread++ {
		_, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
	}

	if nread != len(test.headers) {
		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
	}
}
|
||||||
|
|
||||||
|
func TestParsePAXHeader(t *testing.T) {
|
||||||
|
paxTests := [][3]string{
|
||||||
|
{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
|
||||||
|
{"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length
|
||||||
|
{"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
|
||||||
|
for _, test := range paxTests {
|
||||||
|
key, expected, raw := test[0], test[1], test[2]
|
||||||
|
reader := bytes.NewReader([]byte(raw))
|
||||||
|
headers, err := parsePAX(reader)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Couldn't parse correctly formatted headers: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.EqualFold(headers[key], expected) {
|
||||||
|
t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trailer := make([]byte, 100)
|
||||||
|
n, err := reader.Read(trailer)
|
||||||
|
if err != io.EOF || n != 0 {
|
||||||
|
t.Error("Buffer wasn't consumed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
badHeader := bytes.NewReader([]byte("3 somelongkey="))
|
||||||
|
if _, err := parsePAX(badHeader); err != ErrHeader {
|
||||||
|
t.Fatal("Unexpected success when parsing bad header")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestParsePAXTime checks parsing of PAX decimal timestamps at several
// fractional-second precisions, including truncation of extra digits.
func TestParsePAXTime(t *testing.T) {
	// Some valid PAX time values
	timestamps := map[string]time.Time{
		"1350244992.023960108":  time.Unix(1350244992, 23960108), // The common case
		"1350244992.02396010":   time.Unix(1350244992, 23960100), // Lower precision value
		"1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
		"1350244992":            time.Unix(1350244992, 0),        // Low precision value
	}
	for input, expected := range timestamps {
		ts, err := parsePAXTime(input)
		if err != nil {
			t.Fatal(err)
		}
		if !ts.Equal(expected) {
			t.Fatalf("Time parsing failure %s %s", ts, expected)
		}
	}
}
|
||||||
|
|
||||||
|
// TestMergePAX checks that mergePAX folds string, integer, and timestamp
// PAX records into the corresponding Header fields.
func TestMergePAX(t *testing.T) {
	hdr := new(Header)
	// Test a string, integer, and time based value.
	headers := map[string]string{
		"path":  "a/b/c",
		"uid":   "1000",
		"mtime": "1350244992.023960108",
	}
	err := mergePAX(hdr, headers)
	if err != nil {
		t.Fatal(err)
	}
	want := &Header{
		Name:    "a/b/c",
		Uid:     1000,
		ModTime: time.Unix(1350244992, 23960108),
	}
	if !reflect.DeepEqual(hdr, want) {
		t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
	}
}
|
||||||
|
|
||||||
|
// TestSparseEndToEnd reads the sparse-format archive end to end, checking
// that every sparse encoding expands to contents with the expected MD5.
func TestSparseEndToEnd(t *testing.T) {
	test := sparseTarTest
	f, err := os.Open(test.file)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer f.Close()

	tr := NewReader(f)

	headers := test.headers
	cksums := test.cksums
	nread := 0

	// loop over all files
	for ; ; nread++ {
		hdr, err := tr.Next()
		if hdr == nil || err == io.EOF {
			break
		}

		// check the header
		if !reflect.DeepEqual(*hdr, *headers[nread]) {
			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
				*hdr, headers[nread])
		}

		// read and checksum the file data
		h := md5.New()
		_, err = io.Copy(h, tr)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}

		// verify checksum
		have := fmt.Sprintf("%x", h.Sum(nil))
		want := cksums[nread]
		if want != have {
			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
		}
	}
	if nread != len(headers) {
		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
	}
}
|
||||||
|
|
||||||
|
// sparseFileReadTest describes one sparseFileReader test case: the stored
// (compressed) data, its sparse map, the logical file size, and the
// expected expanded contents.
type sparseFileReadTest struct {
	// sparseData is the raw data stored in the archive.
	sparseData []byte
	// sparseMap locates each data fragment in the expanded file.
	sparseMap []sparseEntry
	// realSize is the logical (expanded) size of the file.
	realSize int64
	// expected is the fully expanded file contents.
	expected []byte
}
|
||||||
|
|
||||||
|
// sparseFileReadTests covers fragments at the start or interior of the
// file, trailing holes, and the degenerate all-hole file.
var sparseFileReadTests = []sparseFileReadTest{
	{
		sparseData: []byte("abcde"),
		sparseMap: []sparseEntry{
			{offset: 0, numBytes: 2},
			{offset: 5, numBytes: 3},
		},
		realSize: 8,
		expected: []byte("ab\x00\x00\x00cde"),
	},
	{
		sparseData: []byte("abcde"),
		sparseMap: []sparseEntry{
			{offset: 0, numBytes: 2},
			{offset: 5, numBytes: 3},
		},
		realSize: 10,
		expected: []byte("ab\x00\x00\x00cde\x00\x00"),
	},
	{
		sparseData: []byte("abcde"),
		sparseMap: []sparseEntry{
			{offset: 1, numBytes: 3},
			{offset: 6, numBytes: 2},
		},
		realSize: 8,
		expected: []byte("\x00abc\x00\x00de"),
	},
	{
		sparseData: []byte("abcde"),
		sparseMap: []sparseEntry{
			{offset: 1, numBytes: 3},
			{offset: 6, numBytes: 2},
		},
		realSize: 10,
		expected: []byte("\x00abc\x00\x00de\x00\x00"),
	},
	{
		// A file that is nothing but a hole.
		sparseData: []byte(""),
		sparseMap:  nil,
		realSize:   2,
		expected:   []byte("\x00\x00"),
	},
}
|
||||||
|
|
||||||
|
// TestSparseFileReader drains a sparseFileReader for each table case and
// checks the expanded output and the remaining-byte accounting.
func TestSparseFileReader(t *testing.T) {
	for i, test := range sparseFileReadTests {
		r := bytes.NewReader(test.sparseData)
		nb := int64(r.Len())
		sfr := &sparseFileReader{
			rfr: &regFileReader{r: r, nb: nb},
			sp:  test.sparseMap,
			pos: 0,
			tot: test.realSize,
		}
		// numBytes reports stored (not expanded) bytes remaining.
		if sfr.numBytes() != nb {
			t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
		}
		buf, err := ioutil.ReadAll(sfr)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
		}
		if e := test.expected; !bytes.Equal(buf, e) {
			t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
		}
		if sfr.numBytes() != 0 {
			t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
		}
	}
}
|
||||||
|
|
||||||
|
// TestSparseIncrementalRead reads a sparse file in small chunks that do not
// align with hole/fragment boundaries, verifying the reassembled output.
func TestSparseIncrementalRead(t *testing.T) {
	sparseMap := []sparseEntry{{10, 2}}
	sparseData := []byte("Go")
	expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"

	r := bytes.NewReader(sparseData)
	nb := int64(r.Len())
	sfr := &sparseFileReader{
		rfr: &regFileReader{r: r, nb: nb},
		sp:  sparseMap,
		pos: 0,
		tot: int64(len(expected)),
	}

	// We'll read the data 6 bytes at a time, with a hole of size 10 at
	// the beginning and one of size 8 at the end.
	var outputBuf bytes.Buffer
	buf := make([]byte, 6)
	for {
		n, err := sfr.Read(buf)
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Errorf("Read: unexpected error %v\n", err)
		}
		if n > 0 {
			_, err := outputBuf.Write(buf[:n])
			if err != nil {
				t.Errorf("Write: unexpected error %v\n", err)
			}
		}
	}
	got := outputBuf.String()
	if got != expected {
		t.Errorf("Contents = %v, want %v", got, expected)
	}
}
|
||||||
|
|
||||||
|
func TestReadGNUSparseMap0x1(t *testing.T) {
|
||||||
|
headers := map[string]string{
|
||||||
|
paxGNUSparseNumBlocks: "4",
|
||||||
|
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||||
|
}
|
||||||
|
expected := []sparseEntry{
|
||||||
|
{offset: 0, numBytes: 5},
|
||||||
|
{offset: 10, numBytes: 5},
|
||||||
|
{offset: 20, numBytes: 5},
|
||||||
|
{offset: 30, numBytes: 5},
|
||||||
|
}
|
||||||
|
|
||||||
|
sp, err := readGNUSparseMap0x1(headers)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(sp, expected) {
|
||||||
|
t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadGNUSparseMap1x0(t *testing.T) {
|
||||||
|
// This test uses lots of holes so the sparse header takes up more than two blocks
|
||||||
|
numEntries := 100
|
||||||
|
expected := make([]sparseEntry, 0, numEntries)
|
||||||
|
sparseMap := new(bytes.Buffer)
|
||||||
|
|
||||||
|
fmt.Fprintf(sparseMap, "%d\n", numEntries)
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
offset := int64(2048 * i)
|
||||||
|
numBytes := int64(1024)
|
||||||
|
expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
|
||||||
|
fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the header the smallest multiple of blockSize that fits the sparseMap
|
||||||
|
headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
|
||||||
|
bufLen := blockSize * headerBlocks
|
||||||
|
buf := make([]byte, bufLen)
|
||||||
|
copy(buf, sparseMap.Bytes())
|
||||||
|
|
||||||
|
// Get an reader to read the sparse map
|
||||||
|
r := bytes.NewReader(buf)
|
||||||
|
|
||||||
|
// Read the sparse map
|
||||||
|
sp, err := readGNUSparseMap1x0(r)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(sp, expected) {
|
||||||
|
t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUninitializedRead(t *testing.T) {
|
||||||
|
test := gnuTarTest
|
||||||
|
f, err := os.Open(test.file)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
tr := NewReader(f)
|
||||||
|
_, err = tr.Read([]byte{})
|
||||||
|
if err == nil || err != io.EOF {
|
||||||
|
t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
20
archive/tar/stat_atim.go
Normal file
20
archive/tar/stat_atim.go
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux dragonfly openbsd solaris
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// statAtime returns the access time recorded in st.
// On these platforms (see the build tags above) the field is named Atim.
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atim.Unix())
}
|
||||||
|
|
||||||
|
// statCtime returns the inode change time recorded in st.
// On these platforms (see the build tags above) the field is named Ctim.
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctim.Unix())
}
|
20
archive/tar/stat_atimespec.go
Normal file
20
archive/tar/stat_atimespec.go
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build darwin freebsd netbsd
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// statAtime returns the access time recorded in st.
// On the BSD-derived platforms in the build tags the field is Atimespec.
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atimespec.Unix())
}
|
||||||
|
|
||||||
|
// statCtime returns the inode change time recorded in st.
// On the BSD-derived platforms in the build tags the field is Ctimespec.
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctimespec.Unix())
}
|
32
archive/tar/stat_unix.go
Normal file
32
archive/tar/stat_unix.go
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the Unix-specific stat helper so that FileInfoHeader can
// populate OS-dependent Header fields on the platforms in the build tags.
func init() {
	sysStat = statUnix
}
|
||||||
|
|
||||||
|
// statUnix copies Unix-specific metadata (uid, gid, access and change
// times) from fi's underlying syscall.Stat_t into h. It is a no-op for
// FileInfos whose Sys() is not a *syscall.Stat_t (e.g. synthetic ones).
func statUnix(fi os.FileInfo, h *Header) error {
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		// Nothing OS-specific to add; not an error.
		return nil
	}
	h.Uid = int(sys.Uid)
	h.Gid = int(sys.Gid)
	// TODO(bradfitz): populate username & group. os/user
	// doesn't cache LookupId lookups, and lacks group
	// lookup functions.
	h.AccessTime = statAtime(sys)
	h.ChangeTime = statCtime(sys)
	// TODO(bradfitz): major/minor device numbers?
	return nil
}
|
284
archive/tar/tar_test.go
Normal file
284
archive/tar/tar_test.go
Normal file
|
@ -0,0 +1,284 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFileInfoHeader(t *testing.T) {
|
||||||
|
fi, err := os.Stat("testdata/small.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
h, err := FileInfoHeader(fi, "")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("FileInfoHeader: %v", err)
|
||||||
|
}
|
||||||
|
if g, e := h.Name, "small.txt"; g != e {
|
||||||
|
t.Errorf("Name = %q; want %q", g, e)
|
||||||
|
}
|
||||||
|
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
|
||||||
|
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||||
|
}
|
||||||
|
if g, e := h.Size, int64(5); g != e {
|
||||||
|
t.Errorf("Size = %v; want %v", g, e)
|
||||||
|
}
|
||||||
|
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||||
|
t.Errorf("ModTime = %v; want %v", g, e)
|
||||||
|
}
|
||||||
|
// FileInfoHeader should error when passing nil FileInfo
|
||||||
|
if _, err := FileInfoHeader(nil, ""); err == nil {
|
||||||
|
t.Fatalf("Expected error when passing nil to FileInfoHeader")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileInfoHeaderDir(t *testing.T) {
|
||||||
|
fi, err := os.Stat("testdata")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
h, err := FileInfoHeader(fi, "")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("FileInfoHeader: %v", err)
|
||||||
|
}
|
||||||
|
if g, e := h.Name, "testdata/"; g != e {
|
||||||
|
t.Errorf("Name = %q; want %q", g, e)
|
||||||
|
}
|
||||||
|
// Ignoring c_ISGID for golang.org/issue/4867
|
||||||
|
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
|
||||||
|
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||||
|
}
|
||||||
|
if g, e := h.Size, int64(0); g != e {
|
||||||
|
t.Errorf("Size = %v; want %v", g, e)
|
||||||
|
}
|
||||||
|
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||||
|
t.Errorf("ModTime = %v; want %v", g, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileInfoHeaderSymlink(t *testing.T) {
|
||||||
|
h, err := FileInfoHeader(symlink{}, "some-target")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if g, e := h.Name, "some-symlink"; g != e {
|
||||||
|
t.Errorf("Name = %q; want %q", g, e)
|
||||||
|
}
|
||||||
|
if g, e := h.Linkname, "some-target"; g != e {
|
||||||
|
t.Errorf("Linkname = %q; want %q", g, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// symlink is a synthetic os.FileInfo describing a symbolic link named
// "some-symlink". It lets tests exercise FileInfoHeader without creating
// a real symlink on disk.
type symlink struct{}

func (symlink) Name() string       { return "some-symlink" }
func (symlink) Size() int64        { return 0 }
func (symlink) Mode() os.FileMode  { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool        { return false }
func (symlink) Sys() interface{}   { return nil }
|
||||||
|
|
||||||
|
func TestRoundTrip(t *testing.T) {
|
||||||
|
data := []byte("some file contents")
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
tw := NewWriter(&b)
|
||||||
|
hdr := &Header{
|
||||||
|
Name: "file.txt",
|
||||||
|
Uid: 1 << 21, // too big for 8 octal digits
|
||||||
|
Size: int64(len(data)),
|
||||||
|
ModTime: time.Now(),
|
||||||
|
}
|
||||||
|
// tar only supports second precision.
|
||||||
|
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
|
||||||
|
if err := tw.WriteHeader(hdr); err != nil {
|
||||||
|
t.Fatalf("tw.WriteHeader: %v", err)
|
||||||
|
}
|
||||||
|
if _, err := tw.Write(data); err != nil {
|
||||||
|
t.Fatalf("tw.Write: %v", err)
|
||||||
|
}
|
||||||
|
if err := tw.Close(); err != nil {
|
||||||
|
t.Fatalf("tw.Close: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read it back.
|
||||||
|
tr := NewReader(&b)
|
||||||
|
rHdr, err := tr.Next()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("tr.Next: %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(rHdr, hdr) {
|
||||||
|
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
|
||||||
|
}
|
||||||
|
rData, err := ioutil.ReadAll(tr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Read: %v", err)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(rData, data) {
|
||||||
|
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// headerRoundTripTest pairs a tar Header with the os.FileMode that
// Header.FileInfo().Mode() is expected to report for it.
type headerRoundTripTest struct {
	h  *Header     // input header
	fm os.FileMode // expected FileInfo mode
}
|
||||||
|
|
||||||
|
// TestHeaderRoundTrip converts each golden Header to an os.FileInfo via
// Header.FileInfo and back via FileInfoHeader, checking that the name,
// size, mode, mod time, and Sys() linkage all survive the round trip.
// The golden table covers every Typeflag and special mode bit the
// package maps to os.FileMode values.
func TestHeaderRoundTrip(t *testing.T) {
	golden := []headerRoundTripTest{
		// regular file.
		{
			h: &Header{
				Name:     "test.txt",
				Mode:     0644 | c_ISREG,
				Size:     12,
				ModTime:  time.Unix(1360600916, 0),
				Typeflag: TypeReg,
			},
			fm: 0644,
		},
		// hard link.
		{
			h: &Header{
				Name:     "hard.txt",
				Mode:     0644 | c_ISLNK,
				Size:     0,
				ModTime:  time.Unix(1360600916, 0),
				Typeflag: TypeLink,
			},
			fm: 0644 | os.ModeSymlink,
		},
		// symbolic link.
		{
			h: &Header{
				Name:     "link.txt",
				Mode:     0777 | c_ISLNK,
				Size:     0,
				ModTime:  time.Unix(1360600852, 0),
				Typeflag: TypeSymlink,
			},
			fm: 0777 | os.ModeSymlink,
		},
		// character device node.
		{
			h: &Header{
				Name:     "dev/null",
				Mode:     0666 | c_ISCHR,
				Size:     0,
				ModTime:  time.Unix(1360578951, 0),
				Typeflag: TypeChar,
			},
			fm: 0666 | os.ModeDevice | os.ModeCharDevice,
		},
		// block device node.
		{
			h: &Header{
				Name:     "dev/sda",
				Mode:     0660 | c_ISBLK,
				Size:     0,
				ModTime:  time.Unix(1360578954, 0),
				Typeflag: TypeBlock,
			},
			fm: 0660 | os.ModeDevice,
		},
		// directory.
		{
			h: &Header{
				Name:     "dir/",
				Mode:     0755 | c_ISDIR,
				Size:     0,
				ModTime:  time.Unix(1360601116, 0),
				Typeflag: TypeDir,
			},
			fm: 0755 | os.ModeDir,
		},
		// fifo node.
		{
			h: &Header{
				Name:     "dev/initctl",
				Mode:     0600 | c_ISFIFO,
				Size:     0,
				ModTime:  time.Unix(1360578949, 0),
				Typeflag: TypeFifo,
			},
			fm: 0600 | os.ModeNamedPipe,
		},
		// setuid.
		{
			h: &Header{
				Name:     "bin/su",
				Mode:     0755 | c_ISREG | c_ISUID,
				Size:     23232,
				ModTime:  time.Unix(1355405093, 0),
				Typeflag: TypeReg,
			},
			fm: 0755 | os.ModeSetuid,
		},
		// setguid.
		{
			h: &Header{
				Name:     "group.txt",
				Mode:     0750 | c_ISREG | c_ISGID,
				Size:     0,
				ModTime:  time.Unix(1360602346, 0),
				Typeflag: TypeReg,
			},
			fm: 0750 | os.ModeSetgid,
		},
		// sticky.
		{
			h: &Header{
				Name:     "sticky.txt",
				Mode:     0600 | c_ISREG | c_ISVTX,
				Size:     7,
				ModTime:  time.Unix(1360602540, 0),
				Typeflag: TypeReg,
			},
			fm: 0600 | os.ModeSticky,
		},
	}

	for i, g := range golden {
		fi := g.h.FileInfo()
		h2, err := FileInfoHeader(fi, "")
		if err != nil {
			t.Error(err)
			continue
		}
		// FileInfo names are base names; a slash would leak path info.
		if strings.Contains(fi.Name(), "/") {
			t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
		}
		// Directories round-trip with a trailing slash restored.
		name := path.Base(g.h.Name)
		if fi.IsDir() {
			name += "/"
		}
		if got, want := h2.Name, name; got != want {
			t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
		}
		if got, want := h2.Size, g.h.Size; got != want {
			t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
		}
		if got, want := h2.Mode, g.h.Mode; got != want {
			t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
		}
		if got, want := fi.Mode(), g.fm; got != want {
			t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
		}
		if got, want := h2.ModTime, g.h.ModTime; got != want {
			t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
		}
		// Sys() must return the very same *Header, not a copy.
		if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
			t.Errorf("i=%d: Sys didn't return original *Header", i)
		}
	}
}
|
BIN
archive/tar/testdata/gnu.tar
vendored
Normal file
BIN
archive/tar/testdata/gnu.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/nil-uid.tar
vendored
Normal file
BIN
archive/tar/testdata/nil-uid.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/pax.tar
vendored
Normal file
BIN
archive/tar/testdata/pax.tar
vendored
Normal file
Binary file not shown.
1
archive/tar/testdata/small.txt
vendored
Normal file
1
archive/tar/testdata/small.txt
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Kilts
|
1
archive/tar/testdata/small2.txt
vendored
Normal file
1
archive/tar/testdata/small2.txt
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Google.com
|
BIN
archive/tar/testdata/sparse-formats.tar
vendored
Normal file
BIN
archive/tar/testdata/sparse-formats.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/star.tar
vendored
Normal file
BIN
archive/tar/testdata/star.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/ustar.tar
vendored
Normal file
BIN
archive/tar/testdata/ustar.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/v7.tar
vendored
Normal file
BIN
archive/tar/testdata/v7.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/writer-big-long.tar
vendored
Normal file
BIN
archive/tar/testdata/writer-big-long.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/writer-big.tar
vendored
Normal file
BIN
archive/tar/testdata/writer-big.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/writer.tar
vendored
Normal file
BIN
archive/tar/testdata/writer.tar
vendored
Normal file
Binary file not shown.
BIN
archive/tar/testdata/xattrs.tar
vendored
Normal file
BIN
archive/tar/testdata/xattrs.tar
vendored
Normal file
Binary file not shown.
396
archive/tar/writer.go
Normal file
396
archive/tar/writer.go
Normal file
|
@ -0,0 +1,396 @@
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
// TODO(dsymonds):
|
||||||
|
// - catch more errors (no first header, etc.)
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errors returned by Writer methods. The exported values are part of the
// public API; the unexported ones are internal to header formatting.
var (
	ErrWriteTooLong    = errors.New("archive/tar: write too long")
	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
	ErrWriteAfterClose = errors.New("archive/tar: write after close")
	errNameTooLong     = errors.New("archive/tar: name too long")
	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
)
|
||||||
|
|
||||||
|
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
	w          io.Writer
	err        error // sticky: once set, subsequent operations return it
	nb         int64 // number of unwritten bytes for current file entry
	pad        int64 // amount of padding to write after current file entry
	closed     bool
	usedBinary bool            // whether the binary numeric field extension was used
	preferPax  bool            // use pax header instead of binary numeric header
	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
|
||||||
|
|
||||||
|
// NewWriter creates a new Writer writing to w.
// The zero fields of the returned Writer are all valid initial state.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
|
||||||
|
|
||||||
|
// Flush finishes writing the current file (optional).
|
||||||
|
func (tw *Writer) Flush() error {
|
||||||
|
if tw.nb > 0 {
|
||||||
|
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||||
|
return tw.err
|
||||||
|
}
|
||||||
|
|
||||||
|
n := tw.nb + tw.pad
|
||||||
|
for n > 0 && tw.err == nil {
|
||||||
|
nr := n
|
||||||
|
if nr > blockSize {
|
||||||
|
nr = blockSize
|
||||||
|
}
|
||||||
|
var nw int
|
||||||
|
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||||
|
n -= int64(nw)
|
||||||
|
}
|
||||||
|
tw.nb = 0
|
||||||
|
tw.pad = 0
|
||||||
|
return tw.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write s into b, terminating it with a NUL if there is room.
// If the value is too long for the field and allowPax is true add a paxheader record instead
func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
	// Note the precedence: (allowPax && too-long) || non-ASCII. A pax
	// record is emitted for any non-ASCII string regardless of allowPax.
	needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
	if needsPaxHeader {
		paxHeaders[paxKeyword] = s
		return
	}
	// No pax record possible and the value still overflows: record the
	// sticky error (first error wins) and write nothing.
	if len(s) > len(b) {
		if tw.err == nil {
			tw.err = ErrFieldTooLong
		}
		return
	}
	ascii := toASCII(s)
	copy(b, ascii)
	// NUL-terminate only when the field has spare room.
	if len(ascii) < len(b) {
		b[len(ascii)] = 0
	}
}
|
||||||
|
|
||||||
|
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||||
|
func (tw *Writer) octal(b []byte, x int64) {
|
||||||
|
s := strconv.FormatInt(x, 8)
|
||||||
|
// leading zeros, but leave room for a NUL.
|
||||||
|
for len(s)+1 < len(b) {
|
||||||
|
s = "0" + s
|
||||||
|
}
|
||||||
|
tw.cString(b, s, false, paxNone, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write x into b, either as octal or as binary (GNUtar/star extension).
// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
	// Try octal first.
	s := strconv.FormatInt(x, 8)
	if len(s) < len(b) {
		tw.octal(b, x)
		return
	}

	// If it is too long for octal, and pax is preferred, use a pax header
	if allowPax && tw.preferPax {
		// The on-disk field is zeroed; the real value travels in the
		// pax record as decimal.
		tw.octal(b, 0)
		s := strconv.FormatInt(x, 10)
		paxHeaders[paxKeyword] = s
		return
	}

	// Too big: use binary (big-endian).
	tw.usedBinary = true
	for i := len(b) - 1; x > 0 && i >= 0; i-- {
		b[i] = byte(x)
		x >>= 8
	}
	b[0] |= 0x80 // highest bit indicates binary format
}
|
||||||
|
|
||||||
|
// Bounds on ModTime values representable by the header's octal mtime field.
var (
	minTime = time.Unix(0, 0)
	// There is room for 11 octal digits (33 bits) of mtime.
	maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
|
||||||
|
|
||||||
|
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
	// allowPax=true: the public entry point may emit PAX records for
	// long or non-ASCII fields; writePAXHeader re-enters with false.
	return tw.writeHeader(hdr, true)
}
|
||||||
|
|
||||||
|
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// As this method is called internally by writePax header to allow it to
// suppress writing the pax header.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
	if tw.closed {
		return ErrWriteAfterClose
	}
	// Finish the previous entry (padding) before starting a new header.
	if tw.err == nil {
		tw.Flush()
	}
	if tw.err != nil {
		return tw.err
	}

	// a map to hold pax header records, if any are needed
	paxHeaders := make(map[string]string)

	// TODO(shanemhansen): we might want to use PAX headers for
	// subsecond time resolution, but for now let's just capture
	// too long fields or non ascii characters

	var header []byte

	// We need to select which scratch buffer to use carefully,
	// since this method is called recursively to write PAX headers.
	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
	// already being used by the non-recursive call, so we must use paxHdrBuff.
	header = tw.hdrBuff[:]
	if !allowPax {
		header = tw.paxHdrBuff[:]
	}
	copy(header, zeroBlock)
	s := slicer(header)

	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
	pathHeaderBytes := s.next(fileNameSize)

	tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)

	// Handle out of range ModTime carefully.
	var modTime int64
	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
		modTime = hdr.ModTime.Unix()
	}

	// Field offsets below are the byte ranges within the 512-byte block.
	tw.octal(s.next(8), hdr.Mode)                                   // 100:108
	tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
	tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
	tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders)     // 124:136
	tw.numeric(s.next(12), modTime, false, paxNone, nil)            // 136:148 --- consider using pax for finer granularity
	s.next(8)                                                       // chksum (148:156)
	s.next(1)[0] = hdr.Typeflag                                     // 156:157

	tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)

	copy(s.next(8), []byte("ustar\x0000"))                        // 257:265
	tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
	tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
	tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil)      // 329:337
	tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil)      // 337:345

	// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
	prefixHeaderBytes := s.next(155)
	tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix

	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
	if tw.usedBinary {
		copy(header[257:265], []byte("ustar  \x00"))
	}

	_, paxPathUsed := paxHeaders[paxPath]
	// try to use a ustar header when only the name is too long
	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
		suffix := hdr.Name
		prefix := ""
		if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
			var err error
			prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
			if err == nil {
				// ok we can use a ustar long name instead of pax, now correct the fields

				// remove the path field from the pax header. this will suppress the pax header
				delete(paxHeaders, paxPath)

				// update the path fields
				tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
				tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)

				// Use the ustar magic if we used ustar long names.
				if len(prefix) > 0 && !tw.usedBinary {
					copy(header[257:265], []byte("ustar\x00"))
				}
			}
		}
	}

	// The chksum field is terminated by a NUL and a space.
	// This is different from the other octal fields.
	chksum, _ := checksum(header)
	tw.octal(header[148:155], chksum)
	header[155] = ' '

	if tw.err != nil {
		// problem with header; probably integer too big for a field.
		return tw.err
	}

	// Xattrs always travel as pax records; only on the outer call.
	if allowPax {
		for k, v := range hdr.Xattrs {
			paxHeaders[paxXattr+k] = v
		}
	}

	if len(paxHeaders) > 0 {
		if !allowPax {
			// The recursive (pax) call must itself fit in a plain header.
			return errInvalidHeader
		}
		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
			return err
		}
	}
	tw.nb = int64(hdr.Size)
	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize

	_, tw.err = tw.w.Write(header)
	return tw.err
}
|
||||||
|
|
||||||
|
// splitUSTARLongName splits a USTAR long name hdr.Name.
// name must be < 256 characters. errNameTooLong is returned
// if hdr.Name can't be split. The splitting heuristic
// is compatible with gnu tar.
func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
	length := len(name)
	// Search window for the split slash: at most prefix-size+1 bytes,
	// and a trailing slash is never itself a split point.
	if length > fileNamePrefixSize+1 {
		length = fileNamePrefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}
	i := strings.LastIndex(name[:length], "/")
	// nlen contains the resulting length in the name field.
	// plen contains the resulting length in the prefix field.
	nlen := len(name) - i - 1
	plen := i
	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
		err = errNameTooLong
		return
	}
	// The slash at index i is dropped; readers rejoin prefix "/" name.
	prefix, suffix = name[:i], name[i+1:]
	return
}
|
||||||
|
|
||||||
|
// writePaxHeader writes an extended pax header to the
|
||||||
|
// archive.
|
||||||
|
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
|
||||||
|
// Prepare extended header
|
||||||
|
ext := new(Header)
|
||||||
|
ext.Typeflag = TypeXHeader
|
||||||
|
// Setting ModTime is required for reader parsing to
|
||||||
|
// succeed, and seems harmless enough.
|
||||||
|
ext.ModTime = hdr.ModTime
|
||||||
|
// The spec asks that we namespace our pseudo files
|
||||||
|
// with the current pid.
|
||||||
|
pid := os.Getpid()
|
||||||
|
dir, file := path.Split(hdr.Name)
|
||||||
|
fullName := path.Join(dir,
|
||||||
|
fmt.Sprintf("PaxHeaders.%d", pid), file)
|
||||||
|
|
||||||
|
ascii := toASCII(fullName)
|
||||||
|
if len(ascii) > 100 {
|
||||||
|
ascii = ascii[:100]
|
||||||
|
}
|
||||||
|
ext.Name = ascii
|
||||||
|
// Construct the body
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
for k, v := range paxHeaders {
|
||||||
|
fmt.Fprint(&buf, paxHeader(k+"="+v))
|
||||||
|
}
|
||||||
|
|
||||||
|
ext.Size = int64(len(buf.Bytes()))
|
||||||
|
if err := tw.writeHeader(ext, false); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := tw.Write(buf.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := tw.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// paxHeader formats a single pax record, prefixing it with the appropriate length
|
||||||
|
func paxHeader(msg string) string {
|
||||||
|
const padding = 2 // Extra padding for space and newline
|
||||||
|
size := len(msg) + padding
|
||||||
|
size += len(strconv.Itoa(size))
|
||||||
|
record := fmt.Sprintf("%d %s\n", size, msg)
|
||||||
|
if len(record) != size {
|
||||||
|
// Final adjustment if adding size increased
|
||||||
|
// the number of digits in size
|
||||||
|
size = len(record)
|
||||||
|
record = fmt.Sprintf("%d %s\n", size, msg)
|
||||||
|
}
|
||||||
|
return record
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes to the current entry in the tar archive.
|
||||||
|
// Write returns the error ErrWriteTooLong if more than
|
||||||
|
// hdr.Size bytes are written after WriteHeader.
|
||||||
|
func (tw *Writer) Write(b []byte) (n int, err error) {
|
||||||
|
if tw.closed {
|
||||||
|
err = ErrWriteTooLong
|
||||||
|
return
|
||||||
|
}
|
||||||
|
overwrite := false
|
||||||
|
if int64(len(b)) > tw.nb {
|
||||||
|
b = b[0:tw.nb]
|
||||||
|
overwrite = true
|
||||||
|
}
|
||||||
|
n, err = tw.w.Write(b)
|
||||||
|
tw.nb -= int64(n)
|
||||||
|
if err == nil && overwrite {
|
||||||
|
err = ErrWriteTooLong
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tw.err = err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
	if tw.err != nil || tw.closed {
		return tw.err
	}
	// Flush records any failure into tw.err, which is checked just below,
	// so its return value is intentionally not captured here.
	tw.Flush()
	tw.closed = true
	if tw.err != nil {
		return tw.err
	}

	// trailer: two zero blocks
	for i := 0; i < 2; i++ {
		_, tw.err = tw.w.Write(zeroBlock)
		if tw.err != nil {
			break
		}
	}
	return tw.err
}
|
491
archive/tar/writer_test.go
Normal file
491
archive/tar/writer_test.go
Normal file
|
@ -0,0 +1,491 @@
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"testing/iotest"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// writerTestEntry pairs one archive entry's header with the file
// contents that should be written for it.
type writerTestEntry struct {
	header   *Header
	contents string
}
|
||||||
|
|
||||||
|
// writerTest describes one golden-file test: writing the given entries
// must reproduce the bytes of file exactly.
type writerTest struct {
	file    string // filename of expected output
	entries []*writerTestEntry
}
|
||||||
|
|
||||||
|
// writerTests holds the golden-file cases for TestWriter. Each case's
// testdata file was produced with the GNU tar commands noted inline.
var writerTests = []*writerTest{
	// The writer test file was produced with this command:
	// tar (GNU tar) 1.26
	//   ln -s small.txt link.txt
	//   tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
	{
		file: "testdata/writer.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "small.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     5,
					ModTime:  time.Unix(1246508266, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Kilts",
			},
			{
				header: &Header{
					Name:     "small2.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     11,
					ModTime:  time.Unix(1245217492, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Google.com\n",
			},
			{
				header: &Header{
					Name:     "link.txt",
					Mode:     0777,
					Uid:      1000,
					Gid:      1000,
					Size:     0,
					ModTime:  time.Unix(1314603082, 0),
					Typeflag: '2',
					Linkname: "small.txt",
					Uname:    "strings",
					Gname:    "strings",
				},
				// no contents
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
	{
		file: "testdata/writer-big.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "tmp/16gig.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     16 << 30,
					ModTime:  time.Unix(1254699560, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				// fake contents
				contents: strings.Repeat("\x00", 4<<10),
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
	//   tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
	{
		file: "testdata/writer-big-long.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     strings.Repeat("longname/", 15) + "16gig.txt",
					Mode:     0644,
					Uid:      1000,
					Gid:      1000,
					Size:     16 << 30,
					ModTime:  time.Unix(1399583047, 0),
					Typeflag: '0',
					Uname:    "guillaume",
					Gname:    "guillaume",
				},
				// fake contents
				contents: strings.Repeat("\x00", 4<<10),
			},
		},
	},
	// This file was produced using gnu tar 1.17
	//   gnutar -b 4 --format=ustar (longname/)*15 + file.txt
	{
		file: "testdata/ustar.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     strings.Repeat("longname/", 15) + "file.txt",
					Mode:     0644,
					Uid:      0765,
					Gid:      024,
					Size:     06,
					ModTime:  time.Unix(1360135598, 0),
					Typeflag: '0',
					Uname:    "shane",
					Gname:    "staff",
				},
				contents: "hello\n",
			},
		},
	},
}
|
||||||
|
|
||||||
|
// bytestr renders a byte slice as a hexadecimal dump line prefixed with the
// given offset, spaced for easy visual inspection: ASCII letters and digits
// are shown literally, every other byte as two hex digits.
//
// Change: removed the dead `const rowLen = 32` — it was declared but never
// used in this function (row chunking is done by the caller, bytediff).
func bytestr(offset int, b []byte) string {
	s := fmt.Sprintf("%04x ", offset)
	for _, ch := range b {
		switch {
		case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
			// Printable alphanumeric: show the character itself.
			s += fmt.Sprintf(" %c", ch)
		default:
			// Everything else: two hex digits.
			s += fmt.Sprintf(" %02x", ch)
		}
	}
	return s
}
|
||||||
|
|
||||||
|
// Render a pseudo-diff between two blocks of bytes.
// Rows of rowLen bytes are compared via their bytestr rendering; only rows
// that differ are emitted, in unified-diff style (-=a, +=b).
func bytediff(a []byte, b []byte) string {
	const rowLen = 32
	s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
	// Loop until both inputs are fully consumed; the shorter one simply
	// contributes empty rows at the tail.
	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
		na, nb := rowLen, rowLen
		if na > len(a) {
			na = len(a)
		}
		if nb > len(b) {
			nb = len(b)
		}
		sa := bytestr(offset, a[0:na])
		sb := bytestr(offset, b[0:nb])
		if sa != sb {
			s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
		}
		// Advance past the consumed row in each input.
		a = a[na:]
		b = b[nb:]
	}
	return s
}
|
||||||
|
|
||||||
|
// TestWriter writes each golden case from writerTests and compares the
// produced archive byte-for-byte against the expected testdata file.
func TestWriter(t *testing.T) {
testLoop:
	for i, test := range writerTests {
		expected, err := ioutil.ReadFile(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}

		buf := new(bytes.Buffer)
		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
		big := false
		for j, entry := range test.entries {
			// "big" entries declare more data than is actually written,
			// so Close is expected to fail for them (checked below).
			big = big || entry.header.Size > 1<<10
			if err := tw.WriteHeader(entry.header); err != nil {
				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
				continue testLoop
			}
			if _, err := io.WriteString(tw, entry.contents); err != nil {
				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
				continue testLoop
			}
		}
		// Only interested in Close failures for the small tests.
		if err := tw.Close(); err != nil && !big {
			t.Errorf("test %d: Failed closing archive: %v", i, err)
			continue testLoop
		}

		actual := buf.Bytes()
		if !bytes.Equal(expected, actual) {
			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
				i, bytediff(expected, actual))
		}
		if testing.Short() { // The second test is expensive.
			break
		}
	}
}
|
||||||
|
|
||||||
|
// TestPax verifies that an over-long file name forces a PAX extended
// header on write and round-trips intact through the reader.
func TestPax(t *testing.T) {
	// Create an archive with a large name
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("os.Stat: %v", err)
	}
	// Force a PAX long name to be written
	longName := strings.Repeat("ab", 100)
	contents := strings.Repeat(" ", int(hdr.Size))
	hdr.Name = longName
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get a long name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != longName {
		t.Fatal("Couldn't recover long file name")
	}
}
|
||||||
|
|
||||||
|
func TestPaxSymlink(t *testing.T) {
|
||||||
|
// Create an archive with a large linkname
|
||||||
|
fileinfo, err := os.Stat("testdata/small.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
hdr, err := FileInfoHeader(fileinfo, "")
|
||||||
|
hdr.Typeflag = TypeSymlink
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("os.Stat:1 %v", err)
|
||||||
|
}
|
||||||
|
// Force a PAX long linkname to be written
|
||||||
|
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||||
|
hdr.Linkname = longLinkname
|
||||||
|
|
||||||
|
hdr.Size = 0
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := NewWriter(&buf)
|
||||||
|
if err := writer.WriteHeader(hdr); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
// Simple test to make sure PAX extensions are in effect
|
||||||
|
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
|
||||||
|
t.Fatal("Expected at least one PAX header to be written.")
|
||||||
|
}
|
||||||
|
// Test that we can get a long name back out of the archive.
|
||||||
|
reader := NewReader(&buf)
|
||||||
|
hdr, err = reader.Next()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if hdr.Linkname != longLinkname {
|
||||||
|
t.Fatal("Couldn't recover long link name")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPaxNonAscii verifies that non-ASCII name/uname/gname fields force
// PAX headers (PAX records are defined as UTF-8) and round-trip intact.
func TestPaxNonAscii(t *testing.T) {
	// Create an archive with non ascii. These should trigger a pax header
	// because pax headers have a defined utf-8 encoding.
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}

	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("os.Stat:1 %v", err)
	}

	// some sample data
	chineseFilename := "文件名"
	chineseGroupname := "組"
	chineseUsername := "用戶名"

	hdr.Name = chineseFilename
	hdr.Gname = chineseGroupname
	hdr.Uname = chineseUsername

	contents := strings.Repeat(" ", int(hdr.Size))

	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Simple test to make sure PAX extensions are in effect
	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
		t.Fatal("Expected at least one PAX header to be written.")
	}
	// Test that we can get a long name back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if hdr.Name != chineseFilename {
		t.Fatal("Couldn't recover unicode name")
	}
	if hdr.Gname != chineseGroupname {
		t.Fatal("Couldn't recover unicode group")
	}
	if hdr.Uname != chineseUsername {
		t.Fatal("Couldn't recover unicode user")
	}
}
|
||||||
|
|
||||||
|
// TestPaxXattrs verifies that extended attributes survive a write/read
// round trip via PAX records.
func TestPaxXattrs(t *testing.T) {
	xattrs := map[string]string{
		"user.key": "value",
	}

	// Create an archive with an xattr
	fileinfo, err := os.Stat("testdata/small.txt")
	if err != nil {
		t.Fatal(err)
	}
	hdr, err := FileInfoHeader(fileinfo, "")
	if err != nil {
		t.Fatalf("os.Stat: %v", err)
	}
	contents := "Kilts"
	hdr.Xattrs = xattrs
	var buf bytes.Buffer
	writer := NewWriter(&buf)
	if err := writer.WriteHeader(hdr); err != nil {
		t.Fatal(err)
	}
	if _, err = writer.Write([]byte(contents)); err != nil {
		t.Fatal(err)
	}
	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	// Test that we can get the xattrs back out of the archive.
	reader := NewReader(&buf)
	hdr, err = reader.Next()
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
		t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
			hdr.Xattrs, xattrs)
	}
}
|
||||||
|
|
||||||
|
// TestPAXHeader checks paxHeader's length-prefix formatting, including the
// digit-carry cases where the prefix changes the record length.
func TestPAXHeader(t *testing.T) {
	medName := strings.Repeat("CD", 50)
	longName := strings.Repeat("AB", 100)
	paxTests := [][2]string{
		{paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
		{"a=b", "6 a=b\n"},          // Single digit length
		{"a=names", "11 a=names\n"}, // Test case involving carries
		{paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
		{paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}

	for _, test := range paxTests {
		key, expected := test[0], test[1]
		if result := paxHeader(key); result != expected {
			t.Fatalf("paxHeader: got %s, expected %s", result, expected)
		}
	}
}
|
||||||
|
|
||||||
|
func TestUSTARLongName(t *testing.T) {
|
||||||
|
// Create an archive with a path that failed to split with USTAR extension in previous versions.
|
||||||
|
fileinfo, err := os.Stat("testdata/small.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
hdr, err := FileInfoHeader(fileinfo, "")
|
||||||
|
hdr.Typeflag = TypeDir
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("os.Stat:1 %v", err)
|
||||||
|
}
|
||||||
|
// Force a PAX long name to be written. The name was taken from a practical example
|
||||||
|
// that fails and replaced ever char through numbers to anonymize the sample.
|
||||||
|
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||||
|
hdr.Name = longName
|
||||||
|
|
||||||
|
hdr.Size = 0
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := NewWriter(&buf)
|
||||||
|
if err := writer.WriteHeader(hdr); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
// Test that we can get a long name back out of the archive.
|
||||||
|
reader := NewReader(&buf)
|
||||||
|
hdr, err = reader.Next()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if hdr.Name != longName {
|
||||||
|
t.Fatal("Couldn't recover long name")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidTypeflagWithPAXHeader(t *testing.T) {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
tw := NewWriter(&buffer)
|
||||||
|
|
||||||
|
fileName := strings.Repeat("ab", 100)
|
||||||
|
|
||||||
|
hdr := &Header{
|
||||||
|
Name: fileName,
|
||||||
|
Size: 4,
|
||||||
|
Typeflag: 0,
|
||||||
|
}
|
||||||
|
if err := tw.WriteHeader(hdr); err != nil {
|
||||||
|
t.Fatalf("Failed to write header: %s", err)
|
||||||
|
}
|
||||||
|
if _, err := tw.Write([]byte("fooo")); err != nil {
|
||||||
|
t.Fatalf("Failed to write the file's data: %s", err)
|
||||||
|
}
|
||||||
|
tw.Close()
|
||||||
|
|
||||||
|
tr := NewReader(&buffer)
|
||||||
|
|
||||||
|
for {
|
||||||
|
header, err := tr.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to read header: %s", err)
|
||||||
|
}
|
||||||
|
if header.Typeflag != 0 {
|
||||||
|
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
Loading…
Reference in a new issue