Merge pull request #1271 from aaronlehmann/remove-tarsum
Remove tarsum support for digest package
This commit is contained in:
commit 9162760443
47 changed files with 90 additions and 2358 deletions
5 Godeps/Godeps.json generated
@@ -83,11 +83,6 @@
 			"ImportPath": "github.com/denverdino/aliyungo/util",
 			"Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3"
 		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/tarsum",
-			"Comment": "v1.4.1-3932-gb63ec6e",
-			"Rev": "b63ec6e4b1f6f5c77a6a74a52fcea9564538c575"
-		},
 		{
 			"ImportPath": "github.com/docker/libtrust",
 			"Rev": "fa567046d9b14f6aa788882a950d69651d230b21"
20 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go generated vendored
@@ -1,20 +0,0 @@
package tarsum

// This interface extends TarSum by adding the Remove method. In general
// there was concern about adding this method to TarSum itself so instead
// it is being added just to "BuilderContext" which will then only be used
// during the .dockerignore file processing - see builder/evaluator.go
type BuilderContext interface {
	TarSum
	Remove(string)
}

func (bc *tarSum) Remove(filename string) {
	for i, fis := range bc.sums {
		if fis.Name() == filename {
			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
			// Note, we don't just return because there could be
			// more than one with this name
		}
	}
}
63 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go generated vendored
@@ -1,63 +0,0 @@
package tarsum

import (
	"io"
	"io/ioutil"
	"os"
	"testing"
)

// Trying to remove a tarsum (in the BuilderContext) that does not exist won't change a thing
func TestTarSumRemoveNonExistent(t *testing.T) {
	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
	reader, err := os.Open(filename)
	if err != nil {
		t.Fatal(err)
	}
	ts, err := NewTarSum(reader, false, Version0)
	if err != nil {
		t.Fatal(err)
	}

	// Read and discard bytes so that it populates sums
	_, err = io.Copy(ioutil.Discard, ts)
	if err != nil {
		t.Errorf("failed to read from %s: %s", filename, err)
	}

	expected := len(ts.GetSums())

	ts.(BuilderContext).Remove("")
	ts.(BuilderContext).Remove("Anything")

	if len(ts.GetSums()) != expected {
		t.Fatalf("Expected %v sums, got %v.", expected, ts.GetSums())
	}
}

// Remove a tarsum (in the BuilderContext)
func TestTarSumRemove(t *testing.T) {
	filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
	reader, err := os.Open(filename)
	if err != nil {
		t.Fatal(err)
	}
	ts, err := NewTarSum(reader, false, Version0)
	if err != nil {
		t.Fatal(err)
	}

	// Read and discard bytes so that it populates sums
	_, err = io.Copy(ioutil.Discard, ts)
	if err != nil {
		t.Errorf("failed to read from %s: %s", filename, err)
	}

	expected := len(ts.GetSums()) - 1

	ts.(BuilderContext).Remove("etc/sudoers")

	if len(ts.GetSums()) != expected {
		t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
	}
}
116 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go generated vendored
@@ -1,116 +0,0 @@
package tarsum

import "sort"

// This info will be accessed through an interface so the actual name and sum cannot be meddled with
type FileInfoSumInterface interface {
	// File name
	Name() string
	// Checksum of this particular file and its headers
	Sum() string
	// Position of file in the tar
	Pos() int64
}

type fileInfoSum struct {
	name string
	sum  string
	pos  int64
}

func (fis fileInfoSum) Name() string {
	return fis.name
}
func (fis fileInfoSum) Sum() string {
	return fis.sum
}
func (fis fileInfoSum) Pos() int64 {
	return fis.pos
}

type FileInfoSums []FileInfoSumInterface

// GetFile returns the first FileInfoSumInterface with a matching name
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
	for i := range fis {
		if fis[i].Name() == name {
			return fis[i]
		}
	}
	return nil
}

// GetAllFile returns a FileInfoSums with all matching names
func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
	f := FileInfoSums{}
	for i := range fis {
		if fis[i].Name() == name {
			f = append(f, fis[i])
		}
	}
	return f
}

func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
	seen := make(map[string]int, len(fis)) // allocate early. no need to grow this map.
	for i := range fis {
		f := fis[i]
		if _, ok := seen[f.Name()]; ok {
			dups = append(dups, f)
		} else {
			seen[f.Name()] = 0
		}
	}
	return dups
}

func (fis FileInfoSums) Len() int      { return len(fis) }
func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }

func (fis FileInfoSums) SortByPos() {
	sort.Sort(byPos{fis})
}

func (fis FileInfoSums) SortByNames() {
	sort.Sort(byName{fis})
}

func (fis FileInfoSums) SortBySums() {
	dups := fis.GetDuplicatePaths()
	if len(dups) > 0 {
		sort.Sort(bySum{fis, dups})
	} else {
		sort.Sort(bySum{fis, nil})
	}
}

// byName is a sort.Sort helper for sorting by file names.
// If names are the same, order them by their appearance in the tar archive
type byName struct{ FileInfoSums }

func (bn byName) Less(i, j int) bool {
	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
	}
	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
}

// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
type bySum struct {
	FileInfoSums
	dups FileInfoSums
}

func (bs bySum) Less(i, j int) bool {
	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
	}
	return bs.FileInfoSums[i].Sum() < bs.FileInFoSums[j].Sum()
}

// byPos is a sort.Sort helper for sorting the fileinfos by their original position in the archive
type byPos struct{ FileInfoSums }

func (bp byPos) Less(i, j int) bool {
	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
}
62 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go generated vendored
@@ -1,62 +0,0 @@
package tarsum

import "testing"

func newFileInfoSums() FileInfoSums {
	return FileInfoSums{
		fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2},
		fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5},
		fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0},
		fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3},
		fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4},
		fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1},
	}
}

func TestSortFileInfoSums(t *testing.T) {
	dups := newFileInfoSums().GetAllFile("dup1")
	if len(dups) != 2 {
		t.Errorf("expected length 2, got %d", len(dups))
	}
	dups.SortByNames()
	if dups[0].Pos() != 4 {
		t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos())
	}

	fis := newFileInfoSums()
	expected := "0abcdef1234567890"
	fis.SortBySums()
	got := fis[0].Sum()
	if got != expected {
		t.Errorf("Expected %q, got %q", expected, got)
	}

	fis = newFileInfoSums()
	expected = "dup1"
	fis.SortByNames()
	gotFis := fis[0]
	if gotFis.Name() != expected {
		t.Errorf("Expected %q, got %q", expected, gotFis.Name())
	}
	// since a duplicate is first, ensure it is ordered first by position too
	if gotFis.Pos() != 4 {
		t.Errorf("Expected %d, got %d", 4, gotFis.Pos())
	}

	fis = newFileInfoSums()
	fis.SortByPos()
	if fis[0].Pos() != 0 {
		t.Errorf("sorted fileInfoSums by Pos should order them by position.")
	}

	fis = newFileInfoSums()
	expected = "deadbeef1"
	gotFileInfoSum := fis.GetFile("dup1")
	if gotFileInfoSum.Sum() != expected {
		t.Errorf("Expected %q, got %q", expected, gotFileInfoSum)
	}
	if fis.GetFile("noPresent") != nil {
		t.Errorf("Should have returned nil if the name was not found.")
	}

}
276 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go generated vendored
@@ -1,276 +0,0 @@
package tarsum

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"crypto"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"strings"
)

const (
	buf8K  = 8 * 1024
	buf16K = 16 * 1024
	buf32K = 32 * 1024
)

// NewTarSum creates a new interface for calculating a fixed time checksum of a
// tar archive.
//
// This is used for calculating checksums of layers of an image, in some cases
// including the byte payload of the image's json metadata as well, and for
// calculating the checksums for buildcache.
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
	return NewTarSumHash(r, dc, v, DefaultTHash)
}

// NewTarSumHash creates a new TarSum, providing a THash to use rather than the DefaultTHash
func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
	headerSelector, err := getTarHeaderSelector(v)
	if err != nil {
		return nil, err
	}
	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
	err = ts.initTarSum()
	return ts, err
}

// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
	parts := strings.SplitN(label, "+", 2)
	if len(parts) != 2 {
		return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
	}

	versionName, hashName := parts[0], parts[1]

	version, ok := tarSumVersionsByName[versionName]
	if !ok {
		return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
	}

	hashConfig, ok := standardHashConfigs[hashName]
	if !ok {
		return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
	}

	tHash := NewTHash(hashConfig.name, hashConfig.hash.New)

	return NewTarSumHash(r, disableCompression, version, tHash)
}

// TarSum is the generic interface for calculating fixed time
// checksums of a tar archive
type TarSum interface {
	io.Reader
	GetSums() FileInfoSums
	Sum([]byte) string
	Version() Version
	Hash() THash
}

// tarSum struct is the structure for a Version0 checksum calculation
type tarSum struct {
	io.Reader
	tarR               *tar.Reader
	tarW               *tar.Writer
	writer             writeCloseFlusher
	bufTar             *bytes.Buffer
	bufWriter          *bytes.Buffer
	bufData            []byte
	h                  hash.Hash
	tHash              THash
	sums               FileInfoSums
	fileCounter        int64
	currentFile        string
	finished           bool
	first              bool
	DisableCompression bool              // false by default. When false, the output is gzip compressed.
	tarSumVersion      Version           // this field is not exported so it cannot be mutated during use
	headerSelector     tarHeaderSelector // handles selecting and ordering headers for files in the archive
}

func (ts tarSum) Hash() THash {
	return ts.tHash
}

func (ts tarSum) Version() Version {
	return ts.tarSumVersion
}

// THash is a hash.Hash type generator and its name
type THash interface {
	Hash() hash.Hash
	Name() string
}

// NewTHash is a convenience method for creating a THash
func NewTHash(name string, h func() hash.Hash) THash {
	return simpleTHash{n: name, h: h}
}

type tHashConfig struct {
	name string
	hash crypto.Hash
}

var (
	// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
	standardHashConfigs = map[string]tHashConfig{
		"sha256": {name: "sha256", hash: crypto.SHA256},
		"sha512": {name: "sha512", hash: crypto.SHA512},
	}
)

// DefaultTHash is the default TarSum hash, "sha256"
var DefaultTHash = NewTHash("sha256", sha256.New)

type simpleTHash struct {
	n string
	h func() hash.Hash
}

func (sth simpleTHash) Name() string    { return sth.n }
func (sth simpleTHash) Hash() hash.Hash { return sth.h() }

func (ts *tarSum) encodeHeader(h *tar.Header) error {
	for _, elem := range ts.headerSelector.selectHeaders(h) {
		if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
			return err
		}
	}
	return nil
}

func (ts *tarSum) initTarSum() error {
	ts.bufTar = bytes.NewBuffer([]byte{})
	ts.bufWriter = bytes.NewBuffer([]byte{})
	ts.tarR = tar.NewReader(ts.Reader)
	ts.tarW = tar.NewWriter(ts.bufTar)
	if !ts.DisableCompression {
		ts.writer = gzip.NewWriter(ts.bufWriter)
	} else {
		ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
	}
	if ts.tHash == nil {
		ts.tHash = DefaultTHash
	}
	ts.h = ts.tHash.Hash()
	ts.h.Reset()
	ts.first = true
	ts.sums = FileInfoSums{}
	return nil
}

func (ts *tarSum) Read(buf []byte) (int, error) {
	if ts.finished {
		return ts.bufWriter.Read(buf)
	}
	if len(ts.bufData) < len(buf) {
		switch {
		case len(buf) <= buf8K:
			ts.bufData = make([]byte, buf8K)
		case len(buf) <= buf16K:
			ts.bufData = make([]byte, buf16K)
		case len(buf) <= buf32K:
			ts.bufData = make([]byte, buf32K)
		default:
			ts.bufData = make([]byte, len(buf))
		}
	}
	buf2 := ts.bufData[:len(buf)]

	n, err := ts.tarR.Read(buf2)
	if err != nil {
		if err == io.EOF {
			if _, err := ts.h.Write(buf2[:n]); err != nil {
				return 0, err
			}
			if !ts.first {
				ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
				ts.fileCounter++
				ts.h.Reset()
			} else {
				ts.first = false
			}

			currentHeader, err := ts.tarR.Next()
			if err != nil {
				if err == io.EOF {
					if err := ts.tarW.Close(); err != nil {
						return 0, err
					}
					if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
						return 0, err
					}
					if err := ts.writer.Close(); err != nil {
						return 0, err
					}
					ts.finished = true
					return n, nil
				}
				return n, err
			}
			ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/")
			if err := ts.encodeHeader(currentHeader); err != nil {
				return 0, err
			}
			if err := ts.tarW.WriteHeader(currentHeader); err != nil {
				return 0, err
			}
			if _, err := ts.tarW.Write(buf2[:n]); err != nil {
				return 0, err
			}
			ts.tarW.Flush()
			if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
				return 0, err
			}
			ts.writer.Flush()

			return ts.bufWriter.Read(buf)
		}
		return n, err
	}

	// Filling the hash buffer
	if _, err = ts.h.Write(buf2[:n]); err != nil {
		return 0, err
	}

	// Filling the tar writer
	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
		return 0, err
	}
	ts.tarW.Flush()

	// Filling the output writer
	if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
		return 0, err
	}
	ts.writer.Flush()

	return ts.bufWriter.Read(buf)
}

func (ts *tarSum) Sum(extra []byte) string {
	ts.sums.SortBySums()
	h := ts.tHash.Hash()
	if extra != nil {
		h.Write(extra)
	}
	for _, fis := range ts.sums {
		h.Write([]byte(fis.Sum()))
	}
	checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
	return checksum
}

func (ts *tarSum) GetSums() FileInfoSums {
	return ts.sums
}
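For orientation while reading the spec that follows: callers wrap a tar stream, drain the Reader, then call Sum. A minimal usage sketch of the removed API (the layer path is illustrative; the pattern mirrors this PR's tests):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	f, err := os.Open("layer.tar") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// disableCompression=true hashes the tar without gzipping the output.
	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		log.Fatal(err)
	}
	// Per-file sums accumulate as the stream is read, so drain it fully
	// before asking for the final checksum.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:<hex digest>"
}
```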
225 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md generated vendored
@@ -1,225 +0,0 @@
page_title: TarSum checksum specification
page_description: Documentation for algorithms used in the TarSum checksum calculation
page_keywords: docker, checksum, validation, tarsum

# TarSum Checksum Specification

## Abstract

This document describes the algorithms used in performing the TarSum checksum
calculation on filesystem layers, the need for this method over existing
methods, and the versioning of this calculation.

## Introduction

The transportation of filesystems, regarding Docker, is done with tar(1)
archives. There are a variety of tar serialization formats [2], and a key
concern here is ensuring a repeatable checksum given a set of inputs from a
generic tar archive. Types of transportation include distribution to and from a
registry endpoint, saving and loading through commands or Docker daemon APIs,
transferring the build context from client to Docker daemon, and committing the
filesystem of a container to become an image.

As tar archives are used for transit, but not preserved in many situations, the
focus of the algorithm is to ensure the integrity of the preserved filesystem,
while maintaining a deterministic accountability. This includes neither
constraining the ordering or manipulation of the files during the creation or
unpacking of the archive, nor including additional metadata state about the
filesystem attributes.

## Intended Audience

This document outlines the methods used for consistent checksum calculation
for filesystems transported via tar archives.

Auditing these methodologies is an open and iterative process. This document
should accommodate the review of source code. Ultimately, this document should
be the starting point of further refinements to the algorithm and its future
versions.

## Concept

The checksum mechanism must ensure the integrity and assurance of the
filesystem payload.

## Checksum Algorithm Profile

A checksum mechanism must define the following operations and attributes:

* Associated hashing cipher - used to checksum each file payload and attribute
  information.
* Checksum list - each file of the filesystem archive has its checksum
  calculated from the payload and attributes of the file. The final checksum is
  calculated from this list, with specific ordering.
* Version - as the algorithm adapts to requirements, there are behaviors of the
  algorithm to manage by versioning.
* Archive being calculated - the tar archive having its checksum calculated

## Elements of TarSum checksum

The calculated sum output is a text string. The elements included in the output
of the calculated sum comprise the information needed for validation of the sum
(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
form.

There are two delimiters used:
* '+' separates TarSum version from hashing cipher
* ':' separates calculation mechanics from expected hash

Example:

```
"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
|         |       \                                                               |
|_version_|_cipher__|__                                                           |
|                      \                                                          |
|_calculation_mechanics_|______________________expected_sum_______________________|
```

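The two delimiters make the string mechanically splittable. The following is a minimal illustrative sketch (a hypothetical helper, not part of the package API) that breaks a TarSum string into its three elements:

```go
package main

import (
	"fmt"
	"strings"
)

// splitTarSum is a hypothetical helper: it splits a TarSum checksum string
// into its version label, hashing cipher, and hex digest.
func splitTarSum(checksum string) (version, cipher, digest string, err error) {
	parts := strings.SplitN(checksum, ":", 2)
	if len(parts) != 2 {
		return "", "", "", fmt.Errorf("missing ':' delimiter in %q", checksum)
	}
	mechanics := strings.SplitN(parts[0], "+", 2)
	if len(mechanics) != 2 {
		return "", "", "", fmt.Errorf("missing '+' delimiter in %q", checksum)
	}
	return mechanics[0], mechanics[1], parts[1], nil
}

func main() {
	v, c, d, _ := splitTarSum("tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
	fmt.Println(v) // tarsum.v1
	fmt.Println(c) // sha256
	fmt.Println(d) // 220a60ec...
}
```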
## Versioning

Versioning was introduced [0] to accommodate differences in calculation needed,
and the ability to maintain reverse compatibility.

The general algorithm is described further in the 'Calculation' section.

### Version0

This is the initial version of TarSum.

Its element in the TarSum checksum string is `tarsum`.

### Version1

Its element in the TarSum checksum is `tarsum.v1`.

The notable changes in this version:
* Exclusion of file `mtime` from the file information headers, in each file
  checksum calculation
* Inclusion of extended attributes (`xattrs`, also seen as `SCHILY.xattr.` prefixed Pax
  tar file info headers) keys and values in each file checksum calculation

### VersionDev

*Do not use unless validating refinements to the checksum algorithm*

Its element in the TarSum checksum is `tarsum.dev`.

This is a floating placeholder for a next version and grounds for testing
changes. The methods used for calculation are subject to change without notice,
and this version is for testing and not for production use.

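The removed versioning.go later in this diff implements this label handling; as a small usage sketch against that API, mapping a checksum's label back to a Version constant:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	// GetVersionFromTarsum resolves the label before '+' to a Version;
	// unrecognized labels return ErrNotVersion.
	v, err := tarsum.GetVersionFromTarsum("tarsum.v1+sha256:deadbeef")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v == tarsum.Version1) // true
}
```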
## Ciphers

The official default and standard hashing cipher used in the calculation mechanic
is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4.

Though the TarSum algorithm itself is not exclusively bound to the single
hashing cipher `sha256`, support for alternate hashing ciphers was later added
[1]. Use cases for an alternate cipher could include future-proofing the TarSum
checksum format and using faster cipher hashes for tar filesystem checksums.

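As a sketch of selecting an alternate cipher through the package's NewTarSumForLabel (the input path is illustrative; sha512 is one of the two standard hash configs the package registers):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	f, err := os.Open("layer.tar") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The label has the form {tarsum_version}+{hash_name}.
	ts, err := tarsum.NewTarSumForLabel(f, true, "tarsum.v1+sha512")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.Sum(nil)) // "tarsum.v1+sha512:<hex digest>"
}
```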
## Calculation

### Requirement

As mentioned earlier, the calculation is such that it takes into consideration
the lifecycle of the tar archive, in that the tar archive is not an immutable,
permanent artifact. Otherwise options like relying on a known hashing cipher
checksum of the archive itself would be reliable enough. The tar archive of the
filesystem is used as a transportation medium for Docker images, and the
archive is discarded once its contents are extracted. Therefore, for consistent
validation, items such as the order of files in the tar archive and time stamps
are subject to change once an image is received.

### Process

The method is typically iterative due to reading tar info headers from the
archive stream, though this is not a strict requirement.

#### Files

Each file in the tar archive has its contents (headers and body) checksummed
individually using the designated associated hashing cipher. The ordered
headers of the file are written to the checksum calculation first, and then the
payload of the file body.

The resulting checksum of the file is appended to the list of file sums. The
sum is encoded as a string of the hexadecimal digest. Additionally, the file
name and position in the archive are kept as reference for special ordering.

#### Headers

The following headers are read, in this
order (with the corresponding representation of their values):
* 'name' - string
* 'mode' - string of the base10 integer
* 'uid' - string of the integer
* 'gid' - string of the integer
* 'size' - string of the integer
* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
* 'typeflag' - string of the char
* 'linkname' - string
* 'uname' - string
* 'gname' - string
* 'devmajor' - string of the integer
* 'devminor' - string of the integer

For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
headers) are included after the above list. These xattr key/values are first
sorted by the keys.

#### Header Format

The ordered headers are written to the hash in the format of

	"{.key}{.value}"

with no newline.

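A minimal sketch of this per-file hashing step (the helper and sample values are illustrative, mirroring the package's encodeHeader and a Version1-style header selection with mtime excluded; note mode 0644 becomes the base10 string "420"):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
)

// writeOrderedHeaders feeds ordered header key/value pairs into the file's
// hash exactly as the spec describes: "{.key}{.value}" with no newline.
func writeOrderedHeaders(h hash.Hash, orderedHeaders [][2]string) {
	for _, elem := range orderedHeaders {
		h.Write([]byte(elem[0] + elem[1]))
	}
}

func main() {
	h := sha256.New()
	// Illustrative ordered headers for a small regular file.
	writeOrderedHeaders(h, [][2]string{
		{"name", "etc/hosts"}, {"mode", "420"}, {"uid", "0"}, {"gid", "0"},
		{"size", "12"}, {"typeflag", "0"}, {"linkname", ""}, {"uname", "root"},
		{"gname", "root"}, {"devmajor", "0"}, {"devminor", "0"},
	})
	h.Write([]byte("hello world\n")) // the file body follows the headers
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```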
#### Body

After the ordered headers of the file have been added to the checksum for the
file, the body of the file is written to the hash.

#### List of file sums

The list of file sums is sorted by the string of the hexadecimal digest.

If there are two files in the tar with matching paths, the order of occurrence
for that path is reflected for the sums of the corresponding file header and
body.

#### Final Checksum

Begin with a fresh or initial state of the associated hash cipher. If there is
additional payload to include in the TarSum calculation for the archive, it is
written first. Then each checksum from the ordered list of file sums is written
to the hash.

The resulting digest is formatted per the Elements of TarSum checksum,
including the TarSum version, the associated hash cipher and the hexadecimal
encoded checksum digest.

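A condensed sketch of this final step, mirroring the removed package's Sum method (names are illustrative, and the duplicate-path positional ordering described above is elided here):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"sort"
)

// finalTarSum hashes the optional extra payload first, then each hex-encoded
// file sum from the sorted list, and formats version+cipher:digest.
func finalTarSum(version, cipher string, newHash func() hash.Hash, extra []byte, fileSums []string) string {
	sort.Strings(fileSums) // spec: sorted by the hexadecimal digest string
	h := newHash()
	if extra != nil {
		h.Write(extra)
	}
	for _, sum := range fileSums {
		h.Write([]byte(sum))
	}
	return version + "+" + cipher + ":" + hex.EncodeToString(h.Sum(nil))
}

func main() {
	fileSums := []string{"0abc", "1def"} // hypothetical per-file digests
	fmt.Println(finalTarSum("tarsum.v1", "sha256", sha256.New, nil, fileSums))
}
```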
## Security Considerations

The initial version of TarSum has undergone one update that could invalidate
handcrafted tar archives. The tar archive format supports appending files
with the same names as prior files in the archive. The latter file will clobber
the prior file of the same path. Due to this, the algorithm now accounts for
files with matching paths, and orders the list of file sums accordingly [3].

## Footnotes

* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31

## Acknowledgements

Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the
TarSum calculation.
648 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go generated vendored
@@ -1,648 +0,0 @@
package tarsum

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"crypto/md5"
	"crypto/rand"
	"crypto/sha1"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
	"testing"
)

type testLayer struct {
	filename string
	options  *sizedOptions
	jsonfile string
	gzip     bool
	tarsum   string
	version  Version
	hash     THash
}

var testLayers = []testLayer{
	{
		filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
		jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
		version:  Version0,
		tarsum:   "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"},
	{
		filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
		jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
		version:  VersionDev,
		tarsum:   "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"},
	{
		filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
		jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
		gzip:     true,
		tarsum:   "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"},
	{
		// Tests existing version of TarSum when xattrs are present
		filename: "testdata/xattr/layer.tar",
		jsonfile: "testdata/xattr/json",
		version:  Version0,
		tarsum:   "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"},
	{
		// Tests next version of TarSum when xattrs are present
		filename: "testdata/xattr/layer.tar",
		jsonfile: "testdata/xattr/json",
		version:  VersionDev,
		tarsum:   "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"},
	{
		filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar",
		jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json",
		tarsum:   "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"},
	{
		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
		tarsum:  "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"},
	{
		// this tar has two files with the same path
		filename: "testdata/collision/collision-0.tar",
		tarsum:   "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"},
	{
		// this tar has the same two files (with the same path), but in reversed order, ensuring it has a different hash than the one above
		filename: "testdata/collision/collision-1.tar",
		tarsum:   "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"},
	{
		// this tar has a newer copy of the file in collision-0.tar, ensuring it has a different hash
		filename: "testdata/collision/collision-2.tar",
		tarsum:   "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"},
	{
		// this tar has a newer copy of the file in collision-1.tar, ensuring it has a different hash
		filename: "testdata/collision/collision-3.tar",
		tarsum:   "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"},
	{
		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
		tarsum:  "tarsum+md5:0d7529ec7a8360155b48134b8e599f53",
		hash:    md5THash,
	},
	{
		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
		tarsum:  "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df",
		hash:    sha1Hash,
	},
	{
		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
		tarsum:  "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c",
		hash:    sha224Hash,
	},
	{
		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
		tarsum:  "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636",
		hash:    sha384Hash,
	},
	{
		options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
		tarsum:  "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855",
		hash:    sha512Hash,
	},
}

type sizedOptions struct {
	num      int64
	size     int64
	isRand   bool
	realFile bool
}

// make a tar:
// * num is the number of files the tar should have
// * size is the bytes per file
// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
// * realFile will write to a TempFile, instead of an in memory buffer
func sizedTar(opts sizedOptions) io.Reader {
	var (
		fh  io.ReadWriter
		err error
	)
	if opts.realFile {
		fh, err = ioutil.TempFile("", "tarsum")
		if err != nil {
			return nil
		}
	} else {
		fh = bytes.NewBuffer([]byte{})
	}
	tarW := tar.NewWriter(fh)
	defer tarW.Close()
	for i := int64(0); i < opts.num; i++ {
		err := tarW.WriteHeader(&tar.Header{
			Name: fmt.Sprintf("/testdata%d", i),
			Mode: 0755,
			Uid:  0,
			Gid:  0,
			Size: opts.size,
		})
		if err != nil {
			return nil
		}
		var rBuf []byte
		if opts.isRand {
			rBuf = make([]byte, 8)
			_, err = rand.Read(rBuf)
			if err != nil {
				return nil
			}
		} else {
			rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
		}

		for i := int64(0); i < opts.size/int64(8); i++ {
			tarW.Write(rBuf)
		}
	}
	return fh
}

func emptyTarSum(gzip bool) (TarSum, error) {
	reader, writer := io.Pipe()
	tarWriter := tar.NewWriter(writer)

	// Immediately close tarWriter and write-end of the
	// Pipe in a separate goroutine so we don't block.
	go func() {
		tarWriter.Close()
		writer.Close()
	}()

	return NewTarSum(reader, !gzip, Version0)
}

// Test errors on NewTarSumForLabel
func TestNewTarSumForLabelInvalid(t *testing.T) {
	reader := strings.NewReader("")

	if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil {
		t.Fatalf("Expected an error, got nothing.")
	}

	if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil {
		t.Fatalf("Expected an error, got nothing.")
	}
	if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil {
		t.Fatalf("Expected an error, got nothing.")
	}
}

func TestNewTarSumForLabel(t *testing.T) {

	layer := testLayers[0]

	reader, err := os.Open(layer.filename)
	if err != nil {
		t.Fatal(err)
	}
	label := strings.Split(layer.tarsum, ":")[0]
	ts, err := NewTarSumForLabel(reader, false, label)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure it actually worked by reading a little bit of it
	nbByteToRead := 8 * 1024
	dBuf := make([]byte, nbByteToRead)
	_, err = ts.Read(dBuf)
	if err != nil {
		t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err)
	}
}

// TestEmptyTar tests that tarsum does not fail to read an empty tar
// and correctly returns the hex digest of an empty hash.
func TestEmptyTar(t *testing.T) {
	// Test without gzip.
	ts, err := emptyTarSum(false)
	if err != nil {
		t.Fatal(err)
	}

	zeroBlock := make([]byte, 1024)
	buf := new(bytes.Buffer)

	n, err := io.Copy(buf, ts)
	if err != nil {
		t.Fatal(err)
	}

	if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) {
		t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n)
	}

	expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil))
	resultSum := ts.Sum(nil)

	if resultSum != expectedSum {
		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
	}

	// Test with gzip.
	ts, err = emptyTarSum(true)
	if err != nil {
		t.Fatal(err)
	}
	buf.Reset()

	n, err = io.Copy(buf, ts)
	if err != nil {
		t.Fatal(err)
	}

	bufgz := new(bytes.Buffer)
	gz := gzip.NewWriter(bufgz)
	n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock))
	gz.Close()
	gzBytes := bufgz.Bytes()

	if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) {
		t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n)
	}

	resultSum = ts.Sum(nil)

	if resultSum != expectedSum {
		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
	}

	// Test without ever actually writing anything.
	if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil {
		t.Fatal(err)
	}

	resultSum = ts.Sum(nil)

	if resultSum != expectedSum {
		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
	}
}

var (
	md5THash   = NewTHash("md5", md5.New)
	sha1Hash   = NewTHash("sha1", sha1.New)
	sha224Hash = NewTHash("sha224", sha256.New224)
	sha384Hash = NewTHash("sha384", sha512.New384)
	sha512Hash = NewTHash("sha512", sha512.New)
)

// Test all the built-in read sizes: buf8K, buf16K, buf32K and more
func TestTarSumsReadSize(t *testing.T) {
	// Test always on the same layer (that is big enough)
	layer := testLayers[0]

	for i := 0; i < 5; i++ {

		reader, err := os.Open(layer.filename)
		if err != nil {
			t.Fatal(err)
		}
		ts, err := NewTarSum(reader, false, layer.version)
		if err != nil {
			t.Fatal(err)
		}

		// Read and discard bytes so that it populates sums
		nbByteToRead := (i + 1) * 8 * 1024
		dBuf := make([]byte, nbByteToRead)
		_, err = ts.Read(dBuf)
		if err != nil {
			t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err)
			continue
		}
	}
}

func TestTarSums(t *testing.T) {
	for _, layer := range testLayers {
		var (
			fh  io.Reader
			err error
		)
		if len(layer.filename) > 0 {
			fh, err = os.Open(layer.filename)
			if err != nil {
				t.Errorf("failed to open %s: %s", layer.filename, err)
				continue
			}
		} else if layer.options != nil {
			fh = sizedTar(*layer.options)
		} else {
			// What else is there to test?
			t.Errorf("what to do with %#v", layer)
			continue
		}
		if file, ok := fh.(*os.File); ok {
			defer file.Close()
		}

		var ts TarSum
		if layer.hash == nil {
			// double negatives!
			ts, err = NewTarSum(fh, !layer.gzip, layer.version)
		} else {
			ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash)
		}
		if err != nil {
			t.Errorf("%q :: %q", err, layer.filename)
			continue
		}

		// Read variable number of bytes to test dynamic buffer
		dBuf := make([]byte, 1)
		_, err = ts.Read(dBuf)
		if err != nil {
			t.Errorf("failed to read 1B from %s: %s", layer.filename, err)
			continue
		}
		dBuf = make([]byte, 16*1024)
		_, err = ts.Read(dBuf)
		if err != nil {
			t.Errorf("failed to read 16KB from %s: %s", layer.filename, err)
			continue
		}

		// Read and discard remaining bytes
		_, err = io.Copy(ioutil.Discard, ts)
		if err != nil {
			t.Errorf("failed to copy from %s: %s", layer.filename, err)
			continue
		}
		var gotSum string
		if len(layer.jsonfile) > 0 {
			jfh, err := os.Open(layer.jsonfile)
			if err != nil {
				t.Errorf("failed to open %s: %s", layer.jsonfile, err)
				continue
			}
			buf, err := ioutil.ReadAll(jfh)
			if err != nil {
				t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
				continue
			}
			gotSum = ts.Sum(buf)
		} else {
			gotSum = ts.Sum(nil)
		}

		if layer.tarsum != gotSum {
			t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum)
		}
		var expectedHashName string
		if layer.hash != nil {
			expectedHashName = layer.hash.Name()
		} else {
			expectedHashName = DefaultTHash.Name()
		}
		if expectedHashName != ts.Hash().Name() {
			t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name())
		}
	}
}

func TestIteration(t *testing.T) {
	headerTests := []struct {
		expectedSum string // TODO(vbatts) it would be nice to get individual sums of each
		version     Version
		hdr         *tar.Header
		data        []byte
	}{
		{
			"tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd",
			Version0,
			&tar.Header{
				Name:     "file.txt",
				Size:     0,
				Typeflag: tar.TypeReg,
				Devminor: 0,
				Devmajor: 0,
			},
			[]byte(""),
		},
		{
			"tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465",
			VersionDev,
			&tar.Header{
				Name:     "file.txt",
				Size:     0,
				Typeflag: tar.TypeReg,
				Devminor: 0,
				Devmajor: 0,
			},
			[]byte(""),
		},
		{
			"tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef",
			VersionDev,
			&tar.Header{
				Name:     "another.txt",
				Uid:      1000,
				Gid:      1000,
				Uname:    "slartibartfast",
				Gname:    "users",
				Size:     4,
				Typeflag: tar.TypeReg,
				Devminor: 0,
				Devmajor: 0,
			},
			[]byte("test"),
		},
		{
			"tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd",
			VersionDev,
			&tar.Header{
				Name:     "xattrs.txt",
				Uid:      1000,
				Gid:      1000,
				Uname:    "slartibartfast",
				Gname:    "users",
				Size:     4,
				Typeflag: tar.TypeReg,
				Xattrs: map[string]string{
					"user.key1": "value1",
					"user.key2": "value2",
				},
			},
			[]byte("test"),
		},
		{
			"tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760",
			VersionDev,
			&tar.Header{
				Name:     "xattrs.txt",
				Uid:      1000,
				Gid:      1000,
				Uname:    "slartibartfast",
				Gname:    "users",
				Size:     4,
				Typeflag: tar.TypeReg,
				Xattrs: map[string]string{
					"user.KEY1": "value1", // adding different case to ensure different sum
					"user.key2": "value2",
				},
			},
			[]byte("test"),
		},
		{
			"tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa",
			Version0,
			&tar.Header{
				Name:     "xattrs.txt",
				Uid:      1000,
				Gid:      1000,
				Uname:    "slartibartfast",
				Gname:    "users",
				Size:     4,
				Typeflag: tar.TypeReg,
				Xattrs: map[string]string{
					"user.NOT": "CALCULATED",
				},
			},
			[]byte("test"),
		},
	}
	for _, htest := range headerTests {
		s, err := renderSumForHeader(htest.version, htest.hdr, htest.data)
		if err != nil {
			t.Fatal(err)
		}

		if s != htest.expectedSum {
			t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s)
		}
	}

}

func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) {
	buf := bytes.NewBuffer(nil)
	// first build our test tar
	tw := tar.NewWriter(buf)
	if err := tw.WriteHeader(h); err != nil {
		return "", err
	}
	if _, err := tw.Write(data); err != nil {
		return "", err
	}
	tw.Close()

	ts, err := NewTarSum(buf, true, v)
	if err != nil {
		return "", err
	}
	tr := tar.NewReader(ts)
	for {
		hdr, err := tr.Next()
		if hdr == nil || err == io.EOF {
			// Signals the end of the archive.
			break
		}
		if err != nil {
			return "", err
		}
		if _, err = io.Copy(ioutil.Discard, tr); err != nil {
			return "", err
		}
	}
	return ts.Sum(nil), nil
}

func Benchmark9kTar(b *testing.B) {
	buf := bytes.NewBuffer([]byte{})
	fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
	if err != nil {
		b.Error(err)
		return
	}
	n, err := io.Copy(buf, fh)
	fh.Close()

	reader := bytes.NewReader(buf.Bytes())

	b.SetBytes(n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		reader.Seek(0, 0)
		ts, err := NewTarSum(reader, true, Version0)
		if err != nil {
			b.Error(err)
			return
		}
		io.Copy(ioutil.Discard, ts)
		ts.Sum(nil)
	}
}

func Benchmark9kTarGzip(b *testing.B) {
	buf := bytes.NewBuffer([]byte{})
	fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
	if err != nil {
		b.Error(err)
		return
	}
	n, err := io.Copy(buf, fh)
	fh.Close()

	reader := bytes.NewReader(buf.Bytes())

	b.SetBytes(n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		reader.Seek(0, 0)
		ts, err := NewTarSum(reader, false, Version0)
		if err != nil {
			b.Error(err)
			return
		}
		io.Copy(ioutil.Discard, ts)
		ts.Sum(nil)
	}
}

// this is a single big file in the tar archive
func Benchmark1mbSingleFileTar(b *testing.B) {
	benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false)
}

// this is a single big file in the tar archive
func Benchmark1mbSingleFileTarGzip(b *testing.B) {
	benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true)
}

// this is 1024 1k files in the tar archive
func Benchmark1kFilesTar(b *testing.B) {
	benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false)
}

// this is 1024 1k files in the tar archive
func Benchmark1kFilesTarGzip(b *testing.B) {
	benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true)
}

func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) {
	var fh *os.File
	tarReader := sizedTar(opts)
	if br, ok := tarReader.(*os.File); ok {
		fh = br
	}
	defer os.Remove(fh.Name())
	defer fh.Close()

	b.SetBytes(opts.size * opts.num)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ts, err := NewTarSum(fh, !isGzip, Version0)
		if err != nil {
			b.Error(err)
			return
		}
		io.Copy(ioutil.Discard, ts)
		ts.Sum(nil)
		fh.Seek(0, 0)
	}
}
@@ -1 +0,0 @@
{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425}
Binary file not shown.
@@ -1 +0,0 @@
{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}
Binary file not shown.
BIN Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar generated vendored
Binary file not shown.
BIN Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar generated vendored
Binary file not shown.
BIN Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar generated vendored
Binary file not shown.
BIN Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar generated vendored
Binary file not shown.
1 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json generated vendored
@@ -1 +0,0 @@
{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0}
BIN Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar generated vendored
Binary file not shown.
150 Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go generated vendored
@@ -1,150 +0,0 @@
package tarsum

import (
	"archive/tar"
	"errors"
	"sort"
	"strconv"
	"strings"
)

// versioning of the TarSum algorithm
// based on the prefix of the hash used
// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
type Version int

// Prefix of "tarsum"
const (
	Version0 Version = iota
	Version1
	// NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation
	VersionDev
)

// VersionLabelForChecksum returns the label for the given tarsum
// checksum, i.e., everything before the first `+` character in
// the string or an empty string if no label separator is found.
func VersionLabelForChecksum(checksum string) string {
	// Checksums are in the form: {versionLabel}+{hashID}:{hex}
	sepIndex := strings.Index(checksum, "+")
	if sepIndex < 0 {
		return ""
	}
	return checksum[:sepIndex]
}

// Get a list of all known tarsum Version
func GetVersions() []Version {
	v := []Version{}
	for k := range tarSumVersions {
		v = append(v, k)
	}
	return v
}

var (
	tarSumVersions = map[Version]string{
		Version0:   "tarsum",
		Version1:   "tarsum.v1",
		VersionDev: "tarsum.dev",
	}
	tarSumVersionsByName = map[string]Version{
		"tarsum":     Version0,
		"tarsum.v1":  Version1,
		"tarsum.dev": VersionDev,
	}
)

func (tsv Version) String() string {
	return tarSumVersions[tsv]
}

// GetVersionFromTarsum returns the Version from the provided string
func GetVersionFromTarsum(tarsum string) (Version, error) {
	tsv := tarsum
	if strings.Contains(tarsum, "+") {
		tsv = strings.SplitN(tarsum, "+", 2)[0]
	}
	for v, s := range tarSumVersions {
		if s == tsv {
			return v, nil
		}
	}
	return -1, ErrNotVersion
}

// Errors that may be returned by functions in this package
var (
	ErrNotVersion            = errors.New("string does not include a TarSum Version")
	ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
)

// tarHeaderSelector is the interface which different versions
// of tarsum should use for selecting and ordering tar headers
// for each item in the archive.
type tarHeaderSelector interface {
	selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
}

type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)

func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
	return f(h)
}

func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	return [][2]string{
		{"name", h.Name},
		{"mode", strconv.Itoa(int(h.Mode))},
		{"uid", strconv.Itoa(h.Uid)},
		{"gid", strconv.Itoa(h.Gid)},
		{"size", strconv.Itoa(int(h.Size))},
		{"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))},
		{"typeflag", string([]byte{h.Typeflag})},
		{"linkname", h.Linkname},
		{"uname", h.Uname},
		{"gname", h.Gname},
		{"devmajor", strconv.Itoa(int(h.Devmajor))},
		{"devminor", strconv.Itoa(int(h.Devminor))},
	}
}

func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	// Get extended attributes.
	xAttrKeys := make([]string, len(h.Xattrs))
	for k := range h.Xattrs {
		xAttrKeys = append(xAttrKeys, k)
	}
	sort.Strings(xAttrKeys)

	// Make the slice with enough capacity to hold the 11 basic headers
	// we want from the v0 selector plus however many xattrs we have.
	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))

	// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
	v0headers := v0TarHeaderSelect(h)
	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
	orderedHeaders = append(orderedHeaders, v0headers[6:]...)

	// Finally, append the sorted xattrs.
	for _, k := range xAttrKeys {
		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
	}

	return
}

var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
	Version0:   v0TarHeaderSelect,
	Version1:   v1TarHeaderSelect,
	VersionDev: v1TarHeaderSelect,
}

func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
	headerSelector, ok := registeredHeaderSelectors[v]
	if !ok {
		return nil, ErrVersionNotImplemented
	}

	return headerSelector, nil
}
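To make the naming scheme above concrete, here is a minimal, self-contained sketch of the `{versionLabel}+{hashID}:{hex}` convention this removed file implemented; it mirrors `VersionLabelForChecksum` using only the standard library, so it runs without the removed package:

```go
package main

import (
	"fmt"
	"strings"
)

// versionLabel mirrors the removed VersionLabelForChecksum: checksums take
// the form {versionLabel}+{hashID}:{hex}, and the label is everything before
// the first '+', or "" when no separator is present.
func versionLabel(checksum string) string {
	sepIndex := strings.Index(checksum, "+")
	if sepIndex < 0 {
		return ""
	}
	return checksum[:sepIndex]
}

func main() {
	fmt.Println(versionLabel("tarsum+sha256:deadbeef"))    // "tarsum"
	fmt.Println(versionLabel("tarsum.v1+sha256:deadbeef")) // "tarsum.v1"
	fmt.Println(versionLabel("sha256:deadbeef"))           // "" (no label)
}
```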
98
Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go
generated
vendored
@@ -1,98 +0,0 @@
package tarsum

import (
	"testing"
)

func TestVersionLabelForChecksum(t *testing.T) {
	version := VersionLabelForChecksum("tarsum+sha256:deadbeef")
	if version != "tarsum" {
		t.Fatalf("Version should have been 'tarsum', was %v", version)
	}
	version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef")
	if version != "tarsum.v1" {
		t.Fatalf("Version should have been 'tarsum.v1', was %v", version)
	}
	version = VersionLabelForChecksum("something+somethingelse")
	if version != "something" {
		t.Fatalf("Version should have been 'something', was %v", version)
	}
	version = VersionLabelForChecksum("invalidChecksum")
	if version != "" {
		t.Fatalf("Version should have been empty, was %v", version)
	}
}

func TestVersion(t *testing.T) {
	expected := "tarsum"
	var v Version
	if v.String() != expected {
		t.Errorf("expected %q, got %q", expected, v.String())
	}

	expected = "tarsum.v1"
	v = 1
	if v.String() != expected {
		t.Errorf("expected %q, got %q", expected, v.String())
	}

	expected = "tarsum.dev"
	v = 2
	if v.String() != expected {
		t.Errorf("expected %q, got %q", expected, v.String())
	}
}

func TestGetVersion(t *testing.T) {
	testSet := []struct {
		Str      string
		Expected Version
	}{
		{"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0},
		{"tarsum+sha256", Version0},
		{"tarsum", Version0},
		{"tarsum.dev", VersionDev},
		{"tarsum.dev+sha256:deadbeef", VersionDev},
	}

	for _, ts := range testSet {
		v, err := GetVersionFromTarsum(ts.Str)
		if err != nil {
			t.Fatalf("%q : %s", err, ts.Str)
		}
		if v != ts.Expected {
			t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v)
		}
	}

	// test one that does not exist, to ensure it errors
	str := "weak+md5:abcdeabcde"
	_, err := GetVersionFromTarsum(str)
	if err != ErrNotVersion {
		t.Fatalf("%q : %s", err, str)
	}
}

func TestGetVersions(t *testing.T) {
	expected := []Version{
		Version0,
		Version1,
		VersionDev,
	}
	versions := GetVersions()
	if len(versions) != len(expected) {
		t.Fatalf("Expected %v versions, got %v", len(expected), len(versions))
	}
	if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) {
		t.Fatalf("Expected [%v], got [%v]", expected, versions)
	}
}

func containsVersion(versions []Version, version Version) bool {
	for _, v := range versions {
		if v == version {
			return true
		}
	}
	return false
}
22
Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go
generated
vendored
@@ -1,22 +0,0 @@
package tarsum

import (
	"io"
)

type writeCloseFlusher interface {
	io.WriteCloser
	Flush() error
}

type nopCloseFlusher struct {
	io.Writer
}

func (n *nopCloseFlusher) Close() error {
	return nil
}

func (n *nopCloseFlusher) Flush() error {
	return nil
}
@@ -4,14 +4,11 @@ import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/version"
	"github.com/docker/docker/pkg/tarsum"
)

var (

@@ -80,38 +77,9 @@ func main() {
	digestFn := algorithm.FromReader

	if !algorithm.Available() {
		// we cannot digest if is not available. An exception is made for
		// tarsum.
		if !strings.HasPrefix(algorithm.String(), "tarsum") {
			unsupported()
		}

		var version tarsum.Version
		if algorithm == "tarsum" {
			// small hack: if we just have tarsum, use latest
			version = tarsum.Version1
		} else {
			var err error
			version, err = tarsum.GetVersionFromTarsum(algorithm.String())
			if err != nil {
				unsupported()
			}
		}

		digestFn = func(rd io.Reader) (digest.Digest, error) {
			ts, err := tarsum.NewTarSum(rd, true, version)
			if err != nil {
				return "", err
			}

			if _, err := io.Copy(ioutil.Discard, ts); err != nil {
				return "", err
			}

			return digest.Digest(ts.Sum(nil)), nil
		}
	}

	for _, job := range jobs {
		dgst, err := digestFn(job.reader)
		if err != nil {
@@ -4,17 +4,11 @@ import (
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"regexp"
	"strings"

	"github.com/docker/docker/pkg/tarsum"
)

const (
	// DigestTarSumV1EmptyTar is the digest for the empty tar file.
	DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)

@@ -28,11 +22,6 @@ const (
//
// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// More important for this code base, this type is compatible with tarsum
// digests. For example, the following would be a valid Digest:
//
// 	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
//
// This allows to abstract the digest behind this type and work only in those
// terms.
type Digest string

@@ -78,25 +67,6 @@ func FromReader(rd io.Reader) (Digest, error) {
	return Canonical.FromReader(rd)
}

// FromTarArchive produces a tarsum digest from reader rd.
func FromTarArchive(rd io.Reader) (Digest, error) {
	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
	if err != nil {
		return "", err
	}

	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		return "", err
	}

	d, err := ParseDigest(ts.Sum(nil))
	if err != nil {
		return "", err
	}

	return d, nil
}

// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) Digest {
	digester := Canonical.New()

@@ -117,13 +87,6 @@ func FromBytes(p []byte) Digest {
// error if not.
func (d Digest) Validate() error {
	s := string(d)
	// Common case will be tarsum
	_, err := ParseTarSum(s)
	if err == nil {
		return nil
	}

	// Continue on for general parser

	if !DigestRegexpAnchored.MatchString(s) {
		return ErrDigestInvalidFormat
@@ -1,8 +1,6 @@
package digest

import (
	"bytes"
	"io"
	"testing"
)

@@ -13,21 +11,6 @@ func TestParseDigest(t *testing.T) {
		algorithm Algorithm
		hex       string
	}{
		{
			input:     "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			algorithm: "tarsum+sha256",
			hex:       "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
		},
		{
			input:     "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			algorithm: "tarsum.dev+sha256",
			hex:       "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
		},
		{
			input:     "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
			algorithm: "tarsum.v1+sha256",
			hex:       "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
		},
		{
			input:     "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			algorithm: "sha256",

@@ -97,25 +80,3 @@ func TestParseDigest(t *testing.T) {
		}
	}
}

// A few test cases used to fix behavior we expect in storage backend.

func TestFromTarArchiveZeroLength(t *testing.T) {
	checkTarsumDigest(t, "zero-length archive", bytes.NewReader([]byte{}), DigestTarSumV1EmptyTar)
}

func TestFromTarArchiveEmptyTar(t *testing.T) {
	// String of 1024 zeros is a valid, empty tar file.
	checkTarsumDigest(t, "1024 zero bytes", bytes.NewReader(bytes.Repeat([]byte("\x00"), 1024)), DigestTarSumV1EmptyTar)
}

func checkTarsumDigest(t *testing.T, msg string, rd io.Reader, expected Digest) {
	dgst, err := FromTarArchive(rd)
	if err != nil {
		t.Fatalf("unexpected error digesting %s: %v", msg, err)
	}

	if dgst != expected {
		t.Fatalf("unexpected digest for %s: %q != %q", msg, dgst, expected)
	}
}
@@ -16,7 +16,6 @@ const (
	SHA256         Algorithm = "sha256"           // sha256 with hex encoding
	SHA384         Algorithm = "sha384"           // sha384 with hex encoding
	SHA512         Algorithm = "sha512"           // sha512 with hex encoding
	TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only

	// Canonical is the primary digest algorithm used with the distribution
	// project. Other digests may be used but this one is the primary storage
@@ -1,7 +1,7 @@
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.
// More importantly, it provides tools and wrappers to work with tarsums and
// More importantly, it provides tools and wrappers to work with
// hash.Hash-based digests with little effort.
//
// Basics

@@ -16,17 +16,7 @@
// 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// In this case, the string "sha256" is the algorithm and the hex bytes are
// the "digest". A tarsum example will be more illustrative of the use case
// involved in the registry:
//
// 	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
//
// For this, we consider the algorithm to be "tarsum+sha256". Prudent
// applications will favor the ParseDigest function to verify the format over
// using simple type casts. However, a normal string can be cast as a digest
// with a simple type conversion:
//
// 	Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b")
// the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
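The `algorithm:hex` form described in this package doc is easy to demonstrate. A minimal sketch, assuming nothing beyond the standard library (the helper name is ours, not the package's; the digest package itself exposes this as `FromBytes`/`FromReader`):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// canonicalDigest renders content in the "algorithm:hex" form described
// above, using sha256 as the algorithm.
func canonicalDigest(p []byte) string {
	sum := sha256.Sum256(p)
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	// Empty input yields the well-known empty digest that appears as
	// DigestSha256EmptyTar in the diff above.
	fmt.Println(canonicalDigest(nil))
	// sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```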
@@ -1,70 +0,0 @@
package digest

import (
	"fmt"

	"regexp"
)

// TarsumRegexp defines a regular expression to match tarsum identifiers.
var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")

// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
// capture groups corresponding to each component.
var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")

// TarSumInfo contains information about a parsed tarsum.
type TarSumInfo struct {
	// Version contains the version of the tarsum.
	Version string

	// Algorithm contains the algorithm for the final digest
	Algorithm string

	// Digest contains the hex-encoded digest.
	Digest string
}

// InvalidTarSumError provides informations about a TarSum that cannot be parsed
// by ParseTarSum.
type InvalidTarSumError string

func (e InvalidTarSumError) Error() string {
	return fmt.Sprintf("invalid tarsum: %q", string(e))
}

// ParseTarSum parses a tarsum string into its components of interest. For
// example, this method may receive the tarsum in the following format:
//
// 	tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e
//
// The function will return the following:
//
// 	TarSumInfo{
// 		Version: "v1",
// 		Algorithm: "sha256",
// 		Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
// 	}
//
func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) {
	components := TarsumRegexpCapturing.FindStringSubmatch(tarSum)

	if len(components) != 1+TarsumRegexpCapturing.NumSubexp() {
		return TarSumInfo{}, InvalidTarSumError(tarSum)
	}

	return TarSumInfo{
		Version:   components[3],
		Algorithm: components[4],
		Digest:    components[5],
	}, nil
}

// String returns the valid, string representation of the tarsum info.
func (tsi TarSumInfo) String() string {
	if tsi.Version == "" {
		return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest)
	}

	return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest)
}
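To make the capture-group indexing in `ParseTarSum` concrete, this self-contained sketch runs the same capturing pattern (copied from the removed `TarsumRegexpCapturing`) against a v1 tarsum:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same capturing pattern as the removed TarsumRegexpCapturing: groups are
// (1) "tarsum", (2) ".<version>", (3) <version>, (4) <algorithm>, (5) <hex>.
var tarsumCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")

func main() {
	c := tarsumCapturing.FindStringSubmatch(
		"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
	fmt.Println(c[3]) // "v1"
	fmt.Println(c[4]) // "sha256"
	fmt.Println(c[5]) // the hex digest
}
```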
@@ -1,79 +0,0 @@
package digest

import (
	"reflect"
	"testing"
)

func TestParseTarSumComponents(t *testing.T) {
	for _, testcase := range []struct {
		input    string
		expected TarSumInfo
		err      error
	}{
		{
			input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
			expected: TarSumInfo{
				Version:   "v1",
				Algorithm: "sha256",
				Digest:    "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
			},
		},
		{
			input: "",
			err:   InvalidTarSumError(""),
		},
		{
			input: "purejunk",
			err:   InvalidTarSumError("purejunk"),
		},
		{
			input: "tarsum.v23+test:12341234123412341effefefe",
			expected: TarSumInfo{
				Version:   "v23",
				Algorithm: "test",
				Digest:    "12341234123412341effefefe",
			},
		},

		// The following test cases are ported from docker core
		{
			// Version 0 tarsum
			input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			expected: TarSumInfo{
				Algorithm: "sha256",
				Digest:    "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			},
		},
		{
			// Dev version tarsum
			input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			expected: TarSumInfo{
				Version:   "dev",
				Algorithm: "sha256",
				Digest:    "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
			},
		},
	} {
		tsi, err := ParseTarSum(testcase.input)
		if err != nil {
			if testcase.err != nil && err == testcase.err {
				continue // passes
			}

			t.Fatalf("unexpected error parsing tarsum: %v", err)
		}

		if testcase.err != nil {
			t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err)
		}

		if !reflect.DeepEqual(tsi, testcase.expected) {
			t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected)
		}

		if testcase.input != tsi.String() {
			t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input)
		}
	}
}
@@ -3,9 +3,6 @@ package digest
import (
	"hash"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/tarsum"
)

// Verifier presents a general verification interface to be used with message

@@ -27,70 +24,10 @@ func NewDigestVerifier(d Digest) (Verifier, error) {
		return nil, err
	}

	alg := d.Algorithm()
	switch alg {
	case "sha256", "sha384", "sha512":
		return hashVerifier{
			hash:   alg.Hash(),
			hash:   d.Algorithm().Hash(),
			digest: d,
		}, nil
	default:
		// Assume we have a tarsum.
		version, err := tarsum.GetVersionFromTarsum(string(d))
		if err != nil {
			return nil, err
		}

		pr, pw := io.Pipe()

		// TODO(stevvooe): We may actually want to ban the earlier versions of
		// tarsum. That decision may not be the place of the verifier.

		ts, err := tarsum.NewTarSum(pr, true, version)
		if err != nil {
			return nil, err
		}

		// TODO(sday): Ick! A goroutine per digest verification? We'll have to
		// get the tarsum library to export an io.Writer variant.
		go func() {
			if _, err := io.Copy(ioutil.Discard, ts); err != nil {
				pr.CloseWithError(err)
			} else {
				pr.Close()
			}
		}()

		return &tarsumVerifier{
			digest: d,
			ts:     ts,
			pr:     pr,
			pw:     pw,
		}, nil
	}
}

// NewLengthVerifier returns a verifier that returns true when the number of
// read bytes equals the expected parameter.
func NewLengthVerifier(expected int64) Verifier {
	return &lengthVerifier{
		expected: expected,
	}
}

type lengthVerifier struct {
	expected int64 // expected bytes read
	len      int64 // bytes read
}

func (lv *lengthVerifier) Write(p []byte) (n int, err error) {
	n = len(p)
	lv.len += int64(n)
	return n, err
}

func (lv *lengthVerifier) Verified() bool {
	return lv.expected == lv.len
}

type hashVerifier struct {

@@ -105,18 +42,3 @@ func (hv hashVerifier) Write(p []byte) (n int, err error) {
func (hv hashVerifier) Verified() bool {
	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
}

type tarsumVerifier struct {
	digest Digest
	ts     tarsum.TarSum
	pr     *io.PipeReader
	pw     *io.PipeWriter
}

func (tv *tarsumVerifier) Write(p []byte) (n int, err error) {
	return tv.pw.Write(p)
}

func (tv *tarsumVerifier) Verified() bool {
	return tv.digest == Digest(tv.ts.Sum(nil))
}
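With the tarsum branch gone, every verifier follows the write-through `hashVerifier` shape kept above. A self-contained sketch of that pattern, using a plain string digest rather than the package's `Digest` and `NewDigest` helpers:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"io"
	"strings"
)

// hashVerifier mirrors the pattern kept by this diff: bytes are written
// through to a hash.Hash, and Verified compares the result against the
// expected "algorithm:hex" digest string.
type hashVerifier struct {
	expected string
	h        hash.Hash
}

func (hv hashVerifier) Write(p []byte) (int, error) { return hv.h.Write(p) }

func (hv hashVerifier) Verified() bool {
	return hv.expected == fmt.Sprintf("sha256:%x", hv.h.Sum(nil))
}

func main() {
	content := "hello"
	sum := sha256.Sum256([]byte(content))
	v := hashVerifier{expected: fmt.Sprintf("sha256:%x", sum), h: sha256.New()}

	// The verifier is the sink of a read-through pipeline, as in the tests.
	io.Copy(v, strings.NewReader(content))
	fmt.Println(v.Verified()) // true
}
```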
@@ -3,13 +3,8 @@ package digest
import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"io"
	"os"
	"strings"
	"testing"

	"github.com/docker/distribution/testutil"
)

func TestDigestVerifier(t *testing.T) {

@@ -27,43 +22,6 @@ func TestDigestVerifier(t *testing.T) {
	if !verifier.Verified() {
		t.Fatalf("bytes not verified")
	}

	tf, tarSum, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating tarfile: %v", err)
	}

	digest, err = FromTarArchive(tf)
	if err != nil {
		t.Fatalf("error digesting tarsum: %v", err)
	}

	if digest.String() != tarSum {
		t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum)
	}

	expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size
	tf.Seek(0, os.SEEK_SET)                    // seek back

	// This is the most relevant example for the registry application. It's
	// effectively a read through pipeline, where the final sink is the digest
	// verifier.
	verifier, err = NewDigestVerifier(digest)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	lengthVerifier := NewLengthVerifier(expectedSize)
	rd := io.TeeReader(tf, lengthVerifier)
	io.Copy(verifier, rd)

	if !lengthVerifier.Verified() {
		t.Fatalf("verifier detected incorrect length")
	}

	if !verifier.Verified() {
		t.Fatalf("bytes not verified")
	}
}

// TestVerifierUnsupportedDigest ensures that unsupported digest validation is
@@ -81,79 +39,11 @@ func TestVerifierUnsupportedDigest(t *testing.T) {
	}
}

// TestJunkNoDeadlock ensures that junk input into a digest verifier properly
// returns errors from the tarsum library. Specifically, we pass in a file
// with a "bad header" and should see the error from the io.Copy to verifier.
// This has been seen with gzipped tarfiles, mishandled by the tarsum package,
// but also on junk input, such as html.
func TestJunkNoDeadlock(t *testing.T) {
	expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473")
	junk := bytes.Repeat([]byte{'a'}, 1024)

	verifier, err := NewDigestVerifier(expected)
	if err != nil {
		t.Fatalf("unexpected error creating verifier: %v", err)
	}

	rd := bytes.NewReader(junk)
	if _, err := io.Copy(verifier, rd); err == nil {
		t.Fatalf("unexpected error verifying input data: %v", err)
	}
}

// TestBadTarNoDeadlock runs a tar with a "bad" tar header through digest
// verifier, ensuring that the verifier returns an error properly.
func TestBadTarNoDeadlock(t *testing.T) {
	// TODO(stevvooe): This test is exposing a bug in tarsum where if we pass
	// a gzipped tar file into tarsum, the library returns an error. This
	// should actually work. When the tarsum package is fixed, this test will
	// fail and we can remove this test or invert it.

	// This tarfile was causing deadlocks in verifiers due mishandled copy error.
	// This is a gzipped tar, which we typically don't see but should handle.
	//
	// From https://registry-1.docker.io/v2/library/ubuntu/blobs/tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473
	const badTar = `
H4sIAAAJbogA/0otSdZnoDEwMDAxMDc1BdJggE6D2YZGJobGBmbGRsZAdYYGBkZGDAqmtHYYCJQW
lyQWAZ1CqTnonhsiAAAAAP//AsV/YkEJTdMAGfFvZmA2Gv/0AAAAAAD//4LFf3F+aVFyarFeTmZx
CbXtAOVnMxMTXPFvbGpmjhb/xobmwPinSyCO8PgHAAAA///EVU9v2z4MvedTEMihl9a5/26/YTkU
yNKiTTDsKMt0rE0WDYmK628/ym7+bFmH2DksQACbIB/5+J7kObwiQsXc/LdYVGibLObRccw01Qv5
19EZ7hbbZudVgWtiDFCSh4paYII4xOVxNgeHLXrYow+GXAAqgSuEQhzlTR5ZgtlsVmB+aKe8rswe
zzsOjwtoPGoTEGplHHhMCJqxSNUPwesbEGbzOXxR34VCHndQmjfhUKhEq/FURI0FqJKFR5q9NE5Z
qbaoBGoglAB+5TSK0sOh3c3UPkRKE25dEg8dDzzIWmqN2wG3BNY4qRL1VFFAoJJb5SXHU90n34nk
SUS8S0AeGwqGyXdZel1nn7KLGhPO0kDeluvN48ty9Q2269ft8/PTy2b5GfKuh9/2LBIWo6oz+N8G
uodmWLETg0mW4lMP4XYYCL4+rlawftpIO40SA+W6Yci9wRZE1MNOjmyGdhBQRy9OHpqOdOGh/wT7
nZdOkHZ650uIK+WrVZdkgErJfnNEJysLnI5FSAj4xuiCQNpOIoNWmhyLByVHxEpLf3dkr+k9KMsV
xV0FhiVB21hgD3V5XwSqRdOmsUYr7oNtZXTVzyTHc2/kqokBy2ihRMVRTN+78goP5Ur/aMhz+KOJ
3h2UsK43kdwDo0Q9jfD7ie2RRur7MdpIrx1Z3X4j/Q1qCswN9r/EGCvXiUy0fI4xeSknnH/92T/+
fgIAAP//GkWjYBSMXAAIAAD//2zZtzAAEgAA`
	expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473")

	verifier, err := NewDigestVerifier(expected)
	if err != nil {
		t.Fatalf("unexpected error creating verifier: %v", err)
	}

	rd := base64.NewDecoder(base64.StdEncoding, strings.NewReader(badTar))

	if _, err := io.Copy(verifier, rd); err == nil {
		t.Fatalf("unexpected error verifying input data: %v", err)
	}

	if verifier.Verified() {
		// For now, we expect an error, since tarsum library cannot handle
		// compressed tars (!!!).
		t.Fatalf("no error received after invalid tar")
	}
}

// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for
// DigestVerifier. We should be tarsum/gzip limited for common cases but we
// want to verify this.
// DigestVerifier.
//
// The relevant benchmarks for comparison can be run with the following
// The relevant benchmark for comparison can be run with the following
// commands:
//
// 	go test -bench . crypto/sha1
// 	go test -bench . github.com/docker/docker/pkg/tarsum
//
@@ -170,7 +170,7 @@ Content-Type: application/vnd.docker.distribution.events.v1+json
      "target": {
        "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
        "length": 2,
        "digest": "tarsum.v2+sha256:0123456789abcdef1",
        "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
        "repository": "library/test",
        "url": "http://example.com/v2/library/test/manifests/latest"
      },

@@ -195,7 +195,7 @@ Content-Type: application/vnd.docker.distribution.events.v1+json
      "target": {
        "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
        "length": 3,
        "digest": "tarsum.v2+sha256:0123456789abcdef2",
        "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6",
        "repository": "library/test",
        "url": "http://example.com/v2/library/test/manifests/latest"
      },
@@ -3,7 +3,6 @@
title = "HTTP API V2"
description = "Specification for the Registry API."
keywords = ["registry, on-prem, images, tags, repository, distribution, api, advanced"]
aliases = ["/registry/spec/"]
[menu.main]
parent="smn_registry_ref"
+++

@@ -302,11 +301,6 @@ Some examples of _digests_ include the following:
digest                                                                            | description                                     |
----------------------------------------------------------------------------------|------------------------------------------------
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b            | Common sha256 based digest                      |
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b  | Tarsum digest, used for legacy layer digests.   |

> __NOTE:__ While we show an example of using a `tarsum` digest, the security
> of tarsum has not been verified. It is recommended that most implementations
> use sha256 for interoperability.

While the _algorithm_ does allow one to implement a wide variety of
algorithms, compliant implementations should use sha256. Heavy processing of

@@ -364,7 +358,7 @@ the relevant manifest fields for the registry are the following:
----------|------------------------------------------------|
name      | The name of the image.                         |
tag       | The tag for this version of the image.         |
fsLayers  | A list of layer descriptors (including tarsum) |
fsLayers  | A list of layer descriptors (including digest) |
signature | A JWS used to verify the manifest content      |

For more information about the manifest format, please see

@@ -372,8 +366,8 @@ For more information about the manifest format, please see

When the manifest is in hand, the client must verify the signature to ensure
the names and layers are valid. Once confirmed, the client will then use the
tarsums to download the individual layers. Layers are stored in as blobs in
the V2 registry API, keyed by their tarsum digest.
digests to download the individual layers. Layers are stored in as blobs in
the V2 registry API, keyed by their digest.

#### Pulling an Image Manifest

@@ -396,7 +390,7 @@ for details):
       "tag": <tag>,
       "fsLayers": [
         {
            "blobSum": <tarsum>
            "blobSum": <digest>
         },
         ...
       ]

@@ -410,15 +404,14 @@ before fetching layers.

#### Pulling a Layer

Layers are stored in the blob portion of the registry, keyed by tarsum digest.
Layers are stored in the blob portion of the registry, keyed by digest.
Pulling a layer is carried out by a standard http request. The URL is as
follows:

    GET /v2/<name>/blobs/<tarsum>
    GET /v2/<name>/blobs/<digest>

Access to a layer will be gated by the `name` of the repository but is
identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an
opaque field, to be interpreted by the tarsum library.
identified uniquely in the registry by `digest`.

This endpoint may issue a 307 (302 for <HTTP 1.1) redirect to another service
for downloading the layer and clients should be prepared to handle redirects.
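As a sketch of the pull flow described here, the following fetches a blob by digest over HTTP. The registry host, repository name, and digest are placeholders, and Go's default client already follows the 307/302 redirects mentioned above:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// Hypothetical registry, repository, and digest; substitute real values.
	url := "http://localhost:5000/v2/library/test/blobs/" +
		"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"

	// http.Get follows redirects, so a 307 to a blob store is handled for us.
	resp, err := http.Get(url)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	n, _ := io.Copy(ioutil.Discard, resp.Body) // consume the layer bytes
	fmt.Println(resp.Status, n, "bytes")
}
```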
@@ -469,7 +462,7 @@ API. The request should be formatted as follows:
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the tarsum specified in `digest` is available, a 200 OK
If the layer with the digest specified in `digest` is available, a 200 OK
response will be received, with no actual body content (this is according to
http specification). The response will look as follows:

@@ -482,7 +475,7 @@ Docker-Content-Digest: <digest>
When this response is received, the client can assume that the layer is
already available in the registry under the given name and should take no
further action to upload the layer. Note that the binary digests may differ
for the existing registry layer, but the tarsums will be guaranteed to match.
for the existing registry layer, but the digests will be guaranteed to match.

##### Uploading the Layer

@@ -549,7 +542,7 @@ carry out a "monolithic" upload, one can simply put the entire content blob to
the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream
```
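A sketch of that monolithic upload in Go, assuming an upload URL was already obtained from the earlier `POST /v2/<name>/blobs/uploads/` step; the host and upload UUID below are hypothetical:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
)

func main() {
	layer := []byte("layer bytes...") // stand-in for real layer content
	dgst := fmt.Sprintf("sha256:%x", sha256.Sum256(layer))

	// Hypothetical upload URL; the UUID comes from the earlier POST.
	uploadUUID := "hypothetical-upload-uuid"
	url := "http://localhost:5000/v2/library/test/blobs/uploads/" +
		uploadUUID + "?digest=" + dgst

	req, err := http.NewRequest("PUT", url, bytes.NewReader(layer))
	if err != nil {
		panic(err)
	}
	req.ContentLength = int64(len(layer))
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "201 Created" on success
}
```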
@@ -564,7 +557,7 @@ Additionally, the upload can be completed with a single `POST` request to
the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
POST /v2/<name>/blobs/uploads/?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream

@@ -635,7 +628,7 @@ the upload will not be considered complete. The format for the final chunk
will be as follows:

```
PUT /v2/<name>/blob/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
PUT /v2/<name>/blob/uploads/<uuid>?digest=<digest>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

@@ -654,7 +647,7 @@ will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<tarsum>
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

@@ -668,28 +661,15 @@ the uploaded blob data.
###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support
verification of a successful transfer. The initial version of the registry API
will support a tarsum digest, in the standard tarsum format. For example, a
HTTP URI parameter might be as follows:

```
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does
result in this tarsum. Optionally, the registry can support other other digest
parameters for non-tarfile content stored as a layer. A regular hash digest
might be specified as follows:
verification of a successful transfer. For example, a HTTP URI parameter
might be as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Such a parameter would be used to verify that the binary content (as opposed
to the tar content) would be verified at the end of the upload process.

For the initial version, registry servers are only required to support the
tarsum format.
Given this parameter, the registry will verify that the provided content does
match this digest.

##### Canceling an Upload

@@ -751,7 +731,7 @@ image manifest. An image can be pushed using the following request format:
       "tag": <tag>,
       "fsLayers": [
         {
            "blobSum": <tarsum>
            "blobSum": <digest>
         },
         ...
       ]

@@ -770,15 +750,15 @@ for details on possible error codes that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
returned. The `detail` field of the error response will have a `digest` field
identifying the missing blob, which will be a tarsum. An error is returned for
each unknown blob. The response format is as follows:
identifying the missing blob. An error is returned for each unknown blob. The
response format is as follows:

    {
        "errors:" [{
            "code": "BLOB_UNKNOWN",
            "message": "blob unknown to registry",
            "detail": {
                "digest": <tarsum>
                "digest": <digest>
            }
        },
        ...
@@ -301,11 +301,6 @@ Some examples of _digests_ include the following:
digest                                                                            | description                                     |
----------------------------------------------------------------------------------|------------------------------------------------
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b            | Common sha256 based digest                      |
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b  | Tarsum digest, used for legacy layer digests.   |

> __NOTE:__ While we show an example of using a `tarsum` digest, the security
> of tarsum has not been verified. It is recommended that most implementations
> use sha256 for interoperability.

While the _algorithm_ does allow one to implement a wide variety of
algorithms, compliant implementations should use sha256. Heavy processing of

@@ -363,7 +358,7 @@ the relevant manifest fields for the registry are the following:
----------|------------------------------------------------|
name      | The name of the image.                         |
tag       | The tag for this version of the image.         |
fsLayers  | A list of layer descriptors (including tarsum) |
fsLayers  | A list of layer descriptors (including digest) |
signature | A JWS used to verify the manifest content      |

For more information about the manifest format, please see

@@ -371,8 +366,8 @@ For more information about the manifest format, please see

When the manifest is in hand, the client must verify the signature to ensure
the names and layers are valid. Once confirmed, the client will then use the
tarsums to download the individual layers. Layers are stored in as blobs in
the V2 registry API, keyed by their tarsum digest.
digests to download the individual layers. Layers are stored in as blobs in
the V2 registry API, keyed by their digest.

#### Pulling an Image Manifest

@@ -395,7 +390,7 @@ for details):
       "tag": <tag>,
       "fsLayers": [
         {
            "blobSum": <tarsum>
            "blobSum": <digest>
         },
         ...
       ]

@@ -409,15 +404,14 @@ before fetching layers.

#### Pulling a Layer

Layers are stored in the blob portion of the registry, keyed by tarsum digest.
Layers are stored in the blob portion of the registry, keyed by digest.
Pulling a layer is carried out by a standard http request. The URL is as
follows:

    GET /v2/<name>/blobs/<tarsum>
    GET /v2/<name>/blobs/<digest>

Access to a layer will be gated by the `name` of the repository but is
identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an
opaque field, to be interpreted by the tarsum library.
identified uniquely in the registry by `digest`.

This endpoint may issue a 307 (302 for <HTTP 1.1) redirect to another service
for downloading the layer and clients should be prepared to handle redirects.

@@ -468,7 +462,7 @@ API. The request should be formatted as follows:
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the tarsum specified in `digest` is available, a 200 OK
If the layer with the digest specified in `digest` is available, a 200 OK
response will be received, with no actual body content (this is according to
http specification). The response will look as follows:

@@ -481,7 +475,7 @@ Docker-Content-Digest: <digest>
When this response is received, the client can assume that the layer is
already available in the registry under the given name and should take no
further action to upload the layer. Note that the binary digests may differ
for the existing registry layer, but the tarsums will be guaranteed to match.
for the existing registry layer, but the digests will be guaranteed to match.

##### Uploading the Layer

@@ -548,7 +542,7 @@ carry out a "monolithic" upload, one can simply put the entire content blob to
the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream

@@ -563,7 +557,7 @@ Additionally, the upload can be completed with a single `POST` request to
the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
POST /v2/<name>/blobs/uploads/?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream

@@ -634,7 +628,7 @@ the upload will not be considered complete. The format for the final chunk
will be as follows:

```
PUT /v2/<name>/blob/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
PUT /v2/<name>/blob/uploads/<uuid>?digest=<digest>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

@@ -653,7 +647,7 @@ will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<tarsum>
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

@@ -667,28 +661,15 @@ the uploaded blob data.
###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support
verification of a successful transfer. The initial version of the registry API
will support a tarsum digest, in the standard tarsum format. For example, a
HTTP URI parameter might be as follows:

```
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does
result in this tarsum. Optionally, the registry can support other other digest
parameters for non-tarfile content stored as a layer. A regular hash digest
might be specified as follows:
verification of a successful transfer. For example, a HTTP URI parameter
might be as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Such a parameter would be used to verify that the binary content (as opposed
to the tar content) would be verified at the end of the upload process.

For the initial version, registry servers are only required to support the
tarsum format.
Given this parameter, the registry will verify that the provided content does
match this digest.

##### Canceling an Upload

@@ -750,7 +731,7 @@ image manifest. An image can be pushed using the following request format:
       "tag": <tag>,
       "fsLayers": [
         {
            "blobSum": <tarsum>
            "blobSum": <digest>
         },
         ...
       ]

@@ -769,15 +750,15 @@ for details on possible error codes that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
returned. The `detail` field of the error response will have a `digest` field
identifying the missing blob, which will be a tarsum. An error is returned for
each unknown blob. The response format is as follows:
identifying the missing blob. An error is returned for each unknown blob. The
response format is as follows:

    {
        "errors:" [{
            "code": "BLOB_UNKNOWN",
            "message": "blob unknown to registry",
            "detail": {
                "digest": <tarsum>
                "digest": <digest>
            }
        },
        ...
@@ -56,7 +56,7 @@ Manifest provides the base accessible fields for working with V2 image format
   - **`blobSum`** *digest.Digest*

      blobSum is the digest of the referenced filesystem image layer. A
      digest can be a tarsum or sha256 hash.
      digest must be a sha256 hash.


   - **`history`** *array*

@@ -119,7 +119,7 @@ func (sm *SignedManifest) MarshalJSON() ([]byte, error) {

// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
	// BlobSum is the tarsum of the referenced filesystem image layer
	// BlobSum is the digest of the referenced filesystem image layer
	BlobSum digest.Digest `json:"blobSum"`
}

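Since `FSLayer` is the unit from which a manifest's `fsLayers` array is built, a quick sketch of how it marshals; the local `Digest` type stands in for `digest.Digest`, which is itself just a string type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Digest stands in for digest.Digest, a string type.
type Digest string

// FSLayer mirrors the struct in the diff above.
type FSLayer struct {
	// BlobSum is the digest of the referenced filesystem image layer
	BlobSum Digest `json:"blobSum"`
}

func main() {
	layer := FSLayer{BlobSum: "sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"}
	b, _ := json.Marshal(layer)
	fmt.Println(string(b))
	// {"blobSum":"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"}
}
```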
@@ -49,7 +49,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
         "target": {
            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
            "size": 2,
            "digest": "tarsum.v2+sha256:0123456789abcdef1",
            "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
            "length": 2,
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"

@@ -75,7 +75,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
         "target": {
            "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
            "size": 3,
            "digest": "tarsum.v2+sha256:0123456789abcdef2",
            "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6",
            "length": 3,
            "repository": "library/test",
            "url": "http://example.com/v2/library/test/manifests/latest"

@@ -127,7 +127,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
	var layerPush0 Event
	layerPush0 = prototype
	layerPush0.ID = "asdf-asdf-asdf-asdf-1"
	layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1"
	layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
	layerPush0.Target.Length = 2
	layerPush0.Target.Size = 2
	layerPush0.Target.MediaType = layerMediaType

@@ -137,7 +137,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
	var layerPush1 Event
	layerPush1 = prototype
	layerPush1.ID = "asdf-asdf-asdf-asdf-2"
	layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2"
	layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
	layerPush1.Target.Length = 3
	layerPush1.Target.Size = 3
	layerPush1.Target.MediaType = layerMediaType
@@ -87,14 +87,6 @@ func TestRouter(t *testing.T) {
				"name": "docker.com/foo/bar/baz",
			},
		},
		{
			RouteName:  RouteNameBlob,
			RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
			Vars: map[string]string{
				"name":   "foo/bar",
				"digest": "tarsum.dev+foo:abcdef0919234",
			},
		},
		{
			RouteName:  RouteNameBlob,
			RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
@@ -35,9 +35,9 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
		},
		{
			description:  "build blob url",
			expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789",
			expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
			build: func() (string, error) {
				return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789")
				return urlBuilder.BuildBlobURL("foo/bar", "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5")
			},
		},
		{

@@ -49,11 +49,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
		},
		{
			description:  "build blob upload url with digest and size",
			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
			build: func() (string, error) {
				return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{
					"size":   []string{"10000"},
					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
					"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
				})
			},
		},

@@ -66,11 +66,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
		},
		{
			description:  "build blob upload chunk url with digest and size",
			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000",
			build: func() (string, error) {
				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{
					"size":   []string{"10000"},
					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
					"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"},
				})
			},
		},
@@ -251,22 +251,18 @@ type blobArgs struct {
	imageName   string
	layerFile   io.ReadSeeker
	layerDigest digest.Digest
	tarSumStr   string
}

func makeBlobArgs(t *testing.T) blobArgs {
	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random layer file: %v", err)
	}

	layerDigest := digest.Digest(tarSumStr)

	args := blobArgs{
		imageName:   "foo/bar",
		layerFile:   layerFile,
		layerDigest: layerDigest,
		tarSumStr:   tarSumStr,
	}
	return args
}

@@ -393,7 +389,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {

	// -----------------------------------------
	// Do layer push with an empty body and correct digest
	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
	zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{}))
	if err != nil {
		t.Fatalf("unexpected error digesting empty buffer: %v", err)
	}

@@ -406,7 +402,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {

	// This is a valid but empty tarfile!
	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
	emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar))
	if err != nil {
		t.Fatalf("unexpected error digesting empty tar: %v", err)
	}

@@ -476,7 +472,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {

	// ----------------
	// Fetch the layer with an invalid digest
	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
	resp, err = http.Get(badURL)
	if err != nil {
		t.Fatalf("unexpected error fetching layer: %v", err)

@@ -523,7 +519,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
	checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK)

	// Missing tests:
	// 	- Upload the same tarsum file under and different repository and
	// 	- Upload the same tar file under and different repository and
	//       ensure the content remains uncorrupted.
	return env
}

@@ -570,7 +566,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {

	// ----------------
	// Attempt to delete a layer with an invalid digest
	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
	resp, err = httpDelete(badURL)
	if err != nil {
		t.Fatalf("unexpected error fetching layer: %v", err)

@@ -612,12 +608,11 @@ func TestDeleteDisabled(t *testing.T) {

	imageName := "foo/bar"
	// "build" our layer file
	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random layer file: %v", err)
	}

	layerDigest := digest.Digest(tarSumStr)
	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
	if err != nil {
		t.Fatalf("Error building blob URL")

@@ -638,12 +633,11 @@ func TestDeleteReadOnly(t *testing.T) {

	imageName := "foo/bar"
	// "build" our layer file
	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random layer file: %v", err)
	}

	layerDigest := digest.Digest(tarSumStr)
	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
	if err != nil {
		t.Fatalf("Error building blob URL")
@@ -102,13 +102,6 @@ func TestAppDispatcher(t *testing.T) {
				"name", "foo/bar",
			},
		},
		{
			endpoint: v2.RouteNameBlob,
			vars: []string{
				"name", "foo/bar",
				"digest", "tarsum.v1+bogus:abcdef0123456789",
			},
		},
		{
			endpoint: v2.RouteNameBlobUpload,
			vars: []string{
@@ -20,16 +20,11 @@ import (
// TestSimpleBlobUpload covers the blob upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleBlobUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
	randomDataReader, dgst, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)
	if err != nil {
		t.Fatalf("error allocating upload store: %v", err)
	}

	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()

@@ -225,13 +220,11 @@ func TestSimpleBlobRead(t *testing.T) {
	}
	bs := repository.Blobs(ctx)

	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
	randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	// Test for existence.
	desc, err := bs.Stat(ctx, dgst)
	if err != distribution.ErrBlobUnknown {

@@ -358,7 +351,7 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec

	if dgst != expectedDigest {
		// sanity check on zero digest
		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
	}

	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})

@@ -302,7 +302,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
	// get a hash, then the underlying file is deleted, we risk moving
	// a zero-length blob into a nonzero-length blob location. To
	// prevent this horrid thing, we employ the hack of only allowing
	// to this happen for the zero tarsum.
	// to this happen for the digest of an empty tar.
	if desc.Digest == digest.DigestSha256EmptyTar {
		return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
	}
2
registry/storage/cache/redis/redis.go
vendored
@@ -249,7 +249,7 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx cont
 	}
 
 	// Also set the values for the primary descriptor, if they differ by
-	// algorithm (ie sha256 vs tarsum).
+	// algorithm (ie sha256 vs sha512).
 	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
 		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
 			return err
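The condition above only writes a second descriptor entry when the two digests disagree on algorithm. A minimal sketch of that check, with hypothetical digest values:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Hypothetical digests naming the same blob under two algorithms.
	primary := digest.Digest("sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
	secondary := digest.Digest("sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")

	// Mirrors the guard in setDescriptor: only cross-algorithm entries
	// need the extra write for the primary descriptor.
	fmt.Println(primary.Algorithm() != secondary.Algorithm()) // true
}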
@@ -282,7 +282,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 	}
 
 	if target != dgst {
-		// Track when we are doing cross-digest domain lookups. ie, tarsum to sha256.
+		// Track when we are doing cross-digest domain lookups. ie, sha512 to sha256.
 		context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
 	}
 
@@ -396,9 +396,8 @@ type layerLinkPathSpec struct {
 func (layerLinkPathSpec) pathSpec() {}
 
 // blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
-// should be "safe" before getting this far due to strict digest requirements
-// but we can add further path conversion here, if needed.
+// input. Paths should be "safe" before getting this far due to strict digest
+// requirements but we can add further path conversion here, if needed.
 var blobAlgorithmReplacer = strings.NewReplacer(
 	"+", "/",
 	".", "/",
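Only the comment changes here; the replacer itself still maps "+" and "." in an algorithm component to path separators. A quick self-contained demonstration, using the kind of algorithm identifier this used to handle:

package main

import (
	"fmt"
	"strings"
)

// Same replacer as in paths.go: turn the separators allowed in an
// algorithm identifier into directory levels.
var blobAlgorithmReplacer = strings.NewReplacer(
	"+", "/",
	".", "/",
)

func main() {
	fmt.Println(blobAlgorithmReplacer.Replace("tarsum.v1+sha256"))
	// Output: tarsum/v1/sha256
}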
@@ -468,10 +467,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
 //
 // <algorithm>/<hex digest>
 //
-// Most importantly, for tarsum, the layout looks like this:
-//
-// tarsum/<version>/<digest algorithm>/<full digest>
-//
 // If multilevel is true, the first two bytes of the digest will separate
 // groups of digest folder. It will be as follows:
 //
@@ -494,19 +489,5 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 
 	suffix = append(suffix, hex)
 
-	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
-		// We have a tarsum!
-		version := tsi.Version
-		if version == "" {
-			version = "v0"
-		}
-
-		prefix = []string{
-			"tarsum",
-			version,
-			tsi.Algorithm,
-		}
-	}
-
 	return append(prefix, suffix...), nil
 }
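With the tarsum branch removed, digestPathComponents has no prefix logic left; conceptually it reduces to the algorithm plus an optionally grouped hex digest. A simplified sketch of the remaining behavior (not the exact implementation):

package main

import (
	"fmt"
	"strings"
)

// digestPathComponentsSketch mimics the simplified function: the
// algorithm, then (if multilevel) the first two hex characters as a
// grouping directory, then the full hex digest.
func digestPathComponentsSketch(algorithm, hex string, multilevel bool) []string {
	suffix := []string{}
	if multilevel {
		suffix = append(suffix, hex[:2])
	}
	suffix = append(suffix, hex)
	return append([]string{algorithm}, suffix...)
}

func main() {
	parts := digestPathComponentsSketch("sha256", "abcdef0123456789", true) // hex shortened for the example
	fmt.Println(strings.Join(parts, "/"))
	// Output: sha256/ab/abcdef0123456789
}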
@@ -2,8 +2,6 @@ package storage
 
 import (
 	"testing"
-
-	"github.com/docker/distribution/digest"
 )
 
 func TestPathMapper(t *testing.T) {
@@ -84,25 +82,6 @@ func TestPathMapper(t *testing.T) {
 			},
 			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
 		},
-		{
-			spec: layerLinkPathSpec{
-				name:   "foo/bar",
-				digest: "tarsum.v1+test:abcdef",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.dev+sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.v1+sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
-
 		{
 			spec: uploadDataPathSpec{
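For contrast with the removed tarsum expectations, the surviving layout for a plain sha256 blob composes the same components (see the sketch above) into a data path. A self-contained illustration with a hypothetical digest value:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical sha256 digest hex; the registry groups blobs by
	// algorithm and by the first two hex characters.
	hex := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
	fmt.Println(path.Join("/docker/registry/v2/blobs", "sha256", hex[:2], hex, "data"))
	// /docker/registry/v2/blobs/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data
}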
@@ -6,17 +6,16 @@ import (
 	"crypto/rand"
 	"fmt"
 	"io"
-	"io/ioutil"
 	mrand "math/rand"
 	"time"
 
-	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/distribution/digest"
 )
 
 // CreateRandomTarFile creates a random tarfile, returning it as an
-// io.ReadSeeker along with its tarsum. An error is returned if there is a
+// io.ReadSeeker along with its digest. An error is returned if there is a
 // problem generating valid content.
-func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) {
+func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) {
 	nFiles := mrand.Intn(10) + 10
 	target := &bytes.Buffer{}
 	wr := tar.NewWriter(target)
@@ -73,23 +72,7 @@ func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) {
 		return nil, "", err
 	}
 
-	reader := bytes.NewReader(target.Bytes())
+	dgst = digest.FromBytes(target.Bytes())
 
-	// A tar builder that supports tarsum inline calculation would be awesome
-	// here.
-	ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1)
-	if err != nil {
-		return nil, "", err
-	}
-
-	nn, err := io.Copy(ioutil.Discard, ts)
-	if nn != int64(len(target.Bytes())) {
-		return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes()))
-	}
-
-	if err != nil {
-		return nil, "", err
-	}
-
-	return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil
+	return bytes.NewReader(target.Bytes()), dgst, nil
 }
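Call sites now receive a ready-to-use digest.Digest computed with digest.FromBytes, with no second tarsum pass over the archive. A minimal usage sketch of the updated helper, assuming the repository's testutil import path:

package main

import (
	"fmt"

	"github.com/docker/distribution/testutil"
)

func main() {
	// rs is the tar content reader, positioned at the start; dgst is the
	// digest of exactly those bytes.
	rs, dgst, err := testutil.CreateRandomTarFile()
	if err != nil {
		panic(err)
	}
	_ = rs
	fmt.Println("random tar digest:", dgst)
}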