dep: Update containers/image to 1d7e25b91705e4d1cddb5396baf112caeb1119f3

Signed-off-by: Andrew Pilloud <andrewpilloud@igneoussystems.com>
Andrew Pilloud 2017-03-13 09:33:17 -07:00
parent 54c176e336
commit de9995d5f0
84 changed files with 3091 additions and 748 deletions

View file

@@ -1,62 +0,0 @@
package copy
import (
"bytes"
"compress/bzip2"
"compress/gzip"
"io"
"github.com/pkg/errors"
"github.com/Sirupsen/logrus"
)
// decompressorFunc, given a compressed stream, returns the decompressed stream.
type decompressorFunc func(io.Reader) (io.Reader, error)
func gzipDecompressor(r io.Reader) (io.Reader, error) {
return gzip.NewReader(r)
}
func bzip2Decompressor(r io.Reader) (io.Reader, error) {
return bzip2.NewReader(r), nil
}
func xzDecompressor(r io.Reader) (io.Reader, error) {
return nil, errors.New("Decompressing xz streams is not supported")
}
// compressionAlgos is an internal implementation detail of detectCompression
var compressionAlgos = map[string]struct {
prefix []byte
decompressor decompressorFunc
}{
"gzip": {[]byte{0x1F, 0x8B, 0x08}, gzipDecompressor}, // gzip (RFC 1952)
"bzip2": {[]byte{0x42, 0x5A, 0x68}, bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress)
"xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, xzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
}
// detectCompression returns a decompressorFunc if the input is recognized as a compressed format, nil otherwise.
// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
func detectCompression(input io.Reader) (decompressorFunc, io.Reader, error) {
buffer := [8]byte{}
n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
return nil, nil, err
}
var decompressor decompressorFunc
for name, algo := range compressionAlgos {
if bytes.HasPrefix(buffer[:n], algo.prefix) {
logrus.Debugf("Detected compression format %s", name)
decompressor = algo.decompressor
break
}
}
if decompressor == nil {
logrus.Debugf("No compression detected")
}
return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}
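
The detection code removed here is not dropped from the library: this update relocates it to github.com/containers/image/pkg/compression, where decompressorFunc becomes the exported DecompressorFunc and detectCompression becomes DetectCompression (the copy.go hunks below switch to the new names). A hedged sketch of how a caller might read a possibly-compressed stream through the relocated API; the file name layer.tar.gz is purely illustrative:

// Hedged sketch (not part of the commit): transparently reading a
// possibly-compressed file via pkg/compression.
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/containers/image/pkg/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// DetectCompression consumes the start of f; read only from the
	// returned reader afterwards so no bytes are lost.
	decompressor, reader, err := compression.DetectCompression(f)
	if err != nil {
		log.Fatal(err)
	}
	if decompressor != nil {
		reader, err = decompressor(reader)
		if err != nil {
			log.Fatal(err)
		}
	}
	data, err := ioutil.ReadAll(reader)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d uncompressed bytes", len(data))
}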

View file

@@ -1,86 +0,0 @@
package copy
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDetectCompression(t *testing.T) {
cases := []struct {
filename string
unimplemented bool
}{
{"fixtures/Hello.uncompressed", false},
{"fixtures/Hello.gz", false},
{"fixtures/Hello.bz2", false},
{"fixtures/Hello.xz", true},
}
// The original stream is preserved.
for _, c := range cases {
originalContents, err := ioutil.ReadFile(c.filename)
require.NoError(t, err, c.filename)
stream, err := os.Open(c.filename)
require.NoError(t, err, c.filename)
defer stream.Close()
_, updatedStream, err := detectCompression(stream)
require.NoError(t, err, c.filename)
updatedContents, err := ioutil.ReadAll(updatedStream)
require.NoError(t, err, c.filename)
assert.Equal(t, originalContents, updatedContents, c.filename)
}
// The correct decompressor is chosen, and the result is as expected.
for _, c := range cases {
stream, err := os.Open(c.filename)
require.NoError(t, err, c.filename)
defer stream.Close()
decompressor, updatedStream, err := detectCompression(stream)
require.NoError(t, err, c.filename)
var uncompressedStream io.Reader
switch {
case decompressor == nil:
uncompressedStream = updatedStream
case c.unimplemented:
_, err := decompressor(updatedStream)
assert.Error(t, err)
continue
default:
s, err := decompressor(updatedStream)
require.NoError(t, err)
uncompressedStream = s
}
uncompressedContents, err := ioutil.ReadAll(uncompressedStream)
require.NoError(t, err, c.filename)
assert.Equal(t, []byte("Hello"), uncompressedContents, c.filename)
}
// Empty input is handled reasonably.
decompressor, updatedStream, err := detectCompression(bytes.NewReader([]byte{}))
require.NoError(t, err)
assert.Nil(t, decompressor)
updatedContents, err := ioutil.ReadAll(updatedStream)
require.NoError(t, err)
assert.Equal(t, []byte{}, updatedContents)
// Error reading input
reader, writer := io.Pipe()
defer reader.Close()
writer.CloseWithError(errors.New("Expected error reading input in detectCompression"))
_, _, err = detectCompression(reader)
assert.Error(t, err)
}
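
For context, the Hello.* fixtures exercised above are small files whose uncompressed content is the text "Hello"; this commit replaces the copies under copy/fixtures with symlinks into pkg/compression/fixtures (see below). A hedged sketch, not how the fixtures were actually produced, of generating an equivalent gzip fixture. Note that gzip output embeds metadata, so regenerated bytes (and any digest taken over the compressed file, as in TestComputeDiffID further down) can differ from the checked-in file, and Go's standard library cannot write bzip2 or xz, so those fixtures would come from external tools:

// Hedged sketch (not from the commit): writing a "Hello" gzip fixture.
package main

import (
	"compress/gzip"
	"log"
	"os"
)

func main() {
	f, err := os.Create("Hello.gz")
	if err != nil {
		log.Fatal(err)
	}
	zw := gzip.NewWriter(f)
	if _, err := zw.Write([]byte("Hello")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}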

View file

@@ -7,12 +7,14 @@ import (
"io"
"io/ioutil"
"reflect"
"time"
pb "gopkg.in/cheggaaa/pb.v1"
"github.com/Sirupsen/logrus"
"github.com/containers/image/image"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
"github.com/containers/image/types"
@@ -45,6 +47,8 @@ type imageCopier struct {
diffIDsAreNeeded bool
canModifyManifest bool
reportWriter io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
}
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
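
This context line is cut off at the hunk boundary; the digesting reader it refers to wraps the source stream, hashes bytes as they pass through, and flags a mismatch against the expected digest. A hedged sketch of that idea (not the library's exact implementation; the type and constructor names are illustrative):

// Hedged sketch: hash the bytes while streaming and report a mismatch
// against the expected digest once EOF is reached.
package example

import (
	"io"

	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type sketchDigestingReader struct {
	source         io.Reader
	digester       digest.Digester
	expectedDigest digest.Digest
}

func newSketchDigestingReader(source io.Reader, expected digest.Digest) *sketchDigestingReader {
	return &sketchDigestingReader{
		source:         source,
		digester:       expected.Algorithm().Digester(),
		expectedDigest: expected,
	}
}

func (d *sketchDigestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		d.digester.Hash().Write(p[:n])
	}
	if err == io.EOF && d.digester.Digest() != d.expectedDigest {
		return n, errors.Errorf("digest mismatch: expected %s, computed %s", d.expectedDigest, d.digester.Digest())
	}
	return n, err
}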
@@ -92,14 +96,28 @@ type Options struct {
ReportWriter io.Writer
SourceCtx *types.SystemContext
DestinationCtx *types.SystemContext
ProgressInterval time.Duration // time to wait between reports to signal the progress channel
Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
}
// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility.
func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error {
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility.
func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
// NOTE this function uses an output parameter for the error return value.
// Setting this and returning is the ideal way to return an error.
//
// the defers in this routine will wrap the error return with its own errors
// which can be valuable context in the middle of a multi-streamed copy.
if options == nil {
options = &Options{}
}
reportWriter := ioutil.Discard
if options != nil && options.ReportWriter != nil {
if options.ReportWriter != nil {
reportWriter = options.ReportWriter
}
writeReport := func(f string, a ...interface{}) {
fmt.Fprintf(reportWriter, f, a...)
}
@@ -108,7 +126,12 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
if err != nil {
return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
}
defer dest.Close()
defer func() {
if err := dest.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (dest: %v)", err)
}
}()
destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()
rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
@@ -118,7 +141,9 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
unparsedImage := image.UnparsedFromSource(rawSource)
defer func() {
if unparsedImage != nil {
unparsedImage.Close()
if err := unparsedImage.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (unparsed: %v)", err)
}
}
}()
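
The Close handling above is the visible part of a broader change: Image now uses a named error return (retErr) so that deferred Close calls can attach their own failure to whatever error is already propagating, as described by the NOTE comment added earlier in this file. A minimal standalone sketch of the pattern; processFile and the os.Open usage are illustrative, not from the commit:

// Hedged sketch of the named-return + deferred-Close pattern.
package example

import (
	"os"

	"github.com/pkg/errors"
)

func processFile(path string) (retErr error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			// errors.Wrapf returns nil when retErr is nil, so this only adds
			// context to a failure that is already propagating; it does not
			// turn a clean run into an error.
			retErr = errors.Wrapf(retErr, " (close: %v)", err)
		}
	}()

	// ... work with f; any error returned here reaches the caller, annotated
	// by the defer above if Close also fails ...
	return nil
}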
@@ -131,14 +156,18 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
}
unparsedImage = nil
defer src.Close()
defer func() {
if err := src.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (source: %v)", err)
}
}()
if src.IsMultiImage() {
return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
}
var sigs [][]byte
if options != nil && options.RemoveSignatures {
if options.RemoveSignatures {
sigs = [][]byte{}
} else {
writeReport("Getting image source signatures\n")
@@ -173,6 +202,8 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates),
canModifyManifest: canModifyManifest,
reportWriter: reportWriter,
progressInterval: options.ProgressInterval,
progress: options.Progress,
}
if err := ic.copyLayers(); err != nil {
@@ -199,7 +230,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
return err
}
if options != nil && options.SignBy != "" {
if options.SignBy != "" {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return errors.Wrap(err, "Error initializing GPG")
@@ -370,7 +401,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(decompressorFunc) io.Writer // = nil
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
@@ -381,7 +412,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl
pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
}()
getDiffIDRecorder = func(decompressor decompressorFunc) io.Writer {
getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
// reading from the pipe has failed, we don't really care.
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
@@ -399,7 +430,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl
}
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor decompressorFunc) {
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
result := diffIDResult{
digest: "",
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
@@ -411,7 +442,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
}
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Digest, error) {
func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
@@ -428,7 +459,7 @@ func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Dige
// perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied blob.
func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
canCompress bool) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
@@ -446,8 +477,8 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
var destStream io.Reader = digestingReader
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by detectCompression.
decompressor, destStream, err := detectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@@ -489,6 +520,17 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
inputInfo.Size = -1
}
// === Report progress using the ic.progress channel, if required.
if ic.progress != nil && ic.progressInterval > 0 {
destStream = &progressReader{
source: destStream,
channel: ic.progress,
interval: ic.progressInterval,
artifact: srcInfo,
lastTime: time.Now(),
}
}
// === Finally, send the layer stream to dest.
uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
if err != nil {

View file

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/containers/image/pkg/compression"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -63,7 +64,7 @@ func TestDigestingReaderRead(t *testing.T) {
}
}
func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor decompressorFunc) *diffIDResult {
func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor compression.DecompressorFunc) *diffIDResult {
ch := make(chan diffIDResult)
go diffIDComputationGoroutine(ch, layerStream, nil)
timeout := time.After(time.Second)
@@ -94,12 +95,12 @@ func TestDiffIDComputationGoroutine(t *testing.T) {
func TestComputeDiffID(t *testing.T) {
for _, c := range []struct {
filename string
decompressor decompressorFunc
decompressor compression.DecompressorFunc
result digest.Digest
}{
{"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
{"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"},
{"fixtures/Hello.gz", gzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
{"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
} {
stream, err := os.Open(c.filename)
require.NoError(t, err, c.filename)
@@ -111,7 +112,7 @@ func TestComputeDiffID(t *testing.T) {
}
// Error initializing decompression
_, err := computeDiffID(bytes.NewReader([]byte{}), gzipDecompressor)
_, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor)
assert.Error(t, err)
// Error reading input

Binary file not shown.

View file

@@ -0,0 +1 @@
../../pkg/compression/fixtures/Hello.bz2

Binary file not shown.

View file

@@ -0,0 +1 @@
../../pkg/compression/fixtures/Hello.gz

View file

@@ -1 +0,0 @@
Hello

View file

@@ -0,0 +1 @@
../../pkg/compression/fixtures/Hello.uncompressed

Binary file not shown.

View file

@@ -0,0 +1 @@
../../pkg/compression/fixtures/Hello.xz

View file

@@ -0,0 +1,28 @@
package copy
import (
"io"
"time"
"github.com/containers/image/types"
)
// progressReader is a reader that reports its progress on an interval.
type progressReader struct {
source io.Reader
channel chan types.ProgressProperties
interval time.Duration
artifact types.BlobInfo
lastTime time.Time
offset uint64
}
func (r *progressReader) Read(p []byte) (int, error) {
n, err := r.source.Read(p)
r.offset += uint64(n)
if time.Since(r.lastTime) > r.interval {
r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset}
r.lastTime = time.Now()
}
return n, err
}
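
progressReader is the producer side of the new progress reporting: copyBlobFromStream wraps the destination stream with it whenever Options.Progress is set and Options.ProgressInterval is positive. A hedged sketch of the consumer side, showing how a caller of copy.Image might drain the channel; the function name, the one-second interval, and the print format are illustrative, not from the commit:

// Hedged sketch: consuming Options.Progress while copy.Image runs.
package example

import (
	"fmt"
	"time"

	"github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/types"
)

func copyWithProgress(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference) error {
	progress := make(chan types.ProgressProperties)
	done := make(chan struct{})
	go func() {
		// Each report carries the blob being copied and the bytes read so far.
		for p := range progress {
			fmt.Printf("%s: %d bytes copied\n", p.Artifact.Digest, p.Offset)
		}
		close(done)
	}()

	err := copy.Image(policyContext, destRef, srcRef, &copy.Options{
		ProgressInterval: time.Second, // roughly one report per blob per second
		Progress:         progress,
	})

	// Image has returned, so nothing writes to the channel any more.
	close(progress)
	<-done
	return err
}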