Merge pull request #450 from nalind/canonical

Fix pull-by-digest
Antonio Murdaca 2017-04-27 23:18:16 +02:00 committed by GitHub
commit d45ecac0f8
47 changed files with 3071 additions and 1025 deletions

View file

@@ -1,5 +1,5 @@
{
"memo": "e99fe9f7a283d8fb8e0ec8b05fa68d01a7dfa4c7c48b6e85a84986a079685711",
"memo": "5791d48b7e77e9f18a26535dfb184838f1d863f5d364fc9907cf16b6013e9846",
"projects": [
{
"name": "cloud.google.com/go",
@@ -92,20 +92,24 @@
{
"name": "github.com/containers/image",
"branch": "master",
"revision": "9fcd2ba2c6983f74026db5f2c0f79b529a098dee",
"revision": "efae29995d4846ffa6163eb4d466fd61bda43aae",
"packages": [
"copy",
"directory",
"directory/explicitfilepath",
"docker",
"docker/archive",
"docker/daemon",
"docker/policyconfiguration",
"docker/reference",
"docker/tarfile",
"image",
"manifest",
"oci/layout",
"openshift",
"ostree",
"pkg/compression",
"pkg/strslice",
"signature",
"storage",
"transports",
@@ -117,13 +121,13 @@
{
"name": "github.com/containers/storage",
"branch": "master",
"revision": "ff48947baaf205756dd67a00ac688d694a778ef6",
"revision": "d10d8680af74070b362637408a7fe28c4b1f1eff",
"packages": [
"drivers",
"drivers/aufs",
"drivers/btrfs",
"drivers/devmapper",
"drivers/overlay2",
"drivers/overlay",
"drivers/register",
"drivers/vfs",
"drivers/windows",
@@ -149,6 +153,7 @@
"pkg/reexec",
"pkg/stringid",
"pkg/system",
"pkg/truncindex",
"storage",
"storageversion"
]

View file

@@ -43,7 +43,19 @@ type ImageServer interface {
func (svc *imageService) ListImages(filter string) ([]ImageResult, error) {
results := []ImageResult{}
if filter != "" {
if image, err := svc.store.GetImage(filter); err == nil {
ref, err := alltransports.ParseImageName(filter)
if err != nil {
ref2, err2 := istorage.Transport.ParseStoreReference(svc.store, "@"+filter)
if err2 != nil {
ref3, err3 := istorage.Transport.ParseStoreReference(svc.store, filter)
if err3 != nil {
return nil, err
}
ref2 = ref3
}
ref = ref2
}
if image, err := istorage.Transport.GetStoreImage(svc.store, ref); err == nil {
results = append(results, ImageResult{
ID: image.ID,
Names: image.Names,
@@ -136,6 +148,9 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName
if tagged, ok := srcRef.DockerReference().(reference.NamedTagged); ok {
dest = dest + ":" + tagged.Tag()
}
if canonical, ok := srcRef.DockerReference().(reference.Canonical); ok {
dest = dest + "@" + canonical.Digest().String()
}
}
destRef, err := istorage.Transport.ParseStoreReference(svc.store, dest)
if err != nil {
@@ -145,18 +160,6 @@
if err != nil {
return nil, err
}
// Go find the image, and attach the requested name to it, so that we
// can more easily find it later, even if the destination reference
// looks different.
destImage, err := istorage.Transport.GetStoreImage(svc.store, destRef)
if err != nil {
return nil, err
}
names := append(destImage.Names, imageName, dest)
err = svc.store.SetNames(destImage.ID, names)
if err != nil {
return nil, err
}
return destRef, nil
}
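
For context, a minimal sketch of the reference semantics the new branch above relies on (not part of this diff; it assumes the vendored containers/image docker/reference package and reuses the digest from the new test below). A digest-only name satisfies reference.Canonical but not reference.NamedTagged, so without the new branch a pulled-by-digest image would be stored without any digest-qualified name:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// A pull-by-digest image name: no tag, only a canonical digest.
	ref, err := reference.ParseNormalizedNamed("nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315")
	if err != nil {
		panic(err)
	}
	if _, ok := ref.(reference.NamedTagged); !ok {
		fmt.Println("no tag, so the NamedTagged branch is skipped")
	}
	if canonical, ok := ref.(reference.Canonical); ok {
		fmt.Println(canonical.Digest().String()) // the "@<digest>" suffix appended to dest
	}
}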

View file

@@ -32,6 +32,36 @@ function teardown() {
stop_ocid
}
@test "image pull and list by digest" {
start_ocid "" "" --no-pause-image
run ocic image pull nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
echo "$output"
[ "$status" -eq 0 ]
run ocic image list --quiet nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
run ocic image list --quiet nginx@4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
run ocic image list --quiet @4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
run ocic image list --quiet 4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
[ "$status" -eq 0 ]
echo "$output"
[ "$output" != "" ]
cleanup_images
stop_ocid
}
@test "image list with filter" {
start_ocid "" "" --no-pause-image
run ocic image pull "$IMAGE"
@@ -64,6 +94,7 @@ function teardown() {
run ocic image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
printf '%s\n' "$output" | while IFS= read -r id; do
run ocic image remove --id "$id"
echo "$output"
@@ -72,6 +103,7 @@ function teardown() {
run ocic image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" = "" ]
printf '%s\n' "$output" | while IFS= read -r id; do
echo "$id"
status=1
@@ -88,10 +120,12 @@ function teardown() {
run ocic image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
printf '%s\n' "$output" | while IFS= read -r id; do
run ocic image status --id "$id"
echo "$output"
[ "$status" -eq 0 ]
[ "$output" != "" ]
run ocic image remove --id "$id"
echo "$output"
[ "$status" -eq 0 ]
@@ -99,6 +133,7 @@ function teardown() {
run ocic image list --quiet
echo "$output"
[ "$status" -eq 0 ]
[ "$output" = "" ]
printf '%s\n' "$output" | while IFS= read -r id; do
echo "$id"
status=1

View file

@@ -0,0 +1,57 @@
package archive
import (
"io"
"os"
"github.com/containers/image/docker/tarfile"
"github.com/containers/image/types"
"github.com/pkg/errors"
)
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
writer io.Closer
}
func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
if ref.destinationRef == nil {
return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)")
}
fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_EXCL|os.O_CREATE, 0644)
if err != nil {
// FIXME: It should be possible to modify archives, but the only really
// sane way of doing it is to create a copy of the image, modify
// it and then do a rename(2).
if os.IsExist(err) {
err = errors.New("docker-archive doesn't support modifying existing images")
}
return nil, err
}
return &archiveImageDestination{
Destination: tarfile.NewDestination(fh, ref.destinationRef),
ref: ref,
writer: fh,
}, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *archiveImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
return d.writer.Close()
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit() error {
return d.Destination.Commit()
}
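
A hypothetical write-side usage sketch (not part of this diff; path and name invented, error handling reduced to panic). Creating a destination requires the <path>:<reference> form checked above, and the target file must not already exist because of the O_EXCL open:

package main

import (
	"github.com/containers/image/docker/archive"
)

func main() {
	// The ":busybox:latest" suffix supplies the destinationRef required above.
	ref, err := archive.ParseReference("/tmp/busybox.tar:busybox:latest")
	if err != nil {
		panic(err)
	}
	dest, err := ref.NewImageDestination(nil) // fails if /tmp/busybox.tar already exists
	if err != nil {
		panic(err)
	}
	defer dest.Close() // closes the underlying tar file
	// ... PutBlob/PutManifest via the embedded tarfile.Destination, then:
	if err := dest.Commit(); err != nil {
		panic(err)
	}
}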

Binary file not shown.

View file

@@ -0,0 +1,36 @@
package archive
import (
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/tarfile"
"github.com/containers/image/types"
)
type archiveImageSource struct {
*tarfile.Source // Implements most of types.ImageSource
ref archiveReference
}
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
if ref.destinationRef != nil {
logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
}
src := tarfile.NewSource(ref.path)
return &archiveImageSource{
Source: src,
ref: ref,
}
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *archiveImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *archiveImageSource) Close() error {
return nil
}
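
The matching read-side sketch (again not part of this diff; hypothetical path, and signatures as in this vendored revision, where NewImageSource takes a list of requested manifest MIME types):

package main

import (
	"fmt"

	"github.com/containers/image/docker/archive"
)

func main() {
	// Sources need no ":reference" suffix; one given anyway is ignored with a warning.
	ref, err := archive.ParseReference("/tmp/busybox.tar")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(nil, nil)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	manifestBytes, mimeType, err := src.GetManifest()
	if err != nil {
		panic(err)
	}
	fmt.Println(mimeType, len(manifestBytes))
}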

View file

@@ -0,0 +1,155 @@
package archive
import (
"fmt"
"strings"
"github.com/containers/image/docker/reference"
ctrImage "github.com/containers/image/image"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/pkg/errors"
)
func init() {
transports.Register(Transport)
}
// Transport is an ImageTransport for local Docker archives.
var Transport = archiveTransport{}
type archiveTransport struct{}
func (t archiveTransport) Name() string {
return "docker-archive"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
return ParseReference(reference)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// The scope passed to this function will not be ""; that value is always allowed.
func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
// See the explanation in archiveReference.PolicyConfigurationIdentity.
return errors.New(`docker-archive: does not support any scopes except the default "" one`)
}
// archiveReference is an ImageReference for Docker images.
type archiveReference struct {
destinationRef reference.NamedTagged // only used for destinations
path string
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if refString == "" {
return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
}
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
var destinationRef reference.NamedTagged
// A :tag was specified, which is only necessary for destinations.
if len(parts) == 2 {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, errors.Wrapf(err, "docker-archive parsing reference")
}
ref = reference.TagNameOnly(ref)
if _, isDigest := ref.(reference.Canonical); isDigest {
return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
}
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged {
// Really shouldn't be hit...
return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
}
destinationRef = refTagged
}
return archiveReference{
destinationRef: destinationRef,
path: path,
}, nil
}
func (ref archiveReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
if ref.destinationRef == nil {
return ref.path
}
return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
return ref.destinationRef
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref archiveReference) PolicyConfigurationIdentity() string {
// Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
return ""
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// TODO
return []string{}
}
// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned Image.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
src := newImageSource(ctx, ref)
return ctrImage.FromSource(src)
}
// NewImageSource returns a types.ImageSource for this reference,
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
return newImageSource(ctx, ref), nil
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(ctx, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error {
// Not really supported, for safety reasons.
return errors.New("Deleting images not implemented for docker-archive: images")
}

View file

@@ -0,0 +1,198 @@
package archive
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
sha256digest = "@sha256:" + sha256digestHex
tarFixture = "fixtures/almostempty.tar"
)
func TestTransportName(t *testing.T) {
assert.Equal(t, "docker-archive", Transport.Name())
}
func TestTransportParseReference(t *testing.T) {
testParseReference(t, Transport.ParseReference)
}
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
for _, scope := range []string{ // A semi-representative assortment of values; everything is rejected.
"docker.io/library/busybox:notlatest",
"docker.io/library/busybox",
"docker.io/library",
"docker.io",
"",
} {
err := Transport.ValidatePolicyConfigurationScope(scope)
assert.Error(t, err, scope)
}
}
func TestParseReference(t *testing.T) {
testParseReference(t, ParseReference)
}
// testParseReference is a test shared for Transport.ParseReference and ParseReference.
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
for _, c := range []struct{ input, expectedPath, expectedRef string }{
{"", "", ""}, // Empty input is explicitly rejected
{"/path", "/path", ""},
{"/path:busybox:notlatest", "/path", "docker.io/library/busybox:notlatest"}, // Explicit tag
{"/path:busybox" + sha256digest, "", ""}, // Digest references are forbidden
{"/path:busybox", "/path", "docker.io/library/busybox:latest"}, // Default tag
// A github.com/distribution/reference value can have a tag and a digest at the same time!
{"/path:busybox:latest" + sha256digest, "", ""}, // Both tag and digest is rejected
{"/path:docker.io/library/busybox:latest", "/path", "docker.io/library/busybox:latest"}, // All implied values explicitly specified
{"/path:UPPERCASEISINVALID", "", ""}, // Invalid input
} {
ref, err := fn(c.input)
if c.expectedPath == "" {
assert.Error(t, err, c.input)
} else {
require.NoError(t, err, c.input)
archiveRef, ok := ref.(archiveReference)
require.True(t, ok, c.input)
assert.Equal(t, c.expectedPath, archiveRef.path, c.input)
if c.expectedRef == "" {
assert.Nil(t, archiveRef.destinationRef, c.input)
} else {
require.NotNil(t, archiveRef.destinationRef, c.input)
assert.Equal(t, c.expectedRef, archiveRef.destinationRef.String(), c.input)
}
}
}
}
// refWithTagAndDigest is a reference.NamedTagged and reference.Canonical at the same time.
type refWithTagAndDigest struct{ reference.Canonical }
func (ref refWithTagAndDigest) Tag() string {
return "notLatest"
}
// A common list of reference formats to test for the various ImageReference methods.
var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{
{"/pathonly", "", "/pathonly"},
{"/path:busybox:notlatest", "docker.io/library/busybox:notlatest", "/path:docker.io/library/busybox:notlatest"}, // Explicit tag
{"/path:docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "/path:docker.io/library/busybox:latest"}, // All implied values explicitly specified
{"/path:example.com/ns/foo:bar", "example.com/ns/foo:bar", "/path:example.com/ns/foo:bar"}, // All values explicitly specified
}
func TestReferenceTransport(t *testing.T) {
ref, err := ParseReference("/tmp/archive.tar")
require.NoError(t, err)
assert.Equal(t, Transport, ref.Transport())
}
func TestReferenceStringWithinTransport(t *testing.T) {
for _, c := range validReferenceTestCases {
ref, err := ParseReference(c.input)
require.NoError(t, err, c.input)
stringRef := ref.StringWithinTransport()
assert.Equal(t, c.stringWithinTransport, stringRef, c.input)
// Do one more round to verify that the output can be parsed, to an equal value.
ref2, err := Transport.ParseReference(stringRef)
require.NoError(t, err, c.input)
stringRef2 := ref2.StringWithinTransport()
assert.Equal(t, stringRef, stringRef2, c.input)
}
}
func TestReferenceDockerReference(t *testing.T) {
for _, c := range validReferenceTestCases {
ref, err := ParseReference(c.input)
require.NoError(t, err, c.input)
dockerRef := ref.DockerReference()
if c.dockerRef != "" {
require.NotNil(t, dockerRef, c.input)
assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
} else {
require.Nil(t, dockerRef, c.input)
}
}
}
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
for _, c := range validReferenceTestCases {
ref, err := ParseReference(c.input)
require.NoError(t, err, c.input)
assert.Equal(t, "", ref.PolicyConfigurationIdentity(), c.input)
}
}
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
for _, c := range validReferenceTestCases {
ref, err := ParseReference(c.input)
require.NoError(t, err, c.input)
assert.Empty(t, "", ref.PolicyConfigurationNamespaces(), c.input)
}
}
func TestReferenceNewImage(t *testing.T) {
for _, suffix := range []string{"", ":thisisignoredbutaccepted"} {
ref, err := ParseReference(tarFixture + suffix)
require.NoError(t, err, suffix)
img, err := ref.NewImage(nil)
assert.NoError(t, err, suffix)
defer img.Close()
}
}
func TestReferenceNewImageSource(t *testing.T) {
for _, suffix := range []string{"", ":thisisignoredbutaccepted"} {
ref, err := ParseReference(tarFixture + suffix)
require.NoError(t, err, suffix)
src, err := ref.NewImageSource(nil, nil)
assert.NoError(t, err, suffix)
defer src.Close()
}
}
func TestReferenceNewImageDestination(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "docker-archive-test")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
ref, err := ParseReference(filepath.Join(tmpDir, "no-reference"))
require.NoError(t, err)
dest, err := ref.NewImageDestination(nil)
assert.Error(t, err)
ref, err = ParseReference(filepath.Join(tmpDir, "with-reference") + "busybox:latest")
require.NoError(t, err)
dest, err = ref.NewImageDestination(nil)
assert.NoError(t, err)
defer dest.Close()
}
func TestReferenceDeleteImage(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "docker-archive-test")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for i, suffix := range []string{"", ":thisisignoredbutaccepted"} {
testFile := filepath.Join(tmpDir, fmt.Sprintf("file%d.tar", i))
err := ioutil.WriteFile(testFile, []byte("nonempty"), 0644)
require.NoError(t, err, suffix)
ref, err := ParseReference(testFile + suffix)
require.NoError(t, err, suffix)
err = ref.DeleteImage(nil)
assert.Error(t, err, suffix)
_, err = os.Lstat(testFile)
assert.NoError(t, err, suffix)
}
}

View file

@@ -1,36 +1,26 @@
package daemon
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/docker/tarfile"
"github.com/containers/image/types"
"github.com/docker/docker/client"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
type daemonImageDestination struct {
ref daemonReference
namedTaggedRef reference.NamedTagged // Strictly speaking redundant with ref above; having the field makes it structurally impossible for later users to fail.
ref daemonReference
*tarfile.Destination // Implements most of types.ImageDestination
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
statusChannel <-chan error
writer *io.PipeWriter
tar *tar.Writer
// Other state
committed bool // writer has been closed
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
committed bool // writer has been closed
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
@@ -57,13 +47,11 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
return &daemonImageDestination{
ref: ref,
namedTaggedRef: namedTaggedRef,
Destination: tarfile.NewDestination(writer, namedTaggedRef),
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
tar: tar.NewWriter(writer),
committed: false,
blobs: make(map[digest.Digest]types.BlobInfo),
}, nil
}
@@ -115,205 +103,13 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
return d.ref
}
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil is returned, then any mime type can be tried for upload
func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *daemonImageDestination) SupportsSignatures() error {
return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *daemonImageDestination) ShouldCompressLayers() bool {
return false
}
// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (d *daemonImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
if inputInfo.Digest.String() == "" {
return types.BlobInfo{}, errors.Errorf(`Can not stream a blob with unknown digest to "docker-daemon:"`)
}
ok, size, err := d.HasBlob(inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
logrus.Debugf("docker-daemon: input with unknown size, streaming to disk first…")
streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
size, err := io.Copy(streamCopy, stream)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, os.SEEK_SET)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
stream = streamCopy
logrus.Debugf("… streaming done")
}
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
return types.BlobInfo{}, err
}
d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
if blob, ok := d.blobs[info.Digest]; ok {
return true, blob.Size, nil
}
return false, -1, nil
}
func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
func (d *daemonImageDestination) PutManifest(m []byte) error {
var man schema2Manifest
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
layerPaths := []string{}
for _, l := range man.Layers {
layerPaths = append(layerPaths, l.Digest.String())
}
// For github.com/docker/docker consumers, this works just as well as
// refString := d.namedTaggedRef.String() [i.e. d.ref.ref.String()]
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", d.namedTaggedRef.Name(), d.namedTaggedRef.Tag())
items := []manifestItem{{
Config: man.Config.Digest.String(),
RepoTags: []string{refString},
Layers: layerPaths,
Parent: "",
LayerSources: nil,
}}
itemsBytes, err := json.Marshal(&items)
if err != nil {
return err
}
// FIXME? Do we also need to support the legacy format?
return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
}
type tarFI struct {
path string
size int64
}
func (t *tarFI) Name() string {
return t.path
}
func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
return 0444
}
func (t *tarFI) ModTime() time.Time {
return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
return nil
}
// sendFile sends a file into the tar stream.
func (d *daemonImageDestination) sendFile(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
return nil
}
logrus.Debugf("Sending as tar file %s", path)
if err := d.tar.WriteHeader(hdr); err != nil {
return err
}
size, err := io.Copy(d.tar, stream)
if err != nil {
return err
}
if size != expectedSize {
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error {
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit() error {
logrus.Debugf("docker-daemon: Closing tar stream")
if err := d.tar.Close(); err != nil {
if err := d.Destination.Commit(); err != nil {
return err
}
if err := d.writer.Close(); err != nil {

View file

@@ -1,19 +1,13 @@
package daemon
import (
"archive/tar"
"bytes"
"encoding/json"
"io"
"io/ioutil"
"os"
"path"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/docker/tarfile"
"github.com/containers/image/types"
"github.com/docker/docker/client"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@@ -21,16 +15,9 @@ import (
const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
type daemonImageSource struct {
ref daemonReference
tarCopyPath string
// The following data is only available after ensureCachedDataIsPresent() succeeds
tarManifest *manifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
orderedDiffIDList []diffID
knownLayers map[diffID]*layerInfo
// Other state
generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
ref daemonReference
*tarfile.Source // Implements most of types.ImageSource
tarCopyPath string
}
type layerInfo struct {
@@ -81,6 +68,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS
succeeded = true
return &daemonImageSource{
ref: ref,
Source: tarfile.NewSource(tarCopyFile.Name()),
tarCopyPath: tarCopyFile.Name(),
}, nil
}
@@ -95,310 +83,3 @@ func (s *daemonImageSource) Reference() types.ImageReference {
func (s *daemonImageSource) Close() error {
return os.Remove(s.tarCopyPath)
}
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
*tar.Reader
backingFile *os.File
}
func (t *tarReadCloser) Close() error {
return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarCopyPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *daemonImageSource) openTarComponent(componentPath string) (io.ReadCloser, error) {
f, err := os.Open(s.tarCopyPath)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
f.Close()
}
}()
tarReader, header, err := findTarComponent(f, componentPath)
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
if _, err := f.Seek(0, os.SEEK_SET); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
// so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
}
if !header.FileInfo().Mode().IsRegular() {
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
t := tar.NewReader(inputFile)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, nil, err
}
if h.Name == path {
return t, h, nil
}
}
return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) {
file, err := s.openTarComponent(path)
if err != nil {
return nil, errors.Wrapf(err, "Error loading tar component %s", path)
}
defer file.Close()
bytes, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
return bytes, nil
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *daemonImageSource) ensureCachedDataIsPresent() error {
if s.tarManifest != nil {
return nil
}
// Read and parse manifest.json
tarManifest, err := s.loadTarManifest()
if err != nil {
return err
}
// Read and parse config.
configBytes, err := s.readTarComponent(tarManifest.Config)
if err != nil {
return err
}
var parsedConfig dockerImage // Most fields omitted; we only care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
if err != nil {
return err
}
// Success; commit.
s.tarManifest = tarManifest
s.configBytes = configBytes
s.configDigest = digest.FromBytes(configBytes)
s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
s.knownLayers = knownLayers
return nil
}
// loadTarManifest loads and decodes the manifest.json.
func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
// FIXME? Do we need to deal with the legacy format?
bytes, err := s.readTarComponent(manifestFileName)
if err != nil {
return nil, err
}
var items []manifestItem
if err := json.Unmarshal(bytes, &items); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
if len(items) != 1 {
return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
}
return &items[0], nil
}
func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
knownLayers := map[diffID]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
for i, diffID := range parsedConfig.RootFS.DiffIDs {
if _, ok := knownLayers[diffID]; ok {
// Apparently it really can happen that a single image contains the same layer diff more than once.
// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
continue
}
layerPath := tarManifest.Layers[i]
if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
size: -1,
}
knownLayers[diffID] = li
unknownLayerSizes[layerPath] = li
}
// Scan the tar file to collect layer sizes.
file, err := os.Open(s.tarCopyPath)
if err != nil {
return nil, err
}
defer file.Close()
t := tar.NewReader(file)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if li, ok := unknownLayerSizes[h.Name]; ok {
li.size = h.Size
delete(unknownLayerSizes, h.Name)
}
}
if len(unknownLayerSizes) != 0 {
return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
}
return knownLayers, nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
if s.generatedManifest == nil {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, "", err
}
m := schema2Manifest{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
Config: distributionDescriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(s.configBytes)),
Digest: s.configDigest,
},
Layers: []distributionDescriptor{},
}
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
m.Layers = append(m.Layers, distributionDescriptor{
Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
MediaType: manifest.DockerV2Schema2LayerMediaType,
Size: li.size,
})
}
manifestBytes, err := json.Marshal(&m)
if err != nil {
return nil, "", err
}
s.generatedManifest = manifestBytes
}
return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}
// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
// out of a manifest list.
func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
}
type readCloseWrapper struct {
io.Reader
closeFunc func() error
}
func (r readCloseWrapper) Close() error {
if r.closeFunc != nil {
return r.closeFunc()
}
return nil
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
func (s *daemonImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
}
if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
stream, err := s.openTarComponent(li.path)
if err != nil {
return nil, 0, err
}
// In order to handle the fact that digests != diffIDs (and thus that a
// caller which is trying to verify the blob will run into problems),
// we need to decompress blobs. This is a bit ugly, but it's a
// consequence of making everything addressable by their DiffID rather
// than by their digest...
//
// In particular, because the v2s2 manifest being generated uses
// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
// layers not their _actual_ digest. The result is that copy/... will
// be verifying a "digest" which is not the actual layer's digest (but
// is instead the DiffID).
decompressFunc, reader, err := compression.DetectCompression(stream)
if err != nil {
return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
}
if decompressFunc != nil {
reader, err = decompressFunc(reader)
if err != nil {
return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
}
}
newStream := readCloseWrapper{
Reader: reader,
closeFunc: stream.Close,
}
return newStream, li.size, nil
}
return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
func (s *daemonImageSource) GetSignatures() ([][]byte, error) {
return [][]byte{}, nil
}

View file

@@ -0,0 +1,250 @@
package tarfile
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
writer io.Writer
tar *tar.Writer
repoTag string
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
return &Destination{
writer: dest,
tar: tar.NewWriter(dest),
repoTag: refString,
blobs: make(map[digest.Digest]types.BlobInfo),
}
}
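// As an illustration of the normalization described above (a sketch, not part
// of this file): the vendored reference package expands a bare name, so
// "busybox" and "docker.io/library/busybox:latest" yield the same RepoTags entry.
//
//	named, _ := reference.ParseNormalizedNamed("busybox")
//	tagged := reference.TagNameOnly(named).(reference.NamedTagged)
//	_ = fmt.Sprintf("%s:%s", tagged.Name(), tagged.Tag()) // "docker.io/library/busybox:latest"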
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil is returned, then any mime type can be tried for upload
func (d *Destination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures() error {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *Destination) ShouldCompressLayers() bool {
return false
}
// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
if inputInfo.Digest.String() == "" {
return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
}
ok, size, err := d.HasBlob(inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
size, err := io.Copy(streamCopy, stream)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, os.SEEK_SET)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
stream = streamCopy
logrus.Debugf("... streaming done")
}
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
return types.BlobInfo{}, err
}
d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
}
// HasBlob returns true iff the image destination already contains a blob with
// the matching digest which can be reapplied using ReapplyBlob. Unlike
// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
// the blob must also be returned. If the destination does not contain the
// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
// returns a non-nil error only on an unexpected failure.
func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
return true, blob.Size, nil
}
return false, -1, nil
}
// ReapplyBlob informs the image destination that a blob for which HasBlob
// previously returned true would have been passed to PutBlob if it had
// returned false. Like HasBlob and unlike PutBlob, the digest can not be
// empty. If the blob is a filesystem layer, this signifies that the changes
// it describes need to be applied again when composing a filesystem tree.
func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
// PutManifest sends the given manifest blob to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate
// between schema versions.
func (d *Destination) PutManifest(m []byte) error {
var man schema2Manifest
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
layerPaths := []string{}
for _, l := range man.Layers {
layerPaths = append(layerPaths, l.Digest.String())
}
items := []manifestItem{{
Config: man.Config.Digest.String(),
RepoTags: []string{d.repoTag},
Layers: layerPaths,
Parent: "",
LayerSources: nil,
}}
itemsBytes, err := json.Marshal(&items)
if err != nil {
return err
}
// FIXME? Do we also need to support the legacy format?
return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
}
type tarFI struct {
path string
size int64
}
func (t *tarFI) Name() string {
return t.path
}
func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
return 0444
}
func (t *tarFI) ModTime() time.Time {
return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
return nil
}
// sendFile sends a file into the tar stream.
func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
return err // propagate the header-construction error instead of silently dropping it
}
logrus.Debugf("Sending as tar file %s", path)
if err := d.tar.WriteHeader(hdr); err != nil {
return err
}
size, err := io.Copy(d.tar, stream)
if err != nil {
return err
}
if size != expectedSize {
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
// PutSignatures adds the given signatures to the docker tarfile (currently not
// supported). MUST be called after PutManifest (signatures reference manifest
// contents)
func (d *Destination) PutSignatures(signatures [][]byte) error {
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
return nil
}
// Commit finishes writing data to the underlying io.Writer.
// It is the caller's responsibility to close it, if necessary.
func (d *Destination) Commit() error {
return d.tar.Close()
}
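
Putting the pieces together, a hypothetical caller (not part of this diff; file name and reference invented, PutBlob/PutManifest inputs elided) drives a Destination under the ordering documented above: PutManifest before PutSignatures, and Commit before closing the writer:

package main

import (
	"os"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/docker/tarfile"
)

func main() {
	fh, err := os.Create("/tmp/out.tar")
	if err != nil {
		panic(err)
	}
	defer fh.Close() // Commit closes the tar stream but not the underlying writer

	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	dest := tarfile.NewDestination(fh, named.(reference.NamedTagged))
	// ... PutBlob for the config and each layer, then PutManifest, then:
	if err := dest.Commit(); err != nil {
		panic(err)
	}
}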

View file

@@ -0,0 +1,3 @@
// Package tarfile is an internal implementation detail of some transports.
// Do not use outside of the github.com/containers/image repo!
package tarfile

View file

@@ -0,0 +1,352 @@
package tarfile
import (
"archive/tar"
"bytes"
"encoding/json"
"io"
"io/ioutil"
"os"
"path"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
tarPath string
// The following data is only available after ensureCachedDataIsPresent() succeeds
tarManifest *manifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
orderedDiffIDList []diffID
knownLayers map[diffID]*layerInfo
// Other state
generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
}
type layerInfo struct {
path string
size int64
}
// NewSource returns a tarfile.Source for the specified path.
func NewSource(path string) *Source {
// TODO: We could add support for multiple images in a single archive, so
// that people could use docker-archive:opensuse.tar:opensuse:leap as
// the source of an image.
return &Source{
tarPath: path,
}
}
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
*tar.Reader
backingFile *os.File
}
func (t *tarReadCloser) Close() error {
return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
f, err := os.Open(s.tarPath)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
f.Close()
}
}()
tarReader, header, err := findTarComponent(f, componentPath)
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
if _, err := f.Seek(0, os.SEEK_SET); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
// so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
}
if !header.FileInfo().Mode().IsRegular() {
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
t := tar.NewReader(inputFile)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, nil, err
}
if h.Name == path {
return t, h, nil
}
}
return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
func (s *Source) readTarComponent(path string) ([]byte, error) {
file, err := s.openTarComponent(path)
if err != nil {
return nil, errors.Wrapf(err, "Error loading tar component %s", path)
}
defer file.Close()
bytes, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
return bytes, nil
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *Source) ensureCachedDataIsPresent() error {
if s.tarManifest != nil {
return nil
}
// Read and parse manifest.json
tarManifest, err := s.loadTarManifest()
if err != nil {
return err
}
// Read and parse config.
configBytes, err := s.readTarComponent(tarManifest.Config)
if err != nil {
return err
}
var parsedConfig image // Most fields omitted; we only care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
if err != nil {
return err
}
// Success; commit.
s.tarManifest = tarManifest
s.configBytes = configBytes
s.configDigest = digest.FromBytes(configBytes)
s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
s.knownLayers = knownLayers
return nil
}
// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() (*manifestItem, error) {
// FIXME? Do we need to deal with the legacy format?
bytes, err := s.readTarComponent(manifestFileName)
if err != nil {
return nil, err
}
var items []manifestItem
if err := json.Unmarshal(bytes, &items); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
if len(items) != 1 {
return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
}
return &items[0], nil
}
func (s *Source) prepareLayerData(tarManifest *manifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
knownLayers := map[diffID]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
for i, diffID := range parsedConfig.RootFS.DiffIDs {
if _, ok := knownLayers[diffID]; ok {
// Apparently it really can happen that a single image contains the same layer diff more than once.
// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
continue
}
layerPath := tarManifest.Layers[i]
if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
}
li := &layerInfo{ // A new element in each iteration
path: layerPath,
size: -1,
}
knownLayers[diffID] = li
unknownLayerSizes[layerPath] = li
}
// Scan the tar file to collect layer sizes.
file, err := os.Open(s.tarPath)
if err != nil {
return nil, err
}
defer file.Close()
t := tar.NewReader(file)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if li, ok := unknownLayerSizes[h.Name]; ok {
li.size = h.Size
delete(unknownLayerSizes, h.Name)
}
}
if len(unknownLayerSizes) != 0 {
return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
}
return knownLayers, nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *Source) GetManifest() ([]byte, string, error) {
if s.generatedManifest == nil {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, "", err
}
m := schema2Manifest{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
Config: distributionDescriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(s.configBytes)),
Digest: s.configDigest,
},
Layers: []distributionDescriptor{},
}
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
m.Layers = append(m.Layers, distributionDescriptor{
Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
MediaType: manifest.DockerV2Schema2LayerMediaType,
Size: li.size,
})
}
manifestBytes, err := json.Marshal(&m)
if err != nil {
return nil, "", err
}
s.generatedManifest = manifestBytes
}
return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}
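The synthesized manifest serializes to standard Docker schema 2 JSON; a sketch of the output shape (sizes and digests are placeholders, and note the quirk discussed at GetBlob below: the layer descriptors carry uncompressed DiffIDs despite the gzip media type):
// Illustrative GetManifest output (placeholder sizes/digests):
const exampleGeneratedManifest = `{
    "schemaVersion": 2,
    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
    "config": {
        "mediaType": "application/vnd.docker.container.image.v1+json",
        "size": 1234,
        "digest": "sha256:<config digest>"
    },
    "layers": [{
        "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
        "size": 5678,
        "digest": "sha256:<layer DiffID>"
    }]
}`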
// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
// out of a manifest list.
func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
}
type readCloseWrapper struct {
io.Reader
closeFunc func() error
}
func (r readCloseWrapper) Close() error {
if r.closeFunc != nil {
return r.closeFunc()
}
return nil
}
// GetBlob returns a stream for the specified blob, and the blob's size (or -1 if unknown).
func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
}
if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
stream, err := s.openTarComponent(li.path)
if err != nil {
return nil, 0, err
}
// In order to handle the fact that digests != diffIDs (and thus that a
// caller which is trying to verify the blob will run into problems),
// we need to decompress blobs. This is a bit ugly, but it's a
// consequence of making everything addressable by their DiffID rather
// than by their digest...
//
// In particular, because the v2s2 manifest being generated uses
// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
// layers not their _actual_ digest. The result is that copy/... will
// be verifing a "digest" which is not the actual layer's digest (but
// is instead the DiffID).
decompressFunc, reader, err := compression.DetectCompression(stream)
if err != nil {
return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
}
if decompressFunc != nil {
reader, err = decompressFunc(reader)
if err != nil {
return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
}
}
newStream := readCloseWrapper{
Reader: reader,
closeFunc: stream.Close,
}
return newStream, li.size, nil
}
return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
}
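The decompress-before-returning step above is a reusable pattern built on compression.DetectCompression; a standalone sketch under the same API:
// maybeDecompress is an illustrative helper mirroring GetBlob's logic:
// sniff the stream, then transparently decompress it when needed.
func maybeDecompress(r io.Reader) (io.Reader, error) {
    decompress, rest, err := compression.DetectCompression(r)
    if err != nil {
        return nil, err
    }
    if decompress == nil {
        return rest, nil // stream was not compressed
    }
    return decompress(rest)
}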
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
func (s *Source) GetSignatures() ([][]byte, error) {
return [][]byte{}, nil
}

View file

@ -1,4 +1,4 @@
package daemon
package tarfile
import "github.com/opencontainers/go-digest"
@ -43,7 +43,7 @@ type schema2Manifest struct {
// Based on github.com/docker/docker/image/image.go
// MOST CONTENT OMITTED AS UNNECESSARY
type dockerImage struct {
type image struct {
RootFS *rootFS `json:"rootfs,omitempty"`
}

View file

@ -4,8 +4,8 @@ import (
"time"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/strslice"
"github.com/containers/image/types"
"github.com/docker/docker/api/types/strslice"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

View file

@ -0,0 +1,284 @@
package ostree
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type blobToImport struct {
Size int64
Digest digest.Digest
BlobPath string
}
type descriptor struct {
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
}
type manifestSchema struct {
ConfigDescriptor descriptor `json:"config"`
LayersDescriptors []descriptor `json:"layers"`
}
type ostreeImageDestination struct {
ref ostreeReference
manifest string
schema manifestSchema
tmpDirPath string
blobs map[string]*blobToImport
}
// newImageDestination returns an ImageDestination for writing to an existing ostree repository.
func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}}, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *ostreeImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *ostreeImageDestination) Close() error {
return os.RemoveAll(d.tmpDirPath)
}
func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType,
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *ostreeImageDestination) SupportsSignatures() error {
return nil
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *ostreeImageDestination) ShouldCompressLayers() bool {
return false
}
// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
return false
}
func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
}
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
return types.BlobInfo{}, err
}
defer blobFile.Close()
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
size, err := io.Copy(blobFile, tee)
if err != nil {
return types.BlobInfo{}, err
}
computedDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
}
hash := computedDigest.Hex()
d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath}
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
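The hash-while-copying approach in PutBlob is a generally useful go-digest pattern; a minimal self-contained sketch:
// copyWithDigest is an illustrative helper: copy src to dst while
// computing the canonical (sha256) digest of the bytes that pass through,
// exactly as the TeeReader in PutBlob above does.
func copyWithDigest(dst io.Writer, src io.Reader) (digest.Digest, int64, error) {
    digester := digest.Canonical.Digester()
    n, err := io.Copy(dst, io.TeeReader(src, digester.Hash()))
    if err != nil {
        return "", -1, err
    }
    return digester.Digest(), n, nil
}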
func fixUsermodeFiles(dir string) error {
entries, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, info := range entries {
fullpath := filepath.Join(dir, info.Name())
if info.IsDir() {
if err := os.Chmod(dir, info.Mode()|0700); err != nil {
return err
}
err = fixUsermodeFiles(fullpath)
if err != nil {
return err
}
} else if info.Mode().IsRegular() {
if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
return err
}
}
}
return nil
}
func (d *ostreeImageDestination) importBlob(blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
defer func() {
os.Remove(blob.BlobPath)
os.RemoveAll(destinationPath)
}()
if os.Getuid() == 0 {
if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
return err
}
} else {
if err := os.MkdirAll(destinationPath, 0755); err != nil {
return err
}
if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
return err
}
if err := fixUsermodeFiles(destinationPath); err != nil {
return err
}
}
return exec.Command("ostree", "commit",
"--repo", d.ref.repo,
fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size),
"--branch", ostreeBranch,
fmt.Sprintf("--tree=dir=%s", destinationPath)).Run()
}
func (d *ostreeImageDestination) importConfig(blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
return exec.Command("ostree", "commit",
"--repo", d.ref.repo,
fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size),
"--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run()
}
func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput()
if err != nil {
if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 {
return false, -1, nil
}
return false, -1, err
}
size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64)
if err != nil {
return false, -1, err
}
return true, size, nil
}
func (d *ostreeImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
func (d *ostreeImageDestination) PutManifest(manifest []byte) error {
d.manifest = string(manifest)
if err := json.Unmarshal(manifest, &d.schema); err != nil {
return err
}
manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
if err := ensureParentDirectoryExists(manifestPath); err != nil {
return err
}
return ioutil.WriteFile(manifestPath, manifest, 0644)
}
func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error {
path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
if err := ensureParentDirectoryExists(path); err != nil {
return err
}
for i, sig := range signatures {
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
return err
}
}
return nil
}
func (d *ostreeImageDestination) Commit() error {
for _, layer := range d.schema.LayersDescriptors {
hash := layer.Digest.Hex()
blob := d.blobs[hash]
// if the blob is not present in d.blobs then it is already stored in OSTree,
// and we don't need to import it.
if blob == nil {
continue
}
err := d.importBlob(blob)
if err != nil {
return err
}
}
hash := d.schema.ConfigDescriptor.Digest.Hex()
blob := d.blobs[hash]
if blob != nil {
err := d.importConfig(blob)
if err != nil {
return err
}
}
manifestPath := filepath.Join(d.tmpDirPath, "manifest")
err := exec.Command("ostree", "commit",
"--repo", d.ref.repo,
fmt.Sprintf("--add-metadata-string=docker.manifest=%s", string(d.manifest)),
fmt.Sprintf("--branch=ociimage/%s", d.ref.branchName),
manifestPath).Run()
return err
}
func ensureDirectoryExists(path string) error {
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
}
return nil
}
func ensureParentDirectoryExists(path string) error {
return ensureDirectoryExists(filepath.Dir(path))
}

View file

@ -0,0 +1,235 @@
package ostree
import (
"bytes"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/transports"
"github.com/containers/image/types"
)
const defaultOSTreeRepo = "/ostree/repo"
// Transport is an ImageTransport for ostree paths.
var Transport = ostreeTransport{}
type ostreeTransport struct{}
func (t ostreeTransport) Name() string {
return "ostree"
}
func init() {
transports.Register(Transport)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
sep := strings.Index(scope, ":")
if sep < 0 {
return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
}
repo := scope[:sep]
if !strings.HasPrefix(repo, "/") {
return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
}
cleaned := filepath.Clean(repo)
if cleaned != repo {
return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
// FIXME? In the namespaces within a repo,
// we could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}
// ostreeReference is an ImageReference for ostree paths.
type ostreeReference struct {
image string
branchName string
repo string
}
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
var image = ""
s := strings.SplitN(ref, "@/", 2)
if len(s) == 1 {
image, repo = s[0], defaultOSTreeRepo
} else {
image, repo = s[0], "/"+s[1]
}
return NewReference(image, repo)
}
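Concretely, ParseReference splits on the first "@/": everything before it is the image (defaulted to :latest), everything after is an absolute repo path, with /ostree/repo implied when the separator is absent. A hedged sketch (the repo directory must already exist for path resolution to succeed):
// exampleParse is illustrative only; "/srv/repo" is an assumed path.
func exampleParse() {
    ref, err := Transport.ParseReference("busybox:notlatest@/srv/repo")
    if err != nil {
        panic(err)
    }
    r := ref.(ostreeReference)
    fmt.Println(r.image)      // "busybox:notlatest"
    fmt.Println(r.branchName) // "busybox_3Anotlatest"
    fmt.Println(r.repo)       // "/srv/repo"
}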
// NewReference returns an OSTree reference for a specified repo and image.
func NewReference(image string, repo string) (types.ImageReference, error) {
// image is not _really_ in a containers/image/docker/reference format;
// as far as the libOSTree ociimage/* namespace is concerned, it is more or
// less an arbitrary string with an implied tag.
// We use the reference.* parsers basically for the default tag name in
// reference.TagNameOnly, and incidentally for some character set and length
// restrictions.
var ostreeImage reference.Named
s := strings.SplitN(image, ":", 2)
named, err := reference.WithName(s[0])
if err != nil {
return nil, err
}
if len(s) == 1 {
ostreeImage = reference.TagNameOnly(named)
} else {
ostreeImage, err = reference.WithTag(named, s[1])
if err != nil {
return nil, err
}
}
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
if err != nil {
// With os.IsNotExist(err), the parent directory of repo does not exist either;
// that should ordinarily not happen, but it would be a bit weird to reject
// references which do not specify a repo just because the implicit defaultOSTreeRepo
// does not exist.
if os.IsNotExist(err) && repo == defaultOSTreeRepo {
resolved = repo
} else {
return nil, err
}
}
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
image: ostreeImage.String(),
branchName: encodeOStreeRef(ostreeImage.String()),
repo: resolved,
}, nil
}
func (ref ostreeReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref ostreeReference) DockerReference() reference.Named {
return nil
}
func (ref ostreeReference) PolicyConfigurationIdentity() string {
return fmt.Sprintf("%s:%s", ref.repo, ref.image)
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
s := strings.SplitN(ref.image, ":", 2)
if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
name := s[0]
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
lastSlash := strings.LastIndex(name, "/")
if lastSlash == -1 {
break
}
name = name[:lastSlash]
}
return res
}
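A worked example, consistent with the transport tests below: for image "example.com/ns/repo:latest" in repo "/ostree/repo", the identity is the most specific value and the namespaces peel off one path component at a time:
// PolicyConfigurationIdentity() == "/ostree/repo:example.com/ns/repo:latest"
// PolicyConfigurationNamespaces() == []string{
//     "/ostree/repo:example.com/ns/repo",
//     "/ostree/repo:example.com/ns",
//     "/ostree/repo:example.com",
// }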
// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned Image.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
return nil, errors.New("Reading ostree: images is currently not supported")
}
// NewImageSource returns a types.ImageSource for this reference,
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// The caller must call .Close() on the returned ImageSource.
func (ref ostreeReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
return nil, errors.New("Reading ostree: images is currently not supported")
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref ostreeReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
var tmpDir string
if ctx == nil || ctx.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = ctx.OSTreeTmpDirPath
}
return newImageDestination(ref, tmpDir)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref ostreeReference) DeleteImage(ctx *types.SystemContext) error {
return errors.Errorf("Deleting images not implemented for ostree: images")
}
var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
func encodeOStreeRef(in string) string {
var buffer bytes.Buffer
for i := range in {
sub := in[i : i+1]
if ostreeRefRegexp.MatchString(sub) {
buffer.WriteString(sub)
} else {
buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
}
}
return buffer.String()
}
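Each byte outside [A-Za-z0-9.-] is escaped as _XX (its hex value), which keeps the result a valid ostree ref while staying reversible; for example (matching TestEncodeOSTreeRef below):
// encodeOStreeRef("busybox:latest")                   == "busybox_3Alatest"   (':' is 0x3A)
// encodeOStreeRef("docker.io/library/busybox:latest") == "docker.io_2Flibrary_2Fbusybox_3Alatest"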
// manifestPath returns a path for the manifest within an ostree repository, using our conventions.
func (ref ostreeReference) manifestPath() string {
return filepath.Join("manifest", "manifest.json")
}
// signaturePath returns a path for a signature within an ostree repository, using our conventions.
func (ref ostreeReference) signaturePath(index int) string {
return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
}

View file

@ -0,0 +1,316 @@
package ostree
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"path/filepath"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
sha256digest = "@sha256:" + sha256digestHex
)
func TestTransportName(t *testing.T) {
assert.Equal(t, "ostree", Transport.Name())
}
// A helper to replace $TMP in a repo path with a real temporary directory
func withTmpDir(repo string, tmpDir string) string {
return strings.Replace(repo, "$TMP", tmpDir, -1)
}
// A common list of repo suffixes to test for the various ImageReference methods.
var repoSuffixes = []struct{ repoSuffix, resolvedRepo string }{
{"", "/ostree/repo"},
{"@/ostree/repo", "/ostree/repo"}, // /ostree/repo is accepted even if neither /ostree/repo nor /ostree exists, as a special case.
{"@$TMP/at@sign@repo", "$TMP/at@sign@repo"},
// Rejected as ambiguous: /repo:with:colons could either be an (/repo, with:colons) policy configuration identity, or a (/repo:with, colons) policy configuration namespace.
{"@$TMP/repo:with:colons", ""},
}
// A common list of cases for image name parsing and normalization
var imageNameTestcases = []struct{ input, normalized, branchName string }{
{"busybox:notlatest", "busybox:notlatest", "busybox_3Anotlatest"}, // Explicit tag
{"busybox", "busybox:latest", "busybox_3Alatest"}, // Default tag
{"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "docker.io_2Flibrary_2Fbusybox_3Alatest"}, // A hierarchical name
{"UPPERCASEISINVALID", "", ""}, // Invalid input
{"busybox" + sha256digest, "", ""}, // Digested references are not supported (parsed as invalid repository name)
{"busybox:invalid+tag", "", ""}, // Invalid tag value
{"busybox:tag:with:colons", "", ""}, // Multiple colons - treated as a tag which contains a colon, which is invalid
{"", "", ""}, // Empty input is rejected (invalid repository.Named)
}
func TestTransportParseReference(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreeParseReference")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for _, c := range imageNameTestcases {
for _, suffix := range repoSuffixes {
fullInput := c.input + withTmpDir(suffix.repoSuffix, tmpDir)
ref, err := Transport.ParseReference(fullInput)
if c.normalized == "" || suffix.resolvedRepo == "" {
assert.Error(t, err, fullInput)
} else {
require.NoError(t, err, fullInput)
ostreeRef, ok := ref.(ostreeReference)
require.True(t, ok, fullInput)
assert.Equal(t, c.normalized, ostreeRef.image, fullInput)
assert.Equal(t, c.branchName, ostreeRef.branchName, fullInput)
assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, fullInput)
}
}
}
}
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
for _, scope := range []string{
"/etc:docker.io/library/busybox:notlatest", // This also demonstrates that two colons are interpreted as repo:name:tag.
"/etc:docker.io/library/busybox",
"/etc:docker.io/library",
"/etc:docker.io",
"/etc:repo",
"/this/does/not/exist:notlatest",
} {
err := Transport.ValidatePolicyConfigurationScope(scope)
assert.NoError(t, err, scope)
}
for _, scope := range []string{
"/colon missing as a path-reference delimiter",
"relative/path:busybox",
"/double//slashes:busybox",
"/has/./dot:busybox",
"/has/dot/../dot:busybox",
"/trailing/slash/:busybox",
} {
err := Transport.ValidatePolicyConfigurationScope(scope)
assert.Error(t, err, scope)
}
}
func TestNewReference(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreeNewReference")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for _, c := range imageNameTestcases {
for _, suffix := range repoSuffixes {
if suffix.repoSuffix == "" {
continue
}
caseName := c.input + suffix.repoSuffix
ref, err := NewReference(c.input, withTmpDir(strings.TrimPrefix(suffix.repoSuffix, "@"), tmpDir))
if c.normalized == "" || suffix.resolvedRepo == "" {
assert.Error(t, err, caseName)
} else {
require.NoError(t, err, caseName)
ostreeRef, ok := ref.(ostreeReference)
require.True(t, ok, caseName)
assert.Equal(t, c.normalized, ostreeRef.image, caseName)
assert.Equal(t, c.branchName, ostreeRef.branchName, caseName)
assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, caseName)
}
}
}
for _, path := range []string{
"/",
"/etc",
tmpDir,
"relativepath",
tmpDir + "/thisdoesnotexist",
} {
_, err := NewReference("busybox", path)
require.NoError(t, err, path)
}
_, err = NewReference("busybox", tmpDir+"/thisparentdoesnotexist/something")
assert.Error(t, err)
}
// A common list of reference formats to test for the various ImageReference methods.
var validReferenceTestCases = []struct{ input, stringWithinTransport, policyConfigurationIdentity string }{
{"busybox", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // Everything implied
{"busybox:latest@/ostree/repo", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // All implied values explicitly specified
{"example.com/ns/foo:bar@$TMP/non-DEFAULT", "example.com/ns/foo:bar@$TMP/non-DEFAULT", "$TMP/non-DEFAULT:example.com/ns/foo:bar"}, // All values explicitly specified, a hierarchical name
// A non-canonical path. Testing just one, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
{"busybox@$TMP/.", "busybox:latest@$TMP", "$TMP:busybox:latest"},
// "/" as a corner case
{"busybox@/", "busybox:latest@/", "/:busybox:latest"},
}
func TestReferenceTransport(t *testing.T) {
ref, err := Transport.ParseReference("busybox")
require.NoError(t, err)
assert.Equal(t, Transport, ref.Transport())
}
func TestReferenceStringWithinTransport(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreeStringWithinTransport")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for _, c := range validReferenceTestCases {
ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
require.NoError(t, err, c.input)
stringRef := ref.StringWithinTransport()
assert.Equal(t, withTmpDir(c.stringWithinTransport, tmpDir), stringRef, c.input)
// Do one more round to verify that the output can be parsed, to an equal value.
ref2, err := Transport.ParseReference(stringRef)
require.NoError(t, err, c.input)
stringRef2 := ref2.StringWithinTransport()
assert.Equal(t, stringRef, stringRef2, c.input)
}
}
func TestReferenceDockerReference(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreeDockerReference")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for _, c := range validReferenceTestCases {
ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
require.NoError(t, err, c.input)
dockerRef := ref.DockerReference()
assert.Nil(t, dockerRef, c.input)
}
}
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationIdentity")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for _, c := range validReferenceTestCases {
ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
require.NoError(t, err, c.input)
assert.Equal(t, withTmpDir(c.policyConfigurationIdentity, tmpDir), ref.PolicyConfigurationIdentity(), c.input)
}
}
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationNamespaces")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
// Test both that DockerReferenceIdentity returns the expected value (fullName+suffix),
// and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are
// consistent.
for inputName, expectedNS := range map[string][]string{
"example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"},
"example.com/repo": {"example.com/repo", "example.com"},
"localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"},
"localhost/repo": {"localhost/repo", "localhost"},
"ns/repo": {"ns/repo", "ns"},
"repo": {"repo"},
} {
// Test with a known path which should exist. Test just one non-canonical
// path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
for _, repoInput := range []string{tmpDir, tmpDir + "/./."} {
fullName := inputName + ":notlatest"
ref, err := NewReference(fullName, repoInput)
require.NoError(t, err, fullName)
identity := ref.PolicyConfigurationIdentity()
assert.Equal(t, tmpDir+":"+expectedNS[0]+":notlatest", identity, fullName)
ns := ref.PolicyConfigurationNamespaces()
require.NotNil(t, ns, fullName)
require.Len(t, ns, len(expectedNS), fullName)
moreSpecific := identity
for i := range expectedNS {
assert.Equal(t, tmpDir+":"+expectedNS[i], ns[i], fmt.Sprintf("%s item %d", fullName, i))
assert.True(t, strings.HasPrefix(moreSpecific, ns[i]))
moreSpecific = ns[i]
}
}
}
}
func TestReferenceNewImage(t *testing.T) {
ref, err := Transport.ParseReference("busybox")
require.NoError(t, err)
_, err = ref.NewImage(nil)
assert.Error(t, err)
}
func TestReferenceNewImageSource(t *testing.T) {
ref, err := Transport.ParseReference("busybox")
require.NoError(t, err)
_, err = ref.NewImageSource(nil, nil)
assert.Error(t, err)
}
func TestReferenceNewImageDestination(t *testing.T) {
otherTmpDir, err := ioutil.TempDir("", "ostree-transport-test")
require.NoError(t, err)
defer os.RemoveAll(otherTmpDir)
for _, c := range []struct {
ctx *types.SystemContext
tmpDir string
}{
{nil, os.TempDir()},
{&types.SystemContext{}, os.TempDir()},
{&types.SystemContext{OSTreeTmpDirPath: otherTmpDir}, otherTmpDir},
} {
ref, err := Transport.ParseReference("busybox")
require.NoError(t, err)
dest, err := ref.NewImageDestination(c.ctx)
require.NoError(t, err)
ostreeDest, ok := dest.(*ostreeImageDestination)
require.True(t, ok)
assert.Equal(t, c.tmpDir+"/busybox_3Alatest", ostreeDest.tmpDirPath)
defer dest.Close()
}
}
func TestReferenceDeleteImage(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "ostreeDeleteImage")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
ref, err := Transport.ParseReference(withTmpDir("busybox@$TMP/this-repo-does-not-exist", tmpDir))
require.NoError(t, err)
err = ref.DeleteImage(nil)
assert.Error(t, err)
}
func TestEncodeOSTreeRef(t *testing.T) {
// Just a smoke test
assert.Equal(t, "busybox_3Alatest", encodeOStreeRef("busybox:latest"))
}
func TestReferenceManifestPath(t *testing.T) {
ref, err := Transport.ParseReference("busybox")
require.NoError(t, err)
ostreeRef, ok := ref.(ostreeReference)
require.True(t, ok)
assert.Equal(t, fmt.Sprintf("manifest%cmanifest.json", filepath.Separator), ostreeRef.manifestPath())
}
func TestReferenceSignaturePath(t *testing.T) {
ref, err := Transport.ParseReference("busybox")
require.NoError(t, err)
ostreeRef, ok := ref.(ostreeReference)
require.True(t, ok)
for _, c := range []struct {
input int
suffix string
}{
{0, "-1"},
{42, "-43"},
} {
assert.Equal(t, fmt.Sprintf("manifest%csignature%s", filepath.Separator, c.suffix), ostreeRef.signaturePath(c.input), string(c.input))
}
}

View file

@ -0,0 +1 @@
This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).

View file

@ -0,0 +1,30 @@
package strslice
import "encoding/json"
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
// With no input, we preserve the existing value by returning nil and
// leaving the target alone. This allows defining default values for
// the type.
return nil
}
p := make([]string, 0, 1)
if err := json.Unmarshal(b, &p); err != nil {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
p = append(p, s)
}
*e = p
return nil
}
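A small usage sketch: both JSON encodings decode to the same Go value, and empty input leaves any preexisting default untouched:
// exampleStrSlice is illustrative only.
func exampleStrSlice() {
    var a, b StrSlice
    _ = json.Unmarshal([]byte(`"echo hi"`), &a)     // a == StrSlice{"echo hi"}
    _ = json.Unmarshal([]byte(`["echo","hi"]`), &b) // b == StrSlice{"echo", "hi"}
}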

View file

@ -0,0 +1,86 @@
package strslice
import (
"encoding/json"
"reflect"
"testing"
)
func TestStrSliceMarshalJSON(t *testing.T) {
for _, testcase := range []struct {
input StrSlice
expected string
}{
// MADNESS(stevvooe): No clue why nil would be "" but empty would be
// "null". Had to make a change here that may affect compatibility.
{input: nil, expected: "null"},
{StrSlice{}, "[]"},
{StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
} {
data, err := json.Marshal(testcase.input)
if err != nil {
t.Fatal(err)
}
if string(data) != testcase.expected {
t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data))
}
}
}
func TestStrSliceUnmarshalJSON(t *testing.T) {
parts := map[string][]string{
"": {"default", "values"},
"[]": {},
`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
}
for json, expectedParts := range parts {
strs := StrSlice{"default", "values"}
if err := strs.UnmarshalJSON([]byte(json)); err != nil {
t.Fatal(err)
}
actualParts := []string(strs)
if !reflect.DeepEqual(actualParts, expectedParts) {
t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
}
}
}
func TestStrSliceUnmarshalString(t *testing.T) {
var e StrSlice
echo, err := json.Marshal("echo")
if err != nil {
t.Fatal(err)
}
if err := json.Unmarshal(echo, &e); err != nil {
t.Fatal(err)
}
if len(e) != 1 {
t.Fatalf("expected 1 element after unmarshal: %q", e)
}
if e[0] != "echo" {
t.Fatalf("expected `echo`, got: %q", e[0])
}
}
func TestStrSliceUnmarshalSlice(t *testing.T) {
var e StrSlice
echo, err := json.Marshal([]string{"echo"})
if err != nil {
t.Fatal(err)
}
if err := json.Unmarshal(echo, &e); err != nil {
t.Fatal(err)
}
if len(e) != 1 {
t.Fatalf("expected 1 element after unmarshal: %q", e)
}
if e[0] != "echo" {
t.Fatalf("expected `echo`, got: %q", e[0])
}
}

View file

@ -71,14 +71,9 @@ type storageImage struct {
// newImageSource sets us up to read out an image, which needs to already exist.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
id := imageRef.resolveID()
if id == "" {
logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport())
return nil, ErrNoSuchImage
}
img, err := imageRef.transport.store.GetImage(id)
img, err := imageRef.resolveImage()
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", id)
return nil, err
}
image := &storageImageSource{
imageRef: imageRef,
@ -336,21 +331,37 @@ func (s *storageImageDestination) Commit() error {
}
img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
if err != nil {
logrus.Debugf("error creating image: %q", err)
return err
if err != storage.ErrDuplicateID {
logrus.Debugf("error creating image: %q", err)
return errors.Wrapf(err, "error creating image %q", s.ID)
}
img, err = s.imageRef.transport.store.GetImage(s.ID)
if err != nil {
return errors.Wrapf(err, "error reading image %q", s.ID)
}
if img.TopLayer != lastLayer {
logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", err)
return errors.Wrapf(err, "image with ID %q already exists, but uses a different top layer", s.ID)
}
logrus.Debugf("reusing image ID %q", img.ID)
} else {
logrus.Debugf("created new image ID %q", img.ID)
}
logrus.Debugf("created new image ID %q", img.ID)
s.ID = img.ID
names := img.Names
if s.Tag != "" {
// We have a name to set, so move the name to this image.
if err := s.imageRef.transport.store.SetNames(img.ID, []string{s.Tag}); err != nil {
names = append(names, s.Tag)
}
// We have names to set, so move those names to this image.
if len(names) > 0 {
if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error setting names on image %q: %v", img.ID, err)
return err
}
logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
logrus.Debugf("set names of image %q to %v", img.ID, names)
}
// Save the data blobs to disk, and drop their contents from memory.
keys := []ddigest.Digest{}

View file

@ -6,6 +6,8 @@ import (
"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/storage/storage"
"github.com/pkg/errors"
)
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
@ -32,15 +34,36 @@ func newReference(transport storageTransport, reference, id string, name referen
}
// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID.
func (s *storageReference) resolveID() string {
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
if s.id == "" {
image, err := s.transport.store.GetImage(s.reference)
if image != nil && err == nil {
s.id = image.ID
}
}
return s.id
if s.id == "" {
logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return nil, ErrNoSuchImage
}
img, err := s.transport.store.GetImage(s.id)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", s.id)
}
if s.reference != "" {
nameMatch := false
for _, name := range img.Names {
if name == s.reference {
nameMatch = true
break
}
}
if !nameMatch {
logrus.Errorf("no image matching reference %q found", s.StringWithinTransport())
return nil, ErrNoSuchImage
}
}
return img, nil
}
// Return a Transport object that defaults to using the same store that we used
@ -103,14 +126,13 @@ func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error
}
func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
id := s.resolveID()
if id == "" {
logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return ErrNoSuchImage
img, err := s.resolveImage()
if err != nil {
return err
}
layers, err := s.transport.store.DeleteImage(id, true)
layers, err := s.transport.store.DeleteImage(img.ID, true)
if err == nil {
logrus.Debugf("deleted image %q", id)
logrus.Debugf("deleted image %q", img.ID)
for _, layer := range layers {
logrus.Debugf("deleted layer %q", layer)
}

View file

@ -2,7 +2,6 @@ package storage
import (
"path/filepath"
"regexp"
"strings"
"github.com/pkg/errors"
@ -30,7 +29,6 @@ var (
// ErrPathNotAbsolute is returned when a graph root is not an absolute
// path name.
ErrPathNotAbsolute = errors.New("path name is not absolute")
idRegexp = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$")
)
// StoreTransport is an ImageTransport that uses a storage.Store to parse
@ -100,9 +98,12 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
return nil, err
}
}
sum, err = digest.Parse("sha256:" + refInfo[1])
if err != nil {
return nil, err
sum, err = digest.Parse(refInfo[1])
if err != nil || sum.Validate() != nil {
sum, err = digest.Parse("sha256:" + refInfo[1])
if err != nil || sum.Validate() != nil {
return nil, err
}
}
} else { // Coverage: len(refInfo) is always 1 or 2
// Anything else: store specified in a form we don't
@ -285,7 +286,7 @@ func verboseName(name reference.Named) string {
name = reference.TagNameOnly(name)
tag := ""
if tagged, ok := name.(reference.NamedTagged); ok {
tag = tagged.Tag()
tag = ":" + tagged.Tag()
}
return name.Name() + ":" + tag
return name.Name() + tag
}
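Moving the ":" inside the conditional matters for the pull-by-digest fix: reference.TagNameOnly does not add a tag to canonical (digested) references, so the old concatenation produced a name with a stray trailing colon. Illustratively:
// For an untagged (digested) reference named "docker.io/library/busybox":
//   before: name.Name() + ":" + "" -> "docker.io/library/busybox:"  (broken lookup key)
//   after:  name.Name() + ""       -> "docker.io/library/busybox"
// Tagged references are unaffected: both versions yield "...busybox:latest".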

View file

@ -34,10 +34,10 @@ func TestTransportParseStoreReference(t *testing.T) {
{"busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, explicit tag
{"docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, everything explicit
{"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@ID
{"busybox@ab", "", ""}, // Invalid ID in name@ID
{"busybox@", "", ""}, // Empty ID in name@ID
{"busybox@sha256:" + sha256digestHex, "", ""}, // This (a digested docker/docker reference format) is also invalid, since it's an invalid ID in name@ID
{"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@ID
{"busybox@ab", "", ""}, // Invalid ID in name@ID
{"busybox@", "", ""}, // Empty ID in name@ID
{"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, with ID using "sha256:" prefix
{"@" + sha256digestHex, "", sha256digestHex}, // Valid two-component name, with ID only
{"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, implicit tag
{"busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, explicit tag

View file

@ -8,9 +8,11 @@ import (
// a transport.
_ "github.com/containers/image/directory"
_ "github.com/containers/image/docker"
_ "github.com/containers/image/docker/archive"
_ "github.com/containers/image/docker/daemon"
_ "github.com/containers/image/oci/layout"
_ "github.com/containers/image/openshift"
_ "github.com/containers/image/ostree"
_ "github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/types"

View file

@ -30,6 +30,8 @@ func TestImageNameHandling(t *testing.T) {
{"docker", "//busybox:notlatest", "//busybox:notlatest"}, // This also tests handling of multiple ":" characters
{"docker-daemon", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
{"docker-daemon", "busybox:latest", "busybox:latest"},
{"docker-archive", "/var/lib/oci/busybox.tar:busybox:latest", "/var/lib/oci/busybox.tar:docker.io/library/busybox:latest"},
{"docker-archive", "busybox.tar:busybox:latest", "busybox.tar:docker.io/library/busybox:latest"},
{"oci", "/etc:sometag", "/etc:sometag"},
// "atomic" not tested here because it depends on per-user configuration for the default cluster.
// "containers-storage" not tested here because it needs to initialize various directories on the fs.

View file

@ -299,6 +299,8 @@ type SystemContext struct {
// Note that this field is used mainly to integrate containers/image into projectatomic/docker
// in order to not break any existing docker's integration tests.
DockerDisableV1Ping bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
}
// ProgressProperties is used to pass information from the copy code to a monitor which

View file

@ -20,25 +20,10 @@ func container(flags *mflag.FlagSet, action string, m storage.Store, args []stri
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
}
containers, err := m.Containers()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
}
matches := []storage.Container{}
for _, container := range containers {
nextContainer:
for _, arg := range args {
if container.ID == arg {
matches = append(matches, container)
break nextContainer
}
for _, name := range container.Names {
if name == arg {
matches = append(matches, container)
break nextContainer
}
}
matches := []*storage.Container{}
for _, arg := range args {
if container, err := m.GetContainer(arg); err == nil {
matches = append(matches, container)
}
}
if jsonOutput {

View file

@ -15,25 +15,10 @@ var (
)
func image(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
images, err := m.Images()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
}
matched := []storage.Image{}
for _, image := range images {
nextImage:
for _, arg := range args {
if image.ID == arg {
matched = append(matched, image)
break nextImage
}
for _, name := range image.Names {
if name == arg {
matched = append(matched, image)
break nextImage
}
}
matched := []*storage.Image{}
for _, arg := range args {
if image, err := m.GetImage(arg); err == nil {
matched = append(matched, image)
}
}
if jsonOutput {

View file

@ -1,6 +1,6 @@
// +build linux
package overlay2
package overlay
import (
"bytes"

View file

@ -1,6 +1,6 @@
// +build linux
package overlay2
package overlay
import (
"bufio"
@ -61,10 +61,9 @@ var (
// that mounts do not fail due to length.
const (
driverName = "overlay2"
linkDir = "l"
lowerFile = "lower"
maxDepth = 128
linkDir = "l"
lowerFile = "lower"
maxDepth = 128
// idLength represents the number of random characters
// which can be used to create the unique link identifier
@ -78,6 +77,7 @@ const (
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct {
name string
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
@ -87,13 +87,13 @@ type Driver struct {
var backingFs = "<unknown>"
func init() {
graphdriver.Register(driverName, Init)
graphdriver.Register("overlay", InitAsOverlay)
graphdriver.Register("overlay2", InitAsOverlay2)
}
// Init returns a naive diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as the error.
// If the overlay filesystem is not supported over the existing filesystem, the error graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
// InitWithName returns the a naive diff driver for the overlay filesystem,
// which returns the passed-in name when asked which driver it is.
func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
opts, err := parseOptions(options)
if err != nil {
return nil, err
@ -112,7 +112,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
if !opts.overrideKernelCheck {
return nil, graphdriver.ErrNotSupported
}
logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
}
fsMagic, err := graphdriver.GetFSMagic(home)
@ -126,7 +126,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
switch fsMagic {
case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay2' is not supported over %s", backingFs)
logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
@ -144,6 +144,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
d := &Driver{
name: name,
home: home,
uidMaps: uidMaps,
gidMaps: gidMaps,
@ -153,6 +154,20 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return d, nil
}
// InitAsOverlay returns a naive diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as the error.
// If the overlay filesystem is not supported over the existing filesystem, the error graphdriver.ErrIncompatibleFS is returned.
func InitAsOverlay(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return InitWithName("overlay", home, options, uidMaps, gidMaps)
}
// InitAsOverlay2 returns a naive diff driver for the overlay filesystem.
// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as the error.
// If the overlay filesystem is not supported over the existing filesystem, the error graphdriver.ErrIncompatibleFS is returned.
func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return InitWithName("overlay2", home, options, uidMaps, gidMaps)
}
type overlayOptions struct {
overrideKernelCheck bool
}
@ -166,13 +181,13 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
key = strings.ToLower(key)
switch key {
case "overlay2.override_kernel_check":
case "overlay.override_kernel_check", "overlay2.override_kernel_check":
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("overlay2: Unknown option %s", key)
return nil, fmt.Errorf("overlay: Unknown option %s", key)
}
}
return o, nil
@ -200,7 +215,7 @@ func supportsOverlay() error {
}
func (d *Driver) String() string {
return driverName
return d.name
}
// Status returns current driver information in a two dimensional string array.

View file

@ -1,6 +1,6 @@
// +build linux
package overlay2
package overlay
import (
"os"
@ -13,6 +13,8 @@ import (
"github.com/containers/storage/pkg/reexec"
)
const driverName = "overlay"
func init() {
// Do not use chroot, to speed up run time and allow archive
// errors or hangs to be debugged directly from the test process.

View file

@ -1,6 +1,6 @@
// +build linux
package overlay2
package overlay
import (
"crypto/rand"

View file

@ -3,6 +3,6 @@
package register
import (
// register the overlay2 graphdriver
_ "github.com/containers/storage/drivers/overlay2"
// register the overlay graphdriver
_ "github.com/containers/storage/drivers/overlay"
)

View file

@ -1,7 +1,7 @@
#!/bin/bash
set -e
CROSSPLATFORMS="linux/amd64 linux/386 linux/arm"
CROSSPLATFORMS="linux/amd64 linux/386 linux/arm darwin/amd64"
BUILDTAGS+=" exclude_graphdriver_devicemapper"
for platform in $CROSSPLATFORMS; do

View file

@ -0,0 +1,137 @@
// Package truncindex provides a general 'index tree', used by Docker
// in order to be able to reference containers by only a few unambiguous
// characters of their id.
package truncindex
import (
"errors"
"fmt"
"strings"
"sync"
"github.com/tchap/go-patricia/patricia"
)
var (
// ErrEmptyPrefix is an error returned if the prefix was empty.
ErrEmptyPrefix = errors.New("Prefix can't be empty")
// ErrIllegalChar is returned when a space is in the ID
ErrIllegalChar = errors.New("illegal character: ' '")
// ErrNotExist is returned when ID or its prefix not found in index.
ErrNotExist = errors.New("ID does not exist")
)
// ErrAmbiguousPrefix is returned if the prefix was ambiguous
// (multiple ids for the prefix).
type ErrAmbiguousPrefix struct {
prefix string
}
func (e ErrAmbiguousPrefix) Error() string {
return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
}
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
sync.RWMutex
trie *patricia.Trie
ids map[string]struct{}
}
// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
func NewTruncIndex(ids []string) (idx *TruncIndex) {
idx = &TruncIndex{
ids: make(map[string]struct{}),
// Change patricia max prefix per node length,
// because our len(ID) is always 64
trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
}
for _, id := range ids {
idx.addID(id)
}
return
}
func (idx *TruncIndex) addID(id string) error {
if strings.Contains(id, " ") {
return ErrIllegalChar
}
if id == "" {
return ErrEmptyPrefix
}
if _, exists := idx.ids[id]; exists {
return fmt.Errorf("id already exists: '%s'", id)
}
idx.ids[id] = struct{}{}
if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
return fmt.Errorf("failed to insert id: %s", id)
}
return nil
}
// Add adds a new ID to the TruncIndex.
func (idx *TruncIndex) Add(id string) error {
idx.Lock()
defer idx.Unlock()
if err := idx.addID(id); err != nil {
return err
}
return nil
}
// Delete removes an ID from the TruncIndex. If there are multiple IDs
// with the given prefix, an error is returned.
func (idx *TruncIndex) Delete(id string) error {
idx.Lock()
defer idx.Unlock()
if _, exists := idx.ids[id]; !exists || id == "" {
return fmt.Errorf("no such id: '%s'", id)
}
delete(idx.ids, id)
if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
return fmt.Errorf("no such id: '%s'", id)
}
return nil
}
// Get retrieves an ID from the TruncIndex. If there are multiple IDs
// with the given prefix, an error is returned.
func (idx *TruncIndex) Get(s string) (string, error) {
if s == "" {
return "", ErrEmptyPrefix
}
var (
id string
)
subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
if id != "" {
// the prefix is ambiguous if it matches two or more IDs
id = ""
return ErrAmbiguousPrefix{prefix: string(prefix)}
}
id = string(prefix)
return nil
}
idx.RLock()
defer idx.RUnlock()
if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
return "", err
}
if id != "" {
return id, nil
}
return "", ErrNotExist
}
// Iterate iterates over all stored IDs, and passes each of them to the given handler.
func (idx *TruncIndex) Iterate(handler func(id string)) {
idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
handler(string(prefix))
return nil
})
}
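A minimal usage sketch of the index (IDs are illustrative 40-character hex strings, as in the tests below):
// exampleTruncIndex resolves full IDs from unambiguous prefixes.
func exampleTruncIndex() {
    idx := NewTruncIndex([]string{
        "99b36c2c326ccc11e726eee6ee78a0baf166ef96",
        "99b4aa2c326ccc11e726eee6ee78a0baf166ef96",
    })
    full, err := idx.Get("99b36") // unique prefix: resolves to the first ID
    fmt.Println(full, err)
    _, err = idx.Get("99b") // matches both IDs: returns ErrAmbiguousPrefix
    fmt.Println(err)
}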

View file

@ -0,0 +1,429 @@
package truncindex
import (
"math/rand"
"testing"
"github.com/containers/storage/pkg/stringid"
)
// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
func TestTruncIndex(t *testing.T) {
ids := []string{}
index := NewTruncIndex(ids)
// Get on an empty index
if _, err := index.Get("foobar"); err == nil {
t.Fatal("Get on an empty index should return an error")
}
// Spaces should be illegal in an id
if err := index.Add("I have a space"); err == nil {
t.Fatalf("Adding an id with ' ' should return an error")
}
id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
// Add an id
if err := index.Add(id); err != nil {
t.Fatal(err)
}
// Add an empty id (should fail)
if err := index.Add(""); err == nil {
t.Fatalf("Adding an empty id should return an error")
}
// Get a non-existing id
assertIndexGet(t, index, "abracadabra", "", true)
// Get an empty id
assertIndexGet(t, index, "", "", true)
// Get the exact id
assertIndexGet(t, index, id, id, false)
// The first letter should match
assertIndexGet(t, index, id[:1], id, false)
// The first half should match
assertIndexGet(t, index, id[:len(id)/2], id, false)
// The second half should NOT match
assertIndexGet(t, index, id[len(id)/2:], "", true)
id2 := id[:6] + "blabla"
// Add an id
if err := index.Add(id2); err != nil {
t.Fatal(err)
}
// Both exact IDs should work
assertIndexGet(t, index, id, id, false)
assertIndexGet(t, index, id2, id2, false)
// 6 characters or less should conflict
assertIndexGet(t, index, id[:6], "", true)
assertIndexGet(t, index, id[:4], "", true)
assertIndexGet(t, index, id[:1], "", true)
// An ambiguous id prefix should return an error
if _, err := index.Get(id[:4]); err == nil {
t.Fatal("An ambiguous id prefix should return an error")
}
// 7 characters should NOT conflict
assertIndexGet(t, index, id[:7], id, false)
assertIndexGet(t, index, id2[:7], id2, false)
// Deleting a non-existing id should return an error
if err := index.Delete("non-existing"); err == nil {
t.Fatalf("Deleting a non-existing id should return an error")
}
// Deleting an empty id should return an error
if err := index.Delete(""); err == nil {
t.Fatal("Deleting an empty id should return an error")
}
// Deleting id2 should remove conflicts
if err := index.Delete(id2); err != nil {
t.Fatal(err)
}
// id2 should no longer work
assertIndexGet(t, index, id2, "", true)
assertIndexGet(t, index, id2[:7], "", true)
assertIndexGet(t, index, id2[:11], "", true)
// conflicts between id and id2 should be gone
assertIndexGet(t, index, id[:6], id, false)
assertIndexGet(t, index, id[:4], id, false)
assertIndexGet(t, index, id[:1], id, false)
// non-conflicting substrings should still not conflict
assertIndexGet(t, index, id[:7], id, false)
assertIndexGet(t, index, id[:15], id, false)
assertIndexGet(t, index, id, id, false)
assertIndexIterate(t)
}
func assertIndexIterate(t *testing.T) {
ids := []string{
"19b36c2c326ccc11e726eee6ee78a0baf166ef96",
"28b36c2c326ccc11e726eee6ee78a0baf166ef96",
"37b36c2c326ccc11e726eee6ee78a0baf166ef96",
"46b36c2c326ccc11e726eee6ee78a0baf166ef96",
}
index := NewTruncIndex(ids)
index.Iterate(func(targetId string) {
for _, id := range ids {
if targetId == id {
return
}
}
t.Fatalf("An unknown ID '%s'", targetId)
})
}
func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) {
if result, err := index.Get(input); err != nil && !expectError {
t.Fatalf("Unexpected error getting '%s': %s", input, err)
} else if err == nil && expectError {
t.Fatalf("Getting '%s' should return an error, not '%s'", input, result)
} else if result != expectedResult {
t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
}
}
func BenchmarkTruncIndexAdd100(b *testing.B) {
var testSet []string
for i := 0; i < 100; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkTruncIndexAdd250(b *testing.B) {
var testSet []string
for i := 0; i < 250; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkTruncIndexAdd500(b *testing.B) {
var testSet []string
for i := 0; i < 500; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkTruncIndexGet100(b *testing.B) {
var testSet []string
var testKeys []string
for i := 0; i < 100; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, id := range testKeys {
if res, err := index.Get(id); err != nil {
b.Fatal(res, err)
}
}
}
}
func BenchmarkTruncIndexGet250(b *testing.B) {
var testSet []string
var testKeys []string
for i := 0; i < 250; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, id := range testKeys {
if res, err := index.Get(id); err != nil {
b.Fatal(res, err)
}
}
}
}
func BenchmarkTruncIndexGet500(b *testing.B) {
var testSet []string
var testKeys []string
for i := 0; i < 500; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, id := range testKeys {
if res, err := index.Get(id); err != nil {
b.Fatal(res, err)
}
}
}
}
func BenchmarkTruncIndexDelete100(b *testing.B) {
var testSet []string
for i := 0; i < 100; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
b.StartTimer()
for _, id := range testSet {
if err := index.Delete(id); err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkTruncIndexDelete250(b *testing.B) {
var testSet []string
for i := 0; i < 250; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
b.StartTimer()
for _, id := range testSet {
if err := index.Delete(id); err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkTruncIndexDelete500(b *testing.B) {
var testSet []string
for i := 0; i < 500; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
b.StartTimer()
for _, id := range testSet {
if err := index.Delete(id); err != nil {
b.Fatal(err)
}
}
}
}
func BenchmarkTruncIndexNew100(b *testing.B) {
var testSet []string
for i := 0; i < 100; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
NewTruncIndex(testSet)
}
}
func BenchmarkTruncIndexNew250(b *testing.B) {
var testSet []string
for i := 0; i < 250; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
NewTruncIndex(testSet)
}
}
func BenchmarkTruncIndexNew500(b *testing.B) {
var testSet []string
for i := 0; i < 500; i++ {
testSet = append(testSet, stringid.GenerateNonCryptoID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
NewTruncIndex(testSet)
}
}
func BenchmarkTruncIndexAddGet100(b *testing.B) {
var testSet []string
var testKeys []string
for i := 0; i < 100; i++ {
id := stringid.GenerateNonCryptoID()
testSet = append(testSet, id)
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
for _, id := range testKeys {
if res, err := index.Get(id); err != nil {
b.Fatal(res, err)
}
}
}
}
func BenchmarkTruncIndexAddGet250(b *testing.B) {
var testSet []string
var testKeys []string
for i := 0; i < 250; i++ {
id := stringid.GenerateNonCryptoID()
testSet = append(testSet, id)
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
for _, id := range testKeys {
if res, err := index.Get(id); err != nil {
b.Fatal(res, err)
}
}
}
}
func BenchmarkTruncIndexAddGet500(b *testing.B) {
var testSet []string
var testKeys []string
for i := 0; i < 500; i++ {
id := stringid.GenerateNonCryptoID()
testSet = append(testSet, id)
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
index := NewTruncIndex([]string{})
for _, id := range testSet {
if err := index.Add(id); err != nil {
b.Fatal(err)
}
}
for _, id := range testKeys {
if res, err := index.Get(id); err != nil {
b.Fatal(res, err)
}
}
}
}

View file

@@ -10,6 +10,7 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
)
var (
@@ -93,6 +94,7 @@ type containerStore struct {
lockfile Locker
dir string
containers []Container
idindex *truncindex.TruncIndex
byid map[string]*Container
bylayer map[string]*Container
byname map[string]*Container
@@ -123,10 +125,12 @@ func (r *containerStore) Load() error {
}
containers := []Container{}
layers := make(map[string]*Container)
idlist := []string{}
ids := make(map[string]*Container)
names := make(map[string]*Container)
if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = &containers[n]
layers[container.LayerID] = &containers[n]
for _, name := range container.Names {
@@ -139,6 +143,7 @@ func (r *containerStore) Load() error {
}
}
r.containers = containers
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.bylayer = layers
r.byname = names
@@ -185,30 +190,35 @@ func newContainerStore(dir string) (ContainerStore, error) {
return &cstore, nil
}
func (r *containerStore) ClearFlag(id string, flag string) error {
if container, ok := r.byname[id]; ok {
id = container.ID
func (r *containerStore) lookup(id string) (*Container, bool) {
if container, ok := r.byid[id]; ok {
return container, ok
} else if container, ok := r.byname[id]; ok {
return container, ok
} else if container, ok := r.bylayer[id]; ok {
id = container.ID
return container, ok
} else if longid, err := r.idindex.Get(id); err == nil {
if container, ok := r.byid[longid]; ok {
return container, ok
}
}
if _, ok := r.byid[id]; !ok {
return nil, false
}
func (r *containerStore) ClearFlag(id string, flag string) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
container := r.byid[id]
delete(container.Flags, flag)
return r.Save()
}
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
if container, ok := r.byname[id]; ok {
id = container.ID
} else if container, ok := r.bylayer[id]; ok {
id = container.ID
}
if _, ok := r.byid[id]; !ok {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
container := r.byid[id]
container.Flags[flag] = value
return r.Save()
}
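The new lookup helper fixes a single resolution order for every accessor: exact ID first, then name, then layer ID, then a unique truncated prefix via the index (the image and layer stores below gain the same helper). A standalone sketch of that pattern follows; the store type, the container names, and the data are invented for illustration, and the real method is the one above:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/truncindex"
)

// store mirrors just the resolution order of containerStore.lookup.
type store struct {
	byid    map[string]string
	byname  map[string]string
	idindex *truncindex.TruncIndex
}

func (s *store) lookup(key string) (string, bool) {
	if v, ok := s.byid[key]; ok { // exact ID
		return v, true
	}
	if v, ok := s.byname[key]; ok { // name
		return v, true
	}
	if longid, err := s.idindex.Get(key); err == nil { // unique prefix
		if v, ok := s.byid[longid]; ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
	s := &store{
		byid:    map[string]string{id: "container-a"},
		byname:  map[string]string{"web": "container-a"},
		idindex: truncindex.NewTruncIndex([]string{id}),
	}
	for _, key := range []string{id, "web", id[:8]} {
		v, ok := s.lookup(key)
		fmt.Println(key, "->", v, ok)
	}
}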
@@ -244,6 +254,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
r.containers = append(r.containers, newContainer)
container = &r.containers[len(r.containers)-1]
r.byid[id] = container
r.idindex.Add(id)
r.bylayer[layer] = container
for _, name := range names {
r.byname[name] = container
@@ -254,24 +265,14 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
}
func (r *containerStore) GetMetadata(id string) (string, error) {
if container, ok := r.byname[id]; ok {
id = container.ID
} else if container, ok := r.bylayer[id]; ok {
id = container.ID
}
if container, ok := r.byid[id]; ok {
if container, ok := r.lookup(id); ok {
return container.Metadata, nil
}
return "", ErrContainerUnknown
}
func (r *containerStore) SetMetadata(id, metadata string) error {
if container, ok := r.byname[id]; ok {
id = container.ID
} else if container, ok := r.bylayer[id]; ok {
id = container.ID
}
if container, ok := r.byid[id]; ok {
if container, ok := r.lookup(id); ok {
container.Metadata = metadata
return r.Save()
}
@@ -279,22 +280,11 @@ func (r *containerStore) SetMetadata(id, metadata string) error {
}
func (r *containerStore) removeName(container *Container, name string) {
newNames := []string{}
for _, oldName := range container.Names {
if oldName != name {
newNames = append(newNames, oldName)
}
}
container.Names = newNames
container.Names = stringSliceWithoutValue(container.Names, name)
}
func (r *containerStore) SetNames(id string, names []string) error {
if container, ok := r.byname[id]; ok {
id = container.ID
} else if container, ok := r.bylayer[id]; ok {
id = container.ID
}
if container, ok := r.byid[id]; ok {
if container, ok := r.lookup(id); ok {
for _, name := range container.Names {
delete(r.byname, name)
}
@@ -311,133 +301,104 @@ func (r *containerStore) SetNames(id string, names []string) error {
}
func (r *containerStore) Delete(id string) error {
if container, ok := r.byname[id]; ok {
id = container.ID
} else if container, ok := r.bylayer[id]; ok {
id = container.ID
}
if _, ok := r.byid[id]; !ok {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
if container, ok := r.byid[id]; ok {
newContainers := []Container{}
for _, candidate := range r.containers {
if candidate.ID != id {
newContainers = append(newContainers, candidate)
}
}
delete(r.byid, container.ID)
delete(r.bylayer, container.LayerID)
for _, name := range container.Names {
delete(r.byname, name)
}
r.containers = newContainers
if err := r.Save(); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
return err
id = container.ID
newContainers := []Container{}
for _, candidate := range r.containers {
if candidate.ID != id {
newContainers = append(newContainers, candidate)
}
}
delete(r.byid, id)
r.idindex.Delete(id)
delete(r.bylayer, container.LayerID)
for _, name := range container.Names {
delete(r.byname, name)
}
r.containers = newContainers
if err := r.Save(); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
return err
}
return nil
}
func (r *containerStore) Get(id string) (*Container, error) {
if c, ok := r.byname[id]; ok {
return c, nil
} else if c, ok := r.bylayer[id]; ok {
return c, nil
}
if c, ok := r.byid[id]; ok {
return c, nil
if container, ok := r.lookup(id); ok {
return container, nil
}
return nil, ErrContainerUnknown
}
func (r *containerStore) Lookup(name string) (id string, err error) {
container, ok := r.byname[name]
if !ok {
container, ok = r.byid[name]
if !ok {
return "", ErrContainerUnknown
}
if container, ok := r.lookup(name); ok {
return container.ID, nil
}
return container.ID, nil
return "", ErrContainerUnknown
}
func (r *containerStore) Exists(id string) bool {
if _, ok := r.byname[id]; ok {
return true
}
if _, ok := r.bylayer[id]; ok {
return true
}
if _, ok := r.byid[id]; ok {
return true
}
return false
_, ok := r.lookup(id)
return ok
}
func (r *containerStore) GetBigData(id, key string) ([]byte, error) {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
return ioutil.ReadFile(r.datapath(id, key))
return ioutil.ReadFile(r.datapath(c.ID, key))
}
func (r *containerStore) GetBigDataSize(id, key string) (int64, error) {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
if size, ok := r.byid[id].BigDataSizes[key]; ok {
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
return -1, ErrSizeUnknown
}
func (r *containerStore) GetBigDataNames(id string) ([]string, error) {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
}
return r.byid[id].BigDataNames, nil
return c.BigDataNames, nil
}
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
c, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
if err := os.MkdirAll(r.datadir(id), 0700); err != nil {
if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(id, key), data, 0600)
err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
if err == nil {
save := false
oldSize, ok := r.byid[id].BigDataSizes[key]
r.byid[id].BigDataSizes[key] = int64(len(data))
if !ok || oldSize != r.byid[id].BigDataSizes[key] {
oldSize, ok := c.BigDataSizes[key]
c.BigDataSizes[key] = int64(len(data))
if !ok || oldSize != c.BigDataSizes[key] {
save = true
}
add := true
for _, name := range r.byid[id].BigDataNames {
for _, name := range c.BigDataNames {
if name == key {
add = false
break
}
}
if add {
r.byid[id].BigDataNames = append(r.byid[id].BigDataNames, key)
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
if save {

View file

@@ -10,6 +10,7 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
)
var (
@@ -88,6 +89,7 @@ type imageStore struct {
lockfile Locker
dir string
images []Image
idindex *truncindex.TruncIndex
byid map[string]*Image
byname map[string]*Image
}
@@ -116,11 +118,13 @@ func (r *imageStore) Load() error {
return err
}
images := []Image{}
idlist := []string{}
ids := make(map[string]*Image)
names := make(map[string]*Image)
if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
for n, image := range images {
ids[image.ID] = &images[n]
idlist = append(idlist, image.ID)
for _, name := range image.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
@@ -131,6 +135,7 @@ func (r *imageStore) Load() error {
}
}
r.images = images
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
if needSave {
@@ -175,26 +180,32 @@ func newImageStore(dir string) (ImageStore, error) {
return &istore, nil
}
func (r *imageStore) ClearFlag(id string, flag string) error {
if image, ok := r.byname[id]; ok {
id = image.ID
func (r *imageStore) lookup(id string) (*Image, bool) {
if image, ok := r.byid[id]; ok {
return image, ok
} else if image, ok := r.byname[id]; ok {
return image, ok
} else if longid, err := r.idindex.Get(id); err == nil {
image, ok := r.byid[longid]
return image, ok
}
if _, ok := r.byid[id]; !ok {
return nil, false
}
func (r *imageStore) ClearFlag(id string, flag string) error {
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
image := r.byid[id]
delete(image.Flags, flag)
return r.Save()
}
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if image, ok := r.byname[id]; ok {
id = image.ID
}
if _, ok := r.byid[id]; !ok {
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
image := r.byid[id]
image.Flags[flag] = value
return r.Save()
}
@@ -228,6 +239,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string) (
}
r.images = append(r.images, newImage)
image = &r.images[len(r.images)-1]
r.idindex.Add(id)
r.byid[id] = image
for _, name := range names {
r.byname[name] = image
@@ -238,20 +250,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string) (
}
func (r *imageStore) GetMetadata(id string) (string, error) {
if image, ok := r.byname[id]; ok {
id = image.ID
}
if image, ok := r.byid[id]; ok {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
}
return "", ErrImageUnknown
}
func (r *imageStore) SetMetadata(id, metadata string) error {
if image, ok := r.byname[id]; ok {
id = image.ID
}
if image, ok := r.byid[id]; ok {
if image, ok := r.lookup(id); ok {
image.Metadata = metadata
return r.Save()
}
@@ -259,20 +265,11 @@ func (r *imageStore) SetMetadata(id, metadata string) error {
}
func (r *imageStore) removeName(image *Image, name string) {
newNames := []string{}
for _, oldName := range image.Names {
if oldName != name {
newNames = append(newNames, oldName)
}
}
image.Names = newNames
image.Names = stringSliceWithoutValue(image.Names, name)
}
func (r *imageStore) SetNames(id string, names []string) error {
if image, ok := r.byname[id]; ok {
id = image.ID
}
if image, ok := r.byid[id]; ok {
if image, ok := r.lookup(id); ok {
for _, name := range image.Names {
delete(r.byname, name)
}
@@ -289,125 +286,103 @@ func (r *imageStore) SetNames(id string, names []string) error {
}
func (r *imageStore) Delete(id string) error {
if image, ok := r.byname[id]; ok {
id = image.ID
}
if _, ok := r.byid[id]; !ok {
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
if image, ok := r.byid[id]; ok {
newImages := []Image{}
for _, candidate := range r.images {
if candidate.ID != id {
newImages = append(newImages, candidate)
}
}
delete(r.byid, image.ID)
for _, name := range image.Names {
delete(r.byname, name)
}
r.images = newImages
if err := r.Save(); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
return err
id = image.ID
newImages := []Image{}
for _, candidate := range r.images {
if candidate.ID != id {
newImages = append(newImages, candidate)
}
}
delete(r.byid, id)
r.idindex.Delete(id)
for _, name := range image.Names {
delete(r.byname, name)
}
r.images = newImages
if err := r.Save(); err != nil {
return err
}
if err := os.RemoveAll(r.datadir(id)); err != nil {
return err
}
return nil
}
func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.byname[id]; ok {
return image, nil
}
if image, ok := r.byid[id]; ok {
if image, ok := r.lookup(id); ok {
return image, nil
}
return nil, ErrImageUnknown
}
func (r *imageStore) Lookup(name string) (id string, err error) {
image, ok := r.byname[name]
if !ok {
image, ok = r.byid[name]
if !ok {
return "", ErrImageUnknown
}
if image, ok := r.lookup(name); ok {
return image.ID, nil
}
return image.ID, nil
return "", ErrImageUnknown
}
func (r *imageStore) Exists(id string) bool {
if _, ok := r.byname[id]; ok {
return true
}
if _, ok := r.byid[id]; ok {
return true
}
return false
_, ok := r.lookup(id)
return ok
}
func (r *imageStore) GetBigData(id, key string) ([]byte, error) {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
image, ok := r.lookup(id)
if !ok {
return nil, ErrImageUnknown
}
return ioutil.ReadFile(r.datapath(id, key))
return ioutil.ReadFile(r.datapath(image.ID, key))
}
func (r *imageStore) GetBigDataSize(id, key string) (int64, error) {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if size, ok := r.byid[id].BigDataSizes[key]; ok {
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
return -1, ErrSizeUnknown
}
func (r *imageStore) GetBigDataNames(id string) ([]string, error) {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
image, ok := r.lookup(id)
if !ok {
return nil, ErrImageUnknown
}
return r.byid[id].BigDataNames, nil
return image.BigDataNames, nil
}
func (r *imageStore) SetBigData(id, key string, data []byte) error {
if img, ok := r.byname[id]; ok {
id = img.ID
}
if _, ok := r.byid[id]; !ok {
image, ok := r.lookup(id)
if !ok {
return ErrImageUnknown
}
if err := os.MkdirAll(r.datadir(id), 0700); err != nil {
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(id, key), data, 0600)
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
add := true
save := false
oldSize, ok := r.byid[id].BigDataSizes[key]
r.byid[id].BigDataSizes[key] = int64(len(data))
if !ok || oldSize != r.byid[id].BigDataSizes[key] {
oldSize, ok := image.BigDataSizes[key]
image.BigDataSizes[key] = int64(len(data))
if !ok || oldSize != image.BigDataSizes[key] {
save = true
}
for _, name := range r.byid[id].BigDataNames {
for _, name := range image.BigDataNames {
if name == key {
add = false
break
}
}
if add {
r.byid[id].BigDataNames = append(r.byid[id].BigDataNames, key)
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
if save {

View file

@@ -15,6 +15,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
@@ -159,6 +160,7 @@ type layerStore struct {
driver drivers.Driver
layerdir string
layers []Layer
idindex *truncindex.TruncIndex
byid map[string]*Layer
byname map[string]*Layer
byparent map[string][]*Layer
@@ -185,6 +187,7 @@ func (r *layerStore) Load() error {
return err
}
layers := []Layer{}
idlist := []string{}
ids := make(map[string]*Layer)
names := make(map[string]*Layer)
mounts := make(map[string]*Layer)
@@ -192,6 +195,7 @@ func (r *layerStore) Load() error {
if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
for n, layer := range layers {
ids[layer.ID] = &layers[n]
idlist = append(idlist, layer.ID)
for _, name := range layer.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
@@ -224,6 +228,7 @@ func (r *layerStore) Load() error {
}
}
r.layers = layers
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
r.byparent = parents
@@ -312,26 +317,32 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (Layer
return &rlstore, nil
}
func (r *layerStore) ClearFlag(id string, flag string) error {
if layer, ok := r.byname[id]; ok {
id = layer.ID
func (r *layerStore) lookup(id string) (*Layer, bool) {
if layer, ok := r.byid[id]; ok {
return layer, ok
} else if layer, ok := r.byname[id]; ok {
return layer, ok
} else if longid, err := r.idindex.Get(id); err == nil {
layer, ok := r.byid[longid]
return layer, ok
}
if _, ok := r.byid[id]; !ok {
return nil, false
}
func (r *layerStore) ClearFlag(id string, flag string) error {
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
layer := r.byid[id]
delete(layer.Flags, flag)
return r.Save()
}
func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if _, ok := r.byid[id]; !ok {
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
layer := r.byid[id]
layer.Flags[flag] = value
return r.Save()
}
@@ -348,8 +359,10 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
if err := os.MkdirAll(r.layerdir, 0700); err != nil {
return nil, -1, err
}
if parentLayer, ok := r.byname[parent]; ok {
parent = parentLayer.ID
if parent != "" {
if parentLayer, ok := r.lookup(parent); ok {
parent = parentLayer.ID
}
}
if id == "" {
id = stringid.GenerateRandomID()
@@ -382,6 +395,7 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
}
r.layers = append(r.layers, newLayer)
layer = &r.layers[len(r.layers)-1]
r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names {
r.byname[name] = layer
@@ -436,48 +450,39 @@ func (r *layerStore) Create(id, parent string, names []string, mountLabel string
}
func (r *layerStore) Mount(id, mountLabel string) (string, error) {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if _, ok := r.byid[id]; !ok {
layer, ok := r.lookup(id)
if !ok {
return "", ErrLayerUnknown
}
layer := r.byid[id]
if layer.MountCount > 0 {
layer.MountCount++
return layer.MountPoint, r.Save()
}
if mountLabel == "" {
if layer, ok := r.byid[id]; ok {
mountLabel = layer.MountLabel
}
mountLabel = layer.MountLabel
}
mountpoint, err := r.driver.Get(id, mountLabel)
if mountpoint != "" && err == nil {
if layer, ok := r.byid[id]; ok {
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
layer.MountPoint = filepath.Clean(mountpoint)
layer.MountCount++
r.bymount[layer.MountPoint] = layer
err = r.Save()
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
layer.MountPoint = filepath.Clean(mountpoint)
layer.MountCount++
r.bymount[layer.MountPoint] = layer
err = r.Save()
}
return mountpoint, err
}
func (r *layerStore) Unmount(id string) error {
if layer, ok := r.bymount[filepath.Clean(id)]; ok {
id = layer.ID
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
if !ok {
return ErrLayerUnknown
}
layer = layerByMount
}
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if _, ok := r.byid[id]; !ok {
return ErrLayerUnknown
}
layer := r.byid[id]
if layer.MountCount > 1 {
layer.MountCount--
return r.Save()
@@ -495,20 +500,11 @@ func (r *layerStore) Unmount(id string) error {
}
func (r *layerStore) removeName(layer *Layer, name string) {
newNames := []string{}
for _, oldName := range layer.Names {
if oldName != name {
newNames = append(newNames, oldName)
}
}
layer.Names = newNames
layer.Names = stringSliceWithoutValue(layer.Names, name)
}
func (r *layerStore) SetNames(id string, names []string) error {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if layer, ok := r.byid[id]; ok {
if layer, ok := r.lookup(id); ok {
for _, name := range layer.Names {
delete(r.byname, name)
}
@@ -525,20 +521,14 @@ func (r *layerStore) SetNames(id string, names []string) error {
}
func (r *layerStore) GetMetadata(id string) (string, error) {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if layer, ok := r.byid[id]; ok {
if layer, ok := r.lookup(id); ok {
return layer.Metadata, nil
}
return "", ErrLayerUnknown
}
func (r *layerStore) SetMetadata(id, metadata string) error {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if layer, ok := r.byid[id]; ok {
if layer, ok := r.lookup(id); ok {
layer.Metadata = metadata
return r.Save()
}
@@ -550,13 +540,12 @@ func (r *layerStore) tspath(id string) string {
}
func (r *layerStore) Delete(id string) error {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
if _, ok := r.byid[id]; !ok {
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
for r.byid[id].MountCount > 0 {
id = layer.ID
for layer.MountCount > 0 {
if err := r.Unmount(id); err != nil {
return err
}
@@ -564,66 +553,55 @@ func (r *layerStore) Delete(id string) error {
err := r.driver.Remove(id)
if err == nil {
os.Remove(r.tspath(id))
if layer, ok := r.byid[id]; ok {
pslice := r.byparent[layer.Parent]
newPslice := []*Layer{}
for _, candidate := range pslice {
if candidate.ID != id {
newPslice = append(newPslice, candidate)
}
pslice := r.byparent[layer.Parent]
newPslice := []*Layer{}
for _, candidate := range pslice {
if candidate.ID != id {
newPslice = append(newPslice, candidate)
}
delete(r.byid, layer.ID)
if len(newPslice) > 0 {
r.byparent[layer.Parent] = newPslice
} else {
delete(r.byparent, layer.Parent)
}
for _, name := range layer.Names {
delete(r.byname, name)
}
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
newLayers := []Layer{}
for _, candidate := range r.layers {
if candidate.ID != id {
newLayers = append(newLayers, candidate)
}
}
r.layers = newLayers
if err = r.Save(); err != nil {
return err
}
delete(r.byid, id)
r.idindex.Delete(id)
if len(newPslice) > 0 {
r.byparent[layer.Parent] = newPslice
} else {
delete(r.byparent, layer.Parent)
}
for _, name := range layer.Names {
delete(r.byname, name)
}
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
newLayers := []Layer{}
for _, candidate := range r.layers {
if candidate.ID != id {
newLayers = append(newLayers, candidate)
}
}
r.layers = newLayers
if err = r.Save(); err != nil {
return err
}
}
return err
}
func (r *layerStore) Lookup(name string) (id string, err error) {
layer, ok := r.byname[name]
if !ok {
layer, ok = r.byid[name]
if !ok {
return "", ErrLayerUnknown
}
if layer, ok := r.lookup(name); ok {
return layer.ID, nil
}
return layer.ID, nil
return "", ErrLayerUnknown
}
func (r *layerStore) Exists(id string) bool {
if layer, ok := r.byname[id]; ok {
id = layer.ID
}
l, exists := r.byid[id]
return l != nil && exists
_, ok := r.lookup(id)
return ok
}
func (r *layerStore) Get(id string) (*Layer, error) {
if l, ok := r.byname[id]; ok {
return l, nil
}
if l, ok := r.byid[id]; ok {
return l, nil
if layer, ok := r.lookup(id); ok {
return layer, nil
}
return nil, ErrLayerUnknown
}
@@ -641,22 +619,32 @@ func (r *layerStore) Wipe() error {
return nil
}
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
if layer, ok := r.byname[from]; ok {
from = layer.ID
}
if layer, ok := r.byname[to]; ok {
to = layer.ID
func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer *Layer, toLayer *Layer, err error) {
var ok bool
toLayer, ok = r.lookup(to)
if !ok {
return "", "", nil, nil, ErrLayerUnknown
}
to = toLayer.ID
if from == "" {
if layer, ok := r.byid[to]; ok {
from = layer.Parent
from = toLayer.Parent
}
if from != "" {
fromLayer, ok = r.lookup(from)
if !ok {
fromLayer, ok = r.lookup(toLayer.Parent)
if !ok {
return "", "", nil, nil, ErrParentUnknown
}
}
from = fromLayer.ID
}
if to == "" {
return nil, ErrLayerUnknown
}
if _, ok := r.byid[to]; !ok {
return from, to, fromLayer, toLayer, nil
}
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
from, to, _, _, err := r.findParentAndLayer(from, to)
if err != nil {
return nil, ErrLayerUnknown
}
return r.driver.Changes(to, from)
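findParentAndLayer centralizes the fallback that Changes, Diff, and DiffSize previously each implemented: an empty from means "diff against the parent of to". A distilled, runnable sketch of just that rule, using stub types and invented layer IDs (the real method also resolves names and truncated IDs through lookup):

package main

import (
	"errors"
	"fmt"
)

// layer is a stub with only the fields the fallback rule needs.
type layer struct {
	ID     string
	Parent string
}

var errLayerUnknown = errors.New("layer not known")

// findFrom distills findParentAndLayer: an empty "from" is replaced by
// the parent of "to" before the driver is asked for changes.
func findFrom(byid map[string]*layer, from, to string) (string, string, error) {
	toLayer, ok := byid[to]
	if !ok {
		return "", "", errLayerUnknown
	}
	if from == "" {
		from = toLayer.Parent
	}
	return from, toLayer.ID, nil
}

func main() {
	byid := map[string]*layer{
		"base": {ID: "base"},
		"top":  {ID: "top", Parent: "base"},
	}
	from, to, err := findFrom(byid, "", "top")
	fmt.Println(from, to, err) // base top <nil>
}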
@@ -694,32 +682,19 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) {
var metadata storage.Unpacker
if layer, ok := r.byname[from]; ok {
from = layer.ID
}
if layer, ok := r.byname[to]; ok {
to = layer.ID
}
if from == "" {
if layer, ok := r.byid[to]; ok {
from = layer.Parent
}
}
if to == "" {
return nil, ErrParentUnknown
}
if _, ok := r.byid[to]; !ok {
from, to, _, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
return nil, ErrLayerUnknown
}
compression := archive.Uncompressed
if cflag, ok := r.byid[to].Flags[compressionFlag]; ok {
if cflag, ok := toLayer.Flags[compressionFlag]; ok {
if ctype, ok := cflag.(float64); ok {
compression = archive.Compression(ctype)
} else if ctype, ok := cflag.(archive.Compression); ok {
compression = archive.Compression(ctype)
}
}
if from != r.byid[to].Parent {
if from != toLayer.Parent {
diff, err := r.driver.Diff(to, from)
if err == nil && (compression != archive.Uncompressed) {
preader, pwriter := io.Pipe()
@@ -797,31 +772,15 @@ func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) {
}
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
if layer, ok := r.byname[from]; ok {
from = layer.ID
}
if layer, ok := r.byname[to]; ok {
to = layer.ID
}
if from == "" {
if layer, ok := r.byid[to]; ok {
from = layer.Parent
}
}
if to == "" {
return -1, ErrParentUnknown
}
if _, ok := r.byid[to]; !ok {
from, to, _, _, err = r.findParentAndLayer(from, to)
if err != nil {
return -1, ErrLayerUnknown
}
return r.driver.DiffSize(to, from)
}
func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) {
if layer, ok := r.byname[to]; ok {
to = layer.ID
}
layer, ok := r.byid[to]
layer, ok := r.lookup(to)
if !ok {
return -1, ErrLayerUnknown
}

View file

@@ -4,9 +4,10 @@ import (
"os"
"path/filepath"
"sync"
"syscall"
"time"
"golang.org/x/sys/unix"
"github.com/containers/storage/pkg/stringid"
)
@@ -51,7 +52,7 @@ func GetLockfile(path string) (Locker, error) {
if locker, ok := lockfiles[filepath.Clean(path)]; ok {
return locker, nil
}
fd, err := syscall.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, syscall.S_IRUSR|syscall.S_IWUSR)
fd, err := unix.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
if err != nil {
return nil, err
}
@@ -61,28 +62,28 @@ func GetLockfile(path string) (Locker, error) {
}
func (l *lockfile) Lock() {
lk := syscall.Flock_t{
Type: syscall.F_WRLCK,
lk := unix.Flock_t{
Type: unix.F_WRLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
l.mu.Lock()
for syscall.FcntlFlock(l.fd, syscall.F_SETLKW, &lk) != nil {
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
func (l *lockfile) Unlock() {
lk := syscall.Flock_t{
Type: syscall.F_UNLCK,
lk := unix.Flock_t{
Type: unix.F_UNLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
for syscall.FcntlFlock(l.fd, syscall.F_SETLKW, &lk) != nil {
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
l.mu.Unlock()
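The switch from syscall to golang.org/x/sys/unix keeps the same fcntl(2) record-locking calls; with Start and Len left at zero, the lock covers the whole file. A minimal standalone sketch of the underlying lock/unlock pair, with the retry loop above replaced by plain error returns (the scratch path /tmp/example.lock is invented):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Open (or create) a scratch lock file.
	fd, err := unix.Open("/tmp/example.lock", os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	// Take an exclusive (write) record lock over the whole file.
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: int16(os.SEEK_SET),
	}
	if err := unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk); err != nil {
		fmt.Println("lock:", err)
		return
	}
	fmt.Println("locked")

	// Release it again.
	lk.Type = unix.F_UNLCK
	if err := unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk); err != nil {
		fmt.Println("unlock:", err)
	}
}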
@@ -91,18 +92,18 @@ func (l *lockfile) Unlock() {
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := syscall.Seek(int(l.fd), 0, os.SEEK_SET)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return err
}
n, err := syscall.Write(int(l.fd), id)
n, err := unix.Write(int(l.fd), id)
if err != nil {
return err
}
if n != len(id) {
return syscall.ENOSPC
return unix.ENOSPC
}
err = syscall.Fsync(int(l.fd))
err = unix.Fsync(int(l.fd))
if err != nil {
return err
}
@@ -111,16 +112,16 @@ func (l *lockfile) Touch() error {
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := syscall.Seek(int(l.fd), 0, os.SEEK_SET)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
}
n, err := syscall.Read(int(l.fd), id)
n, err := unix.Read(int(l.fd), id)
if err != nil {
return true, err
}
if n != len(id) {
return true, syscall.ENOSPC
return true, unix.ENOSPC
}
lw := l.lw
l.lw = string(id)
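Touch stamps the lock file with a fresh random writer ID, and Modified re-reads it to report whether some other process wrote since our last Touch. A hedged sketch of that handshake, assuming the Locker returned by GetLockfile exposes the methods shown here (the path is invented; GetLockfile caches lockers per cleaned path):

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	lock, err := storage.GetLockfile("/tmp/example-storage.lock")
	if err != nil {
		fmt.Println("GetLockfile:", err)
		return
	}

	lock.Lock()
	if err := lock.Touch(); err != nil { // record our writer ID
		fmt.Println("Touch:", err)
	}
	lock.Unlock()

	lock.Lock()
	modified, err := lock.Modified() // true only if another writer Touch()ed since
	lock.Unlock()
	fmt.Println("modified by another writer:", modified, err)
}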
@@ -128,11 +129,11 @@ func (l *lockfile) Modified() (bool, error) {
}
func (l *lockfile) TouchedSince(when time.Time) bool {
st := syscall.Stat_t{}
err := syscall.Fstat(int(l.fd), &st)
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtim.Unix())
touched := time.Unix(statTMtimeUnix(st))
return when.Before(touched)
}

View file

@@ -0,0 +1,11 @@
// +build linux solaris
package storage
import (
"golang.org/x/sys/unix"
)
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
return st.Mtim.Unix()
}

View file

@@ -0,0 +1,11 @@
// +build !linux,!solaris
package storage
import (
"golang.org/x/sys/unix"
)
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
return st.Mtimespec.Unix()
}
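These two files exist because unix.Stat_t names its modification-time field Mtim on Linux and Solaris but Mtimespec on the BSD-style platforms; the build tags select the matching accessor at compile time. A standalone copy for the Linux case, showing the call shape TouchedSince relies on (the stat'ed path is invented):

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// Linux-only standalone copy of the accessor above; on BSD-style
// systems the field is Mtimespec instead, which the second
// build-tagged file handles.
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
	return st.Mtim.Unix()
}

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/tmp", &st); err != nil {
		fmt.Println("stat:", err)
		return
	}
	// time.Unix accepts the (sec, nsec) pair directly.
	fmt.Println("mtime:", time.Unix(statTMtimeUnix(st)))
}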

View file

@@ -2176,6 +2176,17 @@ func makeBigDataBaseName(key string) string {
return key
}
func stringSliceWithoutValue(slice []string, value string) []string {
modified := []string{}
for _, v := range slice {
if v == value {
continue
}
modified = append(modified, v)
}
return modified
}
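stringSliceWithoutValue is what lets the three removeName methods above shrink to one line each. A standalone copy with a quick demonstration (the names are invented):

package main

import "fmt"

// Standalone copy of the helper above: it drops every occurrence of
// value, which is why removeName no longer needs its own loop.
func stringSliceWithoutValue(slice []string, value string) []string {
	modified := []string{}
	for _, v := range slice {
		if v == value {
			continue
		}
		modified = append(modified, v)
	}
	return modified
}

func main() {
	names := []string{"nginx", "web", "web"}
	fmt.Println(stringSliceWithoutValue(names, "web")) // [nginx]
}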
func init() {
DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"

View file

@@ -10,6 +10,7 @@ github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d