diff --git a/Dockerfile b/Dockerfile
index 86ea8d7b..6f170b26 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -97,7 +97,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install crictl
-ENV CRICTL_COMMIT 9ff5e8f78a4182ab8d5ba9bcccdda5f338600eab
+ENV CRICTL_COMMIT b42fc3f364dd48f649d55926c34492beeb9b2e99
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/kubernetes-incubator/cri-tools.git "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \
diff --git a/Makefile b/Makefile
index 5c669831..c458fd37 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,7 @@ MANDIR ?= ${PREFIX}/share/man
 ETCDIR ?= ${DESTDIR}/etc
 ETCDIR_CRIO ?= ${ETCDIR}/crio
 BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh)
+CRICTL_CONFIG_DIR=${DESTDIR}/etc
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
 OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d
 
@@ -152,6 +153,7 @@ install.config:
 	install ${SELINUXOPT} -D -m 644 crio.conf $(ETCDIR_CRIO)/crio.conf
 	install ${SELINUXOPT} -D -m 644 seccomp.json $(ETCDIR_CRIO)/seccomp.json
 	install ${SELINUXOPT} -D -m 644 crio-umount.conf $(OCIUMOUNTINSTALLDIR)/crio-umount.conf
+	install ${SELINUXOPT} -D -m 644 crictl.yaml $(CRICTL_CONFIG_DIR)/crictl.yaml
 
 install.completions:
 	install ${SELINUXOPT} -d -m 755 ${BASHINSTALLDIR}
diff --git a/cmd/crioctl/container.go b/cmd/crioctl/container.go
index 126e0bc6..0ce783af 100644
--- a/cmd/crioctl/container.go
+++ b/cmd/crioctl/container.go
@@ -471,7 +471,10 @@ func ContainerStatus(client pb.RuntimeServiceClient, ID string) error {
 	if r.Status.Image != nil {
 		fmt.Printf("Image: %v\n", r.Status.Image.Image)
 	}
-	fmt.Printf("ImageRef: %v\n", r.Status.ImageRef)
+	//
+	// TODO: https://github.com/kubernetes-incubator/cri-o/issues/531
+	//
+	//fmt.Printf("ImageRef: %v\n", r.Status.ImageRef)
 	return nil
 }
 
diff --git a/contrib/test/integration/build/cri-tools.yml b/contrib/test/integration/build/cri-tools.yml
index 9a117f3c..5d748112 100644
--- a/contrib/test/integration/build/cri-tools.yml
+++ b/contrib/test/integration/build/cri-tools.yml
@@ -4,7 +4,7 @@
   git:
     repo: "https://github.com/kubernetes-incubator/cri-tools.git"
     dest: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-tools"
-    version: "9ff5e8f78a4182ab8d5ba9bcccdda5f338600eab"
+    version: "b42fc3f364dd48f649d55926c34492beeb9b2e99"
 
 - name: install crictl
   command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/crictl"
diff --git a/crictl.yaml b/crictl.yaml
new file mode 100644
index 00000000..841cbe47
--- /dev/null
+++ b/crictl.yaml
@@ -0,0 +1 @@
+runtime-endpoint: /var/run/crio.sock
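Note on the two hunks above: the Makefile installs the new crictl.yaml into /etc, and crictl then picks up its runtime-endpoint from that file instead of needing a --runtime-endpoint flag on every call. A minimal sketch of reading such a file, assuming gopkg.in/yaml.v2 and a made-up loadCrictlConfig helper (neither is part of this PR):

package main

import (
	"fmt"
	"io/ioutil"

	yaml "gopkg.in/yaml.v2"
)

// crictlConfig mirrors the single key shipped in crictl.yaml above; the
// struct and field names are illustrative, not taken from crictl's source.
type crictlConfig struct {
	RuntimeEndpoint string `yaml:"runtime-endpoint"`
}

// loadCrictlConfig reads and decodes a crictl-style YAML config file.
func loadCrictlConfig(path string) (*crictlConfig, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := &crictlConfig{}
	if err := yaml.Unmarshal(data, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	cfg, err := loadCrictlConfig("/etc/crictl.yaml")
	if err != nil {
		panic(err)
	}
	// With the file from this PR installed, this prints /var/run/crio.sock.
	fmt.Println("runtime endpoint:", cfg.RuntimeEndpoint)
}

Any YAML decoder would do; the point is only that the endpoint now comes from installed configuration, which is why install.config ships the file.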
diff --git a/pkg/storage/image.go b/pkg/storage/image.go
index 011e5a5d..fbe36f28 100644
--- a/pkg/storage/image.go
+++ b/pkg/storage/image.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
-	"path"
+	"path/filepath"
 	"regexp"
 	"strings"
 
@@ -23,11 +23,9 @@ import (
 // ImageResult wraps a subset of information about an image: its ID, its names,
 // and the size, if known, or nil if it isn't.
 type ImageResult struct {
-	ID       string
-	Names    []string
-	Digests  []string
-	Size     *uint64
-	ImageRef string
+	ID    string
+	Names []string
+	Size  *uint64
 	// TODO(runcom): this is a hack for https://github.com/kubernetes-incubator/cri-o/pull/1136
 	// drop this when we have proper image IDs (as in, image IDs should be just
 	// the config blob digest which is stable across same images.
@@ -59,9 +57,6 @@ type ImageServer interface {
 	PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error)
 	// PullImage imports an image from the specified location.
 	PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error)
-	// UntagImage removes a name from the specified image, and if it was
-	// the only name the image had, removes the image.
-	UntagImage(systemContext *types.SystemContext, imageName string) error
 	// RemoveImage deletes the specified image.
 	RemoveImage(systemContext *types.SystemContext, imageName string) error
 	// GetStore returns the reference to the storage library Store which
@@ -162,22 +157,12 @@ func (svc *imageService) ImageStatus(systemContext *types.SystemContext, nameOrI
 	defer img.Close()
 	size := imageSize(img)
 
-	result := ImageResult{
+	return &ImageResult{
 		ID:           image.ID,
 		Names:        image.Names,
 		Size:         size,
 		ConfigDigest: img.ConfigInfo().Digest,
-	}
-	if len(image.Names) > 0 {
-		result.ImageRef = image.Names[0]
-		if ref2, err2 := istorage.Transport.ParseStoreReference(svc.store, image.Names[0]); err2 == nil {
-			if dref := ref2.DockerReference(); dref != nil {
-				result.ImageRef = reference.FamiliarString(dref)
-			}
-		}
-	}
-
-	return &result, nil
+	}, nil
 }
 
 func imageSize(img types.Image) *uint64 {
@@ -287,57 +272,6 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName
 	return destRef, nil
 }
 
-func (svc *imageService) UntagImage(systemContext *types.SystemContext, nameOrID string) error {
-	ref, err := alltransports.ParseImageName(nameOrID)
-	if err != nil {
-		ref2, err2 := istorage.Transport.ParseStoreReference(svc.store, "@"+nameOrID)
-		if err2 != nil {
-			ref3, err3 := istorage.Transport.ParseStoreReference(svc.store, nameOrID)
-			if err3 != nil {
-				return err
-			}
-			ref2 = ref3
-		}
-		ref = ref2
-	}
-
-	img, err := istorage.Transport.GetStoreImage(svc.store, ref)
-	if err != nil {
-		return err
-	}
-
-	if nameOrID != img.ID {
-		namedRef, err := svc.prepareReference(nameOrID, &copy.Options{})
-		if err != nil {
-			return err
-		}
-
-		name := nameOrID
-		if namedRef.DockerReference() != nil {
-			name = namedRef.DockerReference().Name()
-			if tagged, ok := namedRef.DockerReference().(reference.NamedTagged); ok {
-				name = name + ":" + tagged.Tag()
-			}
-			if canonical, ok := namedRef.DockerReference().(reference.Canonical); ok {
-				name = name + "@" + canonical.Digest().String()
-			}
-		}
-
-		prunedNames := make([]string, 0, len(img.Names))
-		for _, imgName := range img.Names {
-			if imgName != name && imgName != nameOrID {
-				prunedNames = append(prunedNames, imgName)
-			}
-		}
-
-		if len(prunedNames) > 0 {
-			return svc.store.SetNames(img.ID, prunedNames)
-		}
-	}
-
-	return ref.DeleteImage(systemContext)
-}
-
 func (svc *imageService) RemoveImage(systemContext *types.SystemContext, nameOrID string) error {
 	ref, err := alltransports.ParseImageName(nameOrID)
 	if err != nil {
@@ -515,7 +449,7 @@ func (svc *imageService) ResolveNames(imageName string) ([]string, error) {
 	_, rest := splitDomain(r.Name())
 	images := []string{}
 	for _, r := range svc.registries {
-		images = append(images, path.Join(r, rest))
+		images = append(images, filepath.Join(r, rest))
 	}
 	return images, nil
 }
diff --git a/server/container_create.go b/server/container_create.go
index 9b052639..e69adef5 100644
--- a/server/container_create.go
+++ b/server/container_create.go
@@ -720,6 +720,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string,
 	labels := containerConfig.GetLabels()
 
+	if err := validateLabels(labels); err != nil {
+		return nil, err
+	}
+
 	metadata := containerConfig.GetMetadata()
 
 	kubeAnnotations := containerConfig.GetAnnotations()
diff --git a/server/container_list.go b/server/container_list.go
index 8e5f0210..d32eea2d 100644
--- a/server/container_list.go
+++ b/server/container_list.go
@@ -38,41 +38,44 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque
 	logrus.Debugf("ListContainersRequest %+v", req)
 	var ctrs []*pb.Container
-	filter := req.Filter
+	filter := req.GetFilter()
 	ctrList, err := s.ContainerServer.ListContainers()
 	if err != nil {
 		return nil, err
 	}
 
-	// Filter using container id and pod id first.
-	if filter.Id != "" {
-		id, err := s.CtrIDIndex().Get(filter.Id)
-		if err != nil {
-			// If we don't find a container ID with a filter, it should not
-			// be considered an error.  Log a warning and return an empty struct
-			logrus.Warn("unable to find container ID %s", filter.Id)
-			return &pb.ListContainersResponse{}, nil
-		}
-		c := s.ContainerServer.GetContainer(id)
-		if c != nil {
-			if filter.PodSandboxId != "" {
-				if c.Sandbox() == filter.PodSandboxId {
-					ctrList = []*oci.Container{c}
-				} else {
-					ctrList = []*oci.Container{}
-				}
+	if filter != nil {
-			} else {
-				ctrList = []*oci.Container{c}
+		// Filter using container id and pod id first.
+		if filter.Id != "" {
+			id, err := s.CtrIDIndex().Get(filter.Id)
+			if err != nil {
+				// If we don't find a container ID with a filter, it should not
+				// be considered an error.  Log a warning and return an empty struct
+				logrus.Warnf("unable to find container ID %s", filter.Id)
+				return &pb.ListContainersResponse{}, nil
 			}
-		}
-	} else {
-		if filter.PodSandboxId != "" {
-			pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
-			if pod == nil {
-				ctrList = []*oci.Container{}
-			} else {
-				ctrList = pod.Containers().List()
+			c := s.ContainerServer.GetContainer(id)
+			if c != nil {
+				if filter.PodSandboxId != "" {
+					if c.Sandbox() == filter.PodSandboxId {
+						ctrList = []*oci.Container{c}
+					} else {
+						ctrList = []*oci.Container{}
+					}
+
+				} else {
+					ctrList = []*oci.Container{c}
+				}
+			}
+		} else {
+			if filter.PodSandboxId != "" {
+				pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
+				if pod == nil {
+					ctrList = []*oci.Container{}
+				} else {
+					ctrList = pod.Containers().List()
+				}
 			}
 		}
 	}
diff --git a/server/container_status.go b/server/container_status.go
index 4b524a55..f81be56f 100644
--- a/server/container_status.go
+++ b/server/container_status.go
@@ -53,14 +53,6 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq
 	cState := s.Runtime().ContainerStatus(c)
 	rStatus := pb.ContainerState_CONTAINER_UNKNOWN
 
-	imageName := c.Image()
-	status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), imageName)
-	if err != nil {
-		return nil, err
-	}
-
-	resp.Status.ImageRef = status.ImageRef
-
 	// If we defaulted to exit code -1 earlier then we attempt to
 	// get the exit code from the exit file again.
 	if cState.ExitCode == -1 {
diff --git a/server/image_remove.go b/server/image_remove.go
index 26c0e0e0..2b2b3687 100644
--- a/server/image_remove.go
+++ b/server/image_remove.go
@@ -41,7 +41,7 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (r
 		}
 	}
 	for _, img := range images {
-		err = s.StorageImageServer().UntagImage(s.ImageContext(), img)
+		err = s.StorageImageServer().RemoveImage(s.ImageContext(), img)
 		if err != nil {
 			logrus.Debugf("error deleting image %s: %v", img, err)
 			continue
diff --git a/server/image_status.go b/server/image_status.go
index 19e95178..df06a3ce 100644
--- a/server/image_status.go
+++ b/server/image_status.go
@@ -49,10 +49,10 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (r
 	}
 	resp = &pb.ImageStatusResponse{
 		Image: &pb.Image{
-			Id:          status.ID,
-			RepoTags:    status.Names,
-			RepoDigests: status.Digests,
-			Size_:       *status.Size,
+			Id:       status.ID,
+			RepoTags: status.Names,
+			Size_:    *status.Size,
+			// TODO: https://github.com/kubernetes-incubator/cri-o/issues/531
 		},
 	}
 	logrus.Debugf("ImageStatusResponse: %+v", resp)
diff --git a/server/sandbox_run.go b/server/sandbox_run.go
index e31a3d3a..4f9ced22 100644
--- a/server/sandbox_run.go
+++ b/server/sandbox_run.go
@@ -224,6 +224,10 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest
 	// add labels
 	labels := req.GetConfig().GetLabels()
 
+	if err := validateLabels(labels); err != nil {
+		return nil, err
+	}
+
 	// Add special container name label for the infra container
 	labelsJSON := []byte{}
 	if labels != nil {
diff --git a/server/utils.go b/server/utils.go
index 1e98aef2..2a15ab42 100644
--- a/server/utils.go
+++ b/server/utils.go
@@ -18,6 +18,8 @@ const (
 	// According to http://man7.org/linux/man-pages/man5/resolv.conf.5.html:
 	// "The search list is currently limited to six domains with a total of 256 characters."
 	maxDNSSearches = 6
+
+	maxLabelSize = 4096
 )
 
 func copyFile(src, dest string) error {
@@ -196,3 +198,15 @@ func recordError(operation string, err error) {
 		metrics.CRIOOperationsErrors.WithLabelValues(operation).Inc()
 	}
 }
+
+func validateLabels(labels map[string]string) error {
+	for k, v := range labels {
+		if (len(k) + len(v)) > maxLabelSize {
+			if len(k) > 10 {
+				k = k[:10]
+			}
+			return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s", maxLabelSize, k)
+		}
+	}
+	return nil
+}
diff --git a/test/helpers.bash b/test/helpers.bash
index 35607bb3..21488e1f 100644
--- a/test/helpers.bash
+++ b/test/helpers.bash
@@ -101,7 +101,7 @@
 cp "$CONMON_BINARY" "$TESTDIR/conmon"
 PATH=$PATH:$TESTDIR
 
-# Make sure we have a copy of the redis:alpine image.
+# Make sure we have a local copy of the redis:alpine image.
 if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then
 	mkdir -p "$ARTIFACTS_PATH"/redis-image
 	if ! "$COPYIMG_BINARY" --import-from=docker://redis:alpine --export-to=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
@@ -111,10 +111,10 @@ if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then
 	fi
 fi
 
-# TODO: remove the code below for pulling redis:alpine using a canonical reference once
-# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete and we can
-# pull the image using a tagged reference and then subsequently find the image without
-# having to explicitly record the canonical reference as one of the image's names
+# TODO: remove the code below for redis digested image id when
+# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete
+# as the digested reference will be auto-stored when pulling the tag
+# above
 if ! [ -d "$ARTIFACTS_PATH"/redis-image-digest ]; then
 	mkdir -p "$ARTIFACTS_PATH"/redis-image-digest
 	if ! "$COPYIMG_BINARY" --import-from=docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --export-to=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json ; then
@@ -207,9 +207,9 @@ function retry() {
 	false
 }
 
-# Waits until the given crio becomes reachable.
+# Waits until crio becomes reachable.
 function wait_until_reachable() {
-	retry 15 1 crictl status
+	retry 15 1 crictl info
 }
 
 # Start crio.
@@ -230,16 +230,16 @@ function start_crio() {
 	if ! [ "$3" = "--no-pause-image" ] ; then
 		"$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY"
 	fi
-	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json
-# TODO: remove the code below for copying redis:alpine in using a canonical reference once
-# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete and we can
-# copy the image using a tagged reference and then subsequently find the image without
-# having to explicitly record the canonical reference as one of the image's names
-	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --import-from=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json
-	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --signature-policy="$INTEGRATION_ROOT"/policy.json
-	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json
-	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json
-	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json
+# TODO: remove the code below for redis:alpine digested image id when
+# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete
+# as the digested reference will be auto-stored when pulling the tag
+# above
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --import-from=dir:"$ARTIFACTS_PATH"/redis-image-digest --add-name=docker.io/library/redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --add-name=docker.io/library/mrunalp/oom --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/library/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --add-name=docker.io/library/busybox:latest --signature-policy="$INTEGRATION_ROOT"/policy.json
+	"$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --add-name=docker.io/runcom/stderr-test:latest --signature-policy="$INTEGRATION_ROOT"/policy.json
 	"$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG
 
 	# Prepare the CNI configuration files, we're running with non host networking by default
@@ -258,17 +258,29 @@ function start_crio() {
 		crictl pull redis:alpine
 	fi
 	REDIS_IMAGEID=$(crictl inspecti redis:alpine | head -1 | sed -e "s/ID: //g")
-	run crictl inspecti redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b
-	if [ "$status" -ne 0 ] ; then
-		crictl pull redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b
-	fi
-	REDIS_IMAGEID_DIGESTED=$(crictl inspecti redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b | head -1 | sed -e "s/ID: //g")
 	run crictl inspecti mrunalp/oom
 	if [ "$status" -ne 0 ] ; then
 		crictl pull mrunalp/oom
 	fi
-	OOM_IMAGEID=$(crictl inspecti mrunalp/oom | head -1 | sed -e "s/ID: //g")
-	run crioctl image status --id=runcom/stderr-test
+	#
+	#
+	#
+	# TODO: remove the code below for redis digested image id when
+	# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete
+	# as the digested reference will be auto-stored when pulling the tag
+	# above
+	#
+	#
+	#
+	REDIS_IMAGEID_DIGESTED="redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b"
+	run crictl inspecti $REDIS_IMAGEID_DIGESTED
+	if [ "$status" -ne 0 ]; then
+		crictl pull $REDIS_IMAGEID_DIGESTED
+	fi
+	#
+	#
+	#
+	run crictl inspecti runcom/stderr-test
 	if [ "$status" -ne 0 ] ; then
 		crictl pull runcom/stderr-test:latest
 	fi
diff --git a/test/image.bats b/test/image.bats
index 5458fe13..e62674a7 100644
--- a/test/image.bats
+++ b/test/image.bats
@@ -50,7 +50,9 @@ function teardown() {
 	stop_crio
 }
 
-@test "container status return image@digest if created by image ID" {
+@test "container status return image@digest if created by image ID and digest available" {
+	skip "depends on https://github.com/kubernetes-incubator/cri-o/issues/531"
+
 	start_crio
 
 	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
@@ -75,27 +77,11 @@ function teardown() {
 	stop_crio
 }
 
-@test "image pull and list" {
+@test "image pull" {
 	start_crio "" "" --no-pause-image
 	run crioctl image pull "$IMAGE"
 	echo "$output"
 	[ "$status" -eq 0 ]
-
-	run crioctl image list --quiet "$IMAGE"
-	[ "$status" -eq 0 ]
-	echo "$output"
-	[ "$output" != "" ]
-	imageid="$output"
-
-	run crioctl image list --quiet @"$imageid"
-	[ "$status" -eq 0 ]
-	echo "$output"
-	[ "$output" != "" ]
-
-	run crioctl image list --quiet "$imageid"
-	[ "$status" -eq 0 ]
-	echo "$output"
-	[ "$output" != "" ]
 	cleanup_images
 	stop_crio
 }
@@ -118,32 +104,7 @@ function teardown() {
 	stop_crio
 }
 
-@test "image pull and list by tag and ID" {
-	start_crio "" "" --no-pause-image
-	run crioctl image pull "$IMAGE:go"
-	echo "$output"
-	[ "$status" -eq 0 ]
-
-	run crioctl image list --quiet "$IMAGE:go"
-	[ "$status" -eq 0 ]
-	echo "$output"
-	[ "$output" != "" ]
-	imageid="$output"
-
-	run crioctl image list --quiet @"$imageid"
-	[ "$status" -eq 0 ]
-	echo "$output"
-	[ "$output" != "" ]
-
-	run crioctl image list --quiet "$imageid"
-	[ "$status" -eq 0 ]
-	echo "$output"
-	[ "$output" != "" ]
-	cleanup_images
-	stop_crio
-}
-
-@test "image pull and list by digest and ID" {
+@test "image pull and list by digest" {
 	start_crio "" "" --no-pause-image
 	run crioctl image pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc
 	echo "$output"
@@ -153,14 +114,18 @@ function teardown() {
 	[ "$status" -eq 0 ]
 	echo "$output"
 	[ "$output" != "" ]
-	imageid="$output"
 
-	run crioctl image list --quiet @"$imageid"
+	run crioctl image list --quiet nginx@33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc
 	[ "$status" -eq 0 ]
 	echo "$output"
 	[ "$output" != "" ]
 
-	run crioctl image list --quiet "$imageid"
+	run crioctl image list --quiet @33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc
+	[ "$status" -eq 0 ]
+	echo "$output"
+	[ "$output" != "" ]
+
+	run crioctl image list --quiet 33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc
 	[ "$status" -eq 0 ]
 	echo "$output"
 	[ "$output" != "" ]
diff --git a/test/image_remove.bats b/test/image_remove.bats
deleted file mode 100644
index ca2017d0..00000000
--- a/test/image_remove.bats
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bats
-
-load helpers
-
-IMAGE=docker.io/kubernetes/pause
-
-function teardown() {
-	cleanup_test
-}
-
-@test "image remove with multiple names, by name" {
-	start_crio "" "" --no-pause-image
-	# Pull the image, giving it one name.
-	run crioctl image pull "$IMAGE"
-	echo "$output"
-	[ "$status" -eq 0 ]
-	# Add a second name to the image.
-	run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json
-	echo "$output"
-	[ "$status" -eq 0 ]
-	# Get the list of image names and IDs.
-	run crioctl image list
-	echo "$output"
-	[ "$status" -eq 0 ]
-	[ "$output" != "" ]
-	# Cycle through each name, removing it by name.  The image that we assigned a second
-	# name to should still be around when we get to removing its second name.
-	grep ^Tag: <<< "$output" | while read -r header tag ; do
-		run crioctl image remove --id "$tag"
-		echo "$output"
-		[ "$status" -eq 0 ]
-	done
-	# List all images and their names.  There should be none now.
-	run crioctl image list --quiet
-	echo "$output"
-	[ "$status" -eq 0 ]
-	[ "$output" = "" ]
-	printf '%s\n' "$output" | while IFS= read -r id; do
-		echo "$id"
-	done
-	# All done.
-	cleanup_images
-	stop_crio
-}
-
-@test "image remove with multiple names, by ID" {
-	start_crio "" "" --no-pause-image
-	# Pull the image, giving it one name.
-	run crioctl image pull "$IMAGE"
-	echo "$output"
-	[ "$status" -eq 0 ]
-	# Add a second name to the image.
-	run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json
-	echo "$output"
-	[ "$status" -eq 0 ]
-	# Get the image ID of the image we just saved.
-	run crioctl image status --id="$IMAGE"
-	echo "$output"
-	[ "$status" -eq 0 ]
-	[ "$output" != "" ]
-	# Try to remove the image using its ID.  That should succeed because removing by ID always works.
-	grep ^ID: <<< "$output" | while read -r header id ; do
-		run crioctl image remove --id "$id"
-		echo "$output"
-		[ "$status" -eq 0 ]
-	done
-	# The image should be gone.
-	run crioctl image status --id="$IMAGE"
-	echo "$output"
-	[ "$status" -ne 0 ]
-	# All done.
-	cleanup_images
-	stop_crio
-}
diff --git a/vendor.conf b/vendor.conf
index 109d7ed2..13cda0d4 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -12,10 +12,10 @@ github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6
 github.com/json-iterator/go 1.0.0
 github.com/peterbourgon/diskv v2.0.1
 github.com/sirupsen/logrus v1.0.0
-github.com/containers/image storage-update https://github.com/nalind/image
+github.com/containers/image 57b257d128d6075ea3287991ee408d24c7bd2758
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/ostreedev/ostree-go master
-github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5
+github.com/containers/storage d7921c6facc516358070a1306689eda18adaa20a
 github.com/containernetworking/cni v0.4.0
 google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
 github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 0380bf72..590b3787 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -320,15 +320,6 @@ func (ic *imageCopier) copyLayers() error {
 	srcInfos := ic.src.LayerInfos()
 	destInfos := []types.BlobInfo{}
 	diffIDs := []digest.Digest{}
-	updatedSrcInfos := ic.src.UpdatedLayerInfos()
-	srcInfosUpdated := false
-	if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
-		if !ic.canModifyManifest {
-			return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
-		}
-		srcInfos = updatedSrcInfos
-		srcInfosUpdated = true
-	}
 	for _, srcLayer := range srcInfos {
 		var (
 			destInfo types.BlobInfo
@@ -357,7 +348,7 @@ func (ic *imageCopier) copyLayers() error {
 	if ic.diffIDsAreNeeded {
 		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
 	}
-	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+	if layerDigestsDiffer(srcInfos, destInfos) {
 		ic.manifestUpdates.LayerInfos = destInfos
 	}
 	return nil
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 705e289b..fddc1c52 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -74,8 +74,3 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
 	}
 	return signatures, nil
 }
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dirImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go
index b9941dfc..aebcaa82 100644
--- a/vendor/github.com/containers/image/docker/archive/src.go
+++ b/vendor/github.com/containers/image/docker/archive/src.go
@@ -34,8 +34,3 @@ func (s *archiveImageSource) Reference() types.ImageReference {
 func (s *archiveImageSource) Close() error {
 	return nil
 }
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *archiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
index 3d059da9..644dbeec 100644
--- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
@@ -83,8 +83,3 @@ func (s *daemonImageSource) Reference() types.ImageReference {
 func (s *daemonImageSource) Close() error {
 	return os.Remove(s.tarCopyPath)
 }
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *daemonImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 217e9dcb..24b82d6f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -8,6 +8,7 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
+	"os"
 	"path/filepath"
 	"strings"
 	"time"
@@ -124,6 +125,69 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
 	return filepath.Join(hostCertDir, hostPort)
 }
 
+func setupCertificates(dir string, tlsc *tls.Config) error {
+	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
+	fs, err := ioutil.ReadDir(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		if os.IsPermission(err) {
+			logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
+			return nil
+		}
+		return err
+	}
+
+	for _, f := range fs {
+		fullPath := filepath.Join(dir, f.Name())
+		if strings.HasSuffix(f.Name(), ".crt") {
+			systemPool, err := tlsconfig.SystemCertPool()
+			if err != nil {
+				return errors.Wrap(err, "unable to get system cert pool")
+			}
+			tlsc.RootCAs = systemPool
+			logrus.Debugf(" crt: %s", fullPath)
+			data, err := ioutil.ReadFile(fullPath)
+			if err != nil {
+				return err
+			}
+			tlsc.RootCAs.AppendCertsFromPEM(data)
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			certName := f.Name()
+			keyName := certName[:len(certName)-5] + ".key"
+			logrus.Debugf(" cert: %s", fullPath)
+			if !hasFile(fs, keyName) {
+				return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+			if err != nil {
+				return err
+			}
+			tlsc.Certificates = append(tlsc.Certificates, cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			logrus.Debugf(" key: %s", fullPath)
+			if !hasFile(fs, certName) {
+				return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+	return nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+	for _, f := range files {
+		if f.Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
 // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
 func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index 14e3c2b5..232c3cf9 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -52,11 +52,6 @@ func (s *dockerImageSource) Close() error {
 	return nil
 }
 
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dockerImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}
-
 // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
 // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
 func simplifyContentType(contentType string) string {
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index aab4a6d9..72c85c70 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -168,7 +168,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
 func (d *Destination) PutManifest(m []byte) error {
 	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
 	// so the caller trying a different manifest kind would be pointless.
-	var man manifest.Schema2
+	var man schema2Manifest
 	if err := json.Unmarshal(m, &man); err != nil {
 		return errors.Wrap(err, "Error parsing manifest")
 	}
@@ -177,12 +177,12 @@ func (d *Destination) PutManifest(m []byte) error {
 	}
 
 	layerPaths := []string{}
-	for _, l := range man.LayersDescriptors {
+	for _, l := range man.Layers {
 		layerPaths = append(layerPaths, l.Digest.String())
 	}
 
 	items := []ManifestItem{{
-		Config:   man.ConfigDescriptor.Digest.String(),
+		Config:   man.Config.Digest.String(),
 		RepoTags: []string{d.repoTag},
 		Layers:   layerPaths,
 		Parent:   "",
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 34d5ff32..f77cb713 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -254,22 +254,22 @@ func (s *Source) GetManifest() ([]byte, string, error) {
 	if err := s.ensureCachedDataIsPresent(); err != nil {
 		return nil, "", err
 	}
-	m := manifest.Schema2{
+	m := schema2Manifest{
 		SchemaVersion: 2,
 		MediaType:     manifest.DockerV2Schema2MediaType,
-		ConfigDescriptor: manifest.Schema2Descriptor{
+		Config: distributionDescriptor{
 			MediaType: manifest.DockerV2Schema2ConfigMediaType,
 			Size:      int64(len(s.configBytes)),
 			Digest:    s.configDigest,
 		},
-		LayersDescriptors: []manifest.Schema2Descriptor{},
+		Layers: []distributionDescriptor{},
 	}
 	for _, diffID := range s.orderedDiffIDList {
 		li, ok := s.knownLayers[diffID]
 		if !ok {
 			return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
 		}
-		m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+		m.Layers = append(m.Layers, distributionDescriptor{
 			Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
 			MediaType: manifest.DockerV2Schema2LayerMediaType,
 			Size:      li.size,
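The tarfile changes above, together with the types.go hunk just below, drop the dependency on the shared manifest.Schema2 type in favor of package-local copies of the distribution descriptor and schema2 manifest structures. A self-contained sketch of that shape — these exported stand-ins only mirror the unexported vendored types, so treat the block as illustrative rather than the package's actual API:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// Descriptor and Schema2 mirror the vendored distributionDescriptor and
// schema2Manifest types (which are unexported in the tarfile package).
type Descriptor struct {
	MediaType string        `json:"mediaType,omitempty"`
	Size      int64         `json:"size,omitempty"`
	Digest    digest.Digest `json:"digest,omitempty"`
	URLs      []string      `json:"urls,omitempty"`
}

type Schema2 struct {
	SchemaVersion int          `json:"schemaVersion"`
	MediaType     string       `json:"mediaType,omitempty"`
	Config        Descriptor   `json:"config"`
	Layers        []Descriptor `json:"layers"`
}

func main() {
	config := []byte(`{"architecture":"amd64","os":"linux"}`)
	m := Schema2{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config: Descriptor{
			MediaType: "application/vnd.docker.container.image.v1+json",
			Size:      int64(len(config)),
			Digest:    digest.FromBytes(config), // sha256 of the config bytes
		},
		Layers: []Descriptor{}, // GetManifest appends one entry per layer here
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out)) // a minimal, layer-less v2s2 manifest
}

This is essentially the construction Source.GetManifest performs in the src.go hunk above, just with exported names.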
diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go
index 4780d66c..f16cc8c6 100644
--- a/vendor/github.com/containers/image/docker/tarfile/types.go
+++ b/vendor/github.com/containers/image/docker/tarfile/types.go
@@ -1,9 +1,6 @@
 package tarfile
 
-import (
-	"github.com/containers/image/manifest"
-	"github.com/opencontainers/go-digest"
-)
+import "github.com/opencontainers/go-digest"
 
 // Various data structures.
@@ -21,13 +18,30 @@ type ManifestItem struct {
 	Config       string
 	RepoTags     []string
 	Layers       []string
-	Parent       imageID                               `json:",omitempty"`
-	LayerSources map[diffID]manifest.Schema2Descriptor `json:",omitempty"`
+	Parent       imageID                           `json:",omitempty"`
+	LayerSources map[diffID]distributionDescriptor `json:",omitempty"`
 }
 
 type imageID string
 type diffID digest.Digest
 
+// Based on github.com/docker/distribution/blobs.go
+type distributionDescriptor struct {
+	MediaType string        `json:"mediaType,omitempty"`
+	Size      int64         `json:"size,omitempty"`
+	Digest    digest.Digest `json:"digest,omitempty"`
+	URLs      []string      `json:"urls,omitempty"`
+}
+
+// Based on github.com/docker/distribution/manifest/schema2/manifest.go
+// FIXME: We are repeating this all over the place; make a public copy?
+type schema2Manifest struct {
+	SchemaVersion int                      `json:"schemaVersion"`
+	MediaType     string                   `json:"mediaType,omitempty"`
+	Config        distributionDescriptor   `json:"config"`
+	Layers        []distributionDescriptor `json:"layers"`
+}
+
 // Based on github.com/docker/docker/image/image.go
 // MOST CONTENT OMITTED AS UNNECESSARY
 type image struct {
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go
index 4b152d26..c79adacc 100644
--- a/vendor/github.com/containers/image/image/docker_list.go
+++ b/vendor/github.com/containers/image/image/docker_list.go
@@ -21,7 +21,7 @@ type platformSpec struct {
 
 // A manifestDescriptor references a platform-specific manifest.
 type manifestDescriptor struct {
-	manifest.Schema2Descriptor
+	descriptor
 	Platform platformSpec `json:"platform"`
 }
diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go
index 86e30b3e..4152b3cd 100644
--- a/vendor/github.com/containers/image/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/image/docker_schema1.go
@@ -2,7 +2,9 @@ package image
 
 import (
 	"encoding/json"
+	"regexp"
 	"strings"
+	"time"
 
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
@@ -12,25 +14,87 @@ import (
 	"github.com/pkg/errors"
 )
 
-type manifestSchema1 struct {
-	m *manifest.Schema1
+var (
+	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
 }
 
-func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
-	m, err := manifest.Schema1FromManifest(manifestBlob)
-	if err != nil {
+type historySchema1 struct {
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// v1Compatibility is the structure stored as a JSON string in historySchema1.V1Compatibility.
+// It is similar to v1Image but not the same, in particular note the ThrowAway field.
+type v1Compatibility struct {
+	ID              string    `json:"id"`
+	Parent          string    `json:"parent,omitempty"`
+	Comment         string    `json:"comment,omitempty"`
+	Created         time.Time `json:"created"`
+	ContainerConfig struct {
+		Cmd []string
+	} `json:"container_config,omitempty"`
+	Author    string `json:"author,omitempty"`
+	ThrowAway bool   `json:"throwaway,omitempty"`
+}
+
+type manifestSchema1 struct {
+	Name          string            `json:"name"`
+	Tag           string            `json:"tag"`
+	Architecture  string            `json:"architecture"`
+	FSLayers      []fsLayersSchema1 `json:"fsLayers"`
+	History       []historySchema1  `json:"history"`
+	SchemaVersion int               `json:"schemaVersion"`
+}
+
+func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
+	mschema1 := &manifestSchema1{}
+	if err := json.Unmarshal(manifest, mschema1); err != nil {
 		return nil, err
 	}
-	return &manifestSchema1{m: m}, nil
+	if mschema1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
+	}
+	if len(mschema1.FSLayers) != len(mschema1.History) {
+		return nil, errors.New("length of history not equal to number of layers")
+	}
+	if len(mschema1.FSLayers) == 0 {
+		return nil, errors.New("no FSLayers in manifest")
+	}
+
+	if err := fixManifestLayers(mschema1); err != nil {
+		return nil, err
+	}
+	return mschema1, nil
 }
 
 // manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
-	return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	return &manifestSchema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
 }
 
 func (m *manifestSchema1) serialize() ([]byte, error) {
-	return m.m.Serialize()
+	// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+	unsigned, err := json.Marshal(*m)
+	if err != nil {
+		return nil, err
+	}
+	return manifest.AddDummyV2S1Signature(unsigned)
 }
 
 func (m *manifestSchema1) manifestMIMEType() string {
@@ -40,7 +104,7 @@ func (m *manifestSchema1) manifestMIMEType() string {
 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
 // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
 func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
-	return m.m.ConfigInfo()
+	return types.BlobInfo{}
 }
 
 // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -64,7 +128,11 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	layers := make([]types.BlobInfo, len(m.FSLayers))
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+	}
+	return layers
 }
 
 // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -85,25 +153,22 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named)
 	} else {
 		tag = ""
 	}
-	return m.m.Name != name || m.m.Tag != tag
+	return m.Name != name || m.Tag != tag
 }
 
 func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
 	v1 := &v1Image{}
-	if err := json.Unmarshal([]byte(m.m.History[0].V1Compatibility), v1); err != nil {
+	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
 		return nil, err
 	}
-	i := &types.ImageInspectInfo{
-		Tag:           m.m.Tag,
+	return &types.ImageInspectInfo{
+		Tag:           m.Tag,
 		DockerVersion: v1.DockerVersion,
 		Created:       v1.Created,
+		Labels:        v1.Config.Labels,
 		Architecture:  v1.Architecture,
 		Os:            v1.OS,
-	}
-	if v1.Config != nil {
-		i.Labels = v1.Config.Labels
-	}
-	return i, nil
+	}, nil
 }
 
 // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -116,18 +181,25 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
 func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
-	copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+	copy := *m
 	if options.LayerInfos != nil {
-		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
-			return nil, err
+		// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+		if len(copy.FSLayers) != len(options.LayerInfos) {
+			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
+		}
+		for i, info := range options.LayerInfos {
+			// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+			// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+			// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+			copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
 		}
 	}
 	if options.EmbeddedDockerReference != nil {
-		copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+		copy.Name = reference.Path(options.EmbeddedDockerReference)
 		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
-			copy.m.Tag = tagged.Tag()
+			copy.Tag = tagged.Tag()
 		} else {
-			copy.m.Tag = ""
+			copy.Tag = ""
 		}
 	}
 
@@ -145,20 +217,78 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
 	return memoryImageFromManifest(&copy), nil
 }
 
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
+// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
+// both from manifest.History and manifest.FSLayers).
+// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func fixManifestLayers(manifest *manifestSchema1) error {
+	type imageV1 struct {
+		ID     string
+		Parent string
+	}
+	// Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
+	imgs := make([]*imageV1, len(manifest.FSLayers))
+	for i := range manifest.FSLayers {
+		img := &imageV1{}
+
+		if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
+			return err
+		}
+
+		imgs[i] = img
+		if err := validateV1ID(img.ID); err != nil {
+			return err
+		}
+	}
+	if imgs[len(imgs)-1].Parent != "" {
+		return errors.New("Invalid parent ID in the base layer of the image")
+	}
+	// check general duplicates to error instead of a deadlock
+	idmap := make(map[string]struct{})
+	var lastID string
+	for _, img := range imgs {
+		// skip IDs that appear after each other, we handle those later
+		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+			return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+		}
+		lastID = img.ID
+		idmap[lastID] = struct{}{}
+	}
+	// backwards loop so that we keep the remaining indexes after removing items
+	for i := len(imgs) - 2; i >= 0; i-- {
+		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+			manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
+			manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
+		} else if imgs[i].Parent != imgs[i+1].ID {
+			return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+		}
+	}
+	return nil
+}
+
+func validateV1ID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return errors.Errorf("image ID %q is invalid", id)
+	}
+	return nil
+}
+
 // Based on github.com/docker/docker/distribution/pull_v2.go
 func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
-	if len(m.m.History) == 0 {
+	if len(m.History) == 0 {
 		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
 		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
 	}
-	if len(m.m.History) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
+	if len(m.History) != len(m.FSLayers) {
+		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
 	}
-	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
+		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
 	}
-	if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+	if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
+		return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
 	}
 
 	rootFS := rootFS{
@@ -166,13 +296,13 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
 		DiffIDs:   []digest.Digest{},
 		BaseLayer: "",
 	}
-	var layers []manifest.Schema2Descriptor
-	history := make([]imageHistory, len(m.m.History))
-	for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
-		v2Index := (len(m.m.History) - 1) - v1Index
+	var layers []descriptor
+	history := make([]imageHistory, len(m.History))
+	for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
+		v2Index := (len(m.History) - 1) - v1Index
 
-		var v1compat manifest.Schema1V1Compatibility
-		if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
+		var v1compat v1Compatibility
+		if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
 			return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
 		}
 		history[v2Index] = imageHistory{
@@ -192,19 +322,19 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
 			if layerDiffIDs != nil {
 				d = layerDiffIDs[v2Index]
 			}
-			layers = append(layers, manifest.Schema2Descriptor{
+			layers = append(layers, descriptor{
 				MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
 				Size:      size,
-				Digest:    m.m.FSLayers[v1Index].BlobSum,
+				Digest:    m.FSLayers[v1Index].BlobSum,
 			})
 			rootFS.DiffIDs = append(rootFS.DiffIDs, d)
 		}
 	}
-	configJSON, err := configJSONFromV1Config([]byte(m.m.History[0].V1Compatibility), rootFS, history)
+	configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
 	if err != nil {
 		return nil, err
 	}
-	configDescriptor := manifest.Schema2Descriptor{
+	configDescriptor := descriptor{
 		MediaType: "application/vnd.docker.container.image.v1+json",
 		Size:      int64(len(configJSON)),
 		Digest:    digest.FromBytes(configJSON),
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index 7ccd061c..8cc3c495 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -29,44 +29,54 @@ var gzippedEmptyLayer = []byte{
 
 // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
 const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
 
-type manifestSchema2 struct {
-	src        types.ImageSource // May be nil if configBlob is not nil
-	configBlob []byte            // If set, corresponds to contents of ConfigDescriptor.
-	m          *manifest.Schema2
+type descriptor struct {
+	MediaType string        `json:"mediaType"`
+	Size      int64         `json:"size"`
+	Digest    digest.Digest `json:"digest"`
+	URLs      []string      `json:"urls,omitempty"`
 }
 
-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
-	m, err := manifest.Schema2FromManifest(manifestBlob)
-	if err != nil {
+type manifestSchema2 struct {
+	src               types.ImageSource // May be nil if configBlob is not nil
+	configBlob        []byte            // If set, corresponds to contents of ConfigDescriptor.
+	SchemaVersion     int               `json:"schemaVersion"`
+	MediaType         string            `json:"mediaType"`
+	ConfigDescriptor  descriptor        `json:"config"`
+	LayersDescriptors []descriptor      `json:"layers"`
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+	v2s2 := manifestSchema2{src: src}
+	if err := json.Unmarshal(manifest, &v2s2); err != nil {
 		return nil, err
 	}
-	return &manifestSchema2{
-		src: src,
-		m:   m,
-	}, nil
+	return &v2s2, nil
 }
 
 // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
+func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
 	return &manifestSchema2{
-		src:        src,
-		configBlob: configBlob,
-		m:          manifest.Schema2FromComponents(config, layers),
+		src:               src,
+		configBlob:        configBlob,
+		SchemaVersion:     2,
+		MediaType:         manifest.DockerV2Schema2MediaType,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
 	}
 }
 
 func (m *manifestSchema2) serialize() ([]byte, error) {
-	return m.m.Serialize()
+	return json.Marshal(*m)
 }
 
 func (m *manifestSchema2) manifestMIMEType() string {
-	return m.m.MediaType
+	return m.MediaType
 }
 
 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
 // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
 func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
-	return m.m.ConfigInfo()
+	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
 }
 
 // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
@@ -95,9 +105,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
 		}
 		stream, _, err := m.src.GetBlob(types.BlobInfo{
-			Digest: m.m.ConfigDescriptor.Digest,
-			Size:   m.m.ConfigDescriptor.Size,
-			URLs:   m.m.ConfigDescriptor.URLs,
+			Digest: m.ConfigDescriptor.Digest,
+			Size:   m.ConfigDescriptor.Size,
+			URLs:   m.ConfigDescriptor.URLs,
 		})
 		if err != nil {
 			return nil, err
@@ -108,8 +118,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 			return nil, err
 		}
 		computedDigest := digest.FromBytes(blob)
-		if computedDigest != m.m.ConfigDescriptor.Digest {
-			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+		if computedDigest != m.ConfigDescriptor.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
 		}
 		m.configBlob = blob
 	}
@@ -120,7 +130,15 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	blobs := []types.BlobInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, types.BlobInfo{
+			Digest: layer.Digest,
+			Size:   layer.Size,
+			URLs:   layer.URLs,
+		})
+	}
+	return blobs
 }
 
 // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -139,16 +157,13 @@ func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) {
 	if err := json.Unmarshal(config, v1); err != nil {
 		return nil, err
 	}
-	i := &types.ImageInspectInfo{
+	return &types.ImageInspectInfo{
 		DockerVersion: v1.DockerVersion,
 		Created:       v1.Created,
+		Labels:        v1.Config.Labels,
 		Architecture:  v1.Architecture,
 		Os:            v1.OS,
-	}
-	if v1.Config != nil {
-		i.Labels = v1.Config.Labels
-	}
-	return i, nil
+	}, nil
 }
 
 // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -161,14 +176,17 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
 func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
-	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
-		src:        m.src,
-		configBlob: m.configBlob,
-		m:          manifest.Schema2Clone(m.m),
-	}
+	copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
 	if options.LayerInfos != nil {
-		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
-			return nil, err
+		if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+		}
+		copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
+		for i, info := range options.LayerInfos {
+			copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
+			copy.LayersDescriptors[i].Digest = info.Digest
+			copy.LayersDescriptors[i].Size = info.Size
+			copy.LayersDescriptors[i].URLs = info.URLs
 		}
 	}
 	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
@@ -186,15 +204,6 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
 	return memoryImageFromManifest(&copy), nil
 }
 
-func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
-	return imgspecv1.Descriptor{
-		MediaType: d.MediaType,
-		Size:      d.Size,
-		Digest:    d.Digest,
-		URLs:      d.URLs,
-	}
-}
-
 func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
 	configOCI, err := m.OCIConfig()
 	if err != nil {
@@ -205,16 +214,18 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
 		return nil, err
 	}
 
-	config := imgspecv1.Descriptor{
-		MediaType: imgspecv1.MediaTypeImageConfig,
-		Size:      int64(len(configOCIBytes)),
-		Digest:    digest.FromBytes(configOCIBytes),
+	config := descriptorOCI1{
+		descriptor: descriptor{
+			MediaType: imgspecv1.MediaTypeImageConfig,
+			Size:      int64(len(configOCIBytes)),
+			Digest:    digest.FromBytes(configOCIBytes),
+		},
 	}
 
-	layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+	layers := make([]descriptorOCI1, len(m.LayersDescriptors))
 	for idx := range layers {
-		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
-		if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+		layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
+		if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
 			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
 		} else {
 			// we assume layers are gzip'ed because docker v2s2 only deals with
@@ -239,8 +250,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 	}
 
 	// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
-	fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
-	history := make([]manifest.Schema1History, len(imageConfig.History))
+	fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
+	history := make([]historySchema1, len(imageConfig.History))
 	nonemptyLayerIndex := 0
 	var parentV1ID string // Set in the loop
 	v1ID := ""
@@ -268,10 +279,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 			}
 			blobDigest = gzippedEmptyLayerDigest
 		} else {
-			if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
-				return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
+			if nonemptyLayerIndex >= len(m.LayersDescriptors) {
+				return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
 			}
-			blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
+			blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
 			nonemptyLayerIndex++
 		}
 
@@ -282,7 +293,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 		}
 		v1ID = v
 
-		fakeImage := manifest.Schema1V1Compatibility{
+		fakeImage := v1Compatibility{
 			ID:      v1ID,
 			Parent:  parentV1ID,
 			Comment: historyEntry.Comment,
@@ -296,8 +307,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 			return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
 		}
 
-		fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
-		history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
+		fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
+		history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
 		// Note that parentV1ID of the top layer is preserved when exiting this loop
 	}
diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go
index 4a79ac27..75c9e711 100644
--- a/vendor/github.com/containers/image/image/manifest.go
+++ b/vendor/github.com/containers/image/image/manifest.go
@@ -1,7 +1,6 @@
 package image
 
 import (
-	"fmt"
 	"time"
 
 	"github.com/containers/image/docker/reference"
@@ -89,8 +88,11 @@ type genericManifest interface {
 }
 
 func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
-	switch manifest.NormalizedMIMEType(mt) {
-	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+	switch mt {
+	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+	// need to happen within the ImageSource.
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
 		return manifestSchema1FromManifest(manblob)
 	case imgspecv1.MediaTypeImageManifest:
 		return manifestOCI1FromManifest(src, manblob)
@@ -98,8 +100,17 @@ func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string)
 		return manifestSchema2FromManifest(src, manblob)
 	case manifest.DockerV2ListMediaType:
 		return manifestSchema2FromManifestList(src, manblob)
-	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return manifestSchema1FromManifest(manblob) } } diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go index 1e8bb3a4..62995f61 100644 --- a/vendor/github.com/containers/image/image/memory.go +++ b/vendor/github.com/containers/image/image/memory.go @@ -71,10 +71,3 @@ func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { func (i *memoryImage) IsMultiImage() bool { return false } - -// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *memoryImage) UpdatedLayerInfos() []types.BlobInfo { - return i.LayerInfos() -} diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 77ddedae..5f7c0728 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -12,34 +12,41 @@ import ( "github.com/pkg/errors" ) -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. - m *manifest.OCI1 +type descriptorOCI1 struct { + descriptor + Annotations map[string]string `json:"annotations,omitempty"` } -func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { +type manifestOCI1 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
+ SchemaVersion int `json:"schemaVersion"` + ConfigDescriptor descriptorOCI1 `json:"config"` + LayersDescriptors []descriptorOCI1 `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { + oci := manifestOCI1{src: src} + if err := json.Unmarshal(manifest, &oci); err != nil { return nil, err } - return &manifestOCI1{ - src: src, - m: m, - }, nil + return &oci, nil } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { +func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { return &manifestOCI1{ - src: src, - configBlob: configBlob, - m: manifest.OCI1FromComponents(config, layers), + src: src, + configBlob: configBlob, + SchemaVersion: 2, + ConfigDescriptor: config, + LayersDescriptors: layers, } } func (m *manifestOCI1) serialize() ([]byte, error) { - return m.m.Serialize() + return json.Marshal(*m) } func (m *manifestOCI1) manifestMIMEType() string { @@ -49,7 +56,7 @@ func (m *manifestOCI1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations} } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. @@ -60,9 +67,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.m.Config.Digest, - Size: m.m.Config.Size, - URLs: m.m.Config.URLs, + Digest: m.ConfigDescriptor.Digest, + Size: m.ConfigDescriptor.Size, + URLs: m.ConfigDescriptor.URLs, }) if err != nil { return nil, err @@ -73,8 +80,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) + if computedDigest != m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) } m.configBlob = blob } @@ -100,7 +107,11 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs}) + } + return blobs } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. 
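The OCI1 counterpart applies the same inlining, with one wrinkle: descriptorOCI1 embeds the schema2-style descriptor and adds only Annotations, so its JSON flattens into a single object and the schema2 downgrade later in this file can recover the plain descriptor by field access (m.ConfigDescriptor.descriptor). A rough stdlib-only illustration of that embedding trick, with toy types and a made-up annotation key:

package main

import (
	"encoding/json"
	"fmt"
)

type descriptor struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`
	Digest    string `json:"digest"`
}

// Embedding promotes descriptor's JSON fields, so annotations marshal
// alongside them in one flat object.
type descriptorOCI1 struct {
	descriptor
	Annotations map[string]string `json:"annotations,omitempty"`
}

func main() {
	d := descriptorOCI1{
		descriptor: descriptor{
			MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
			Size:      32654,
			Digest:    "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0",
		},
		Annotations: map[string]string{"org.example.note": "demo"}, // hypothetical key
	}
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // annotations sit next to the embedded descriptor fields

	// Dropping back to the plain schema2 descriptor is a one-line field
	// access, the same move convertToManifestSchema2 makes below.
	plain := d.descriptor
	fmt.Printf("%+v\n", plain)
}

Compared with the deleted schema2DescriptorFromOCI1Descriptor helper, embedding makes the conversion trivial at the cost of coupling the two descriptor layouts.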
@@ -119,16 +130,13 @@ func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { if err := json.Unmarshal(config, v1); err != nil { return nil, err } - i := &types.ImageInspectInfo{ + return &types.ImageInspectInfo{ DockerVersion: v1.DockerVersion, Created: v1.Created, + Labels: v1.Config.Labels, Architecture: v1.Architecture, Os: v1.OS, - } - if v1.Config != nil { - i.Labels = v1.Config.Labels - } - return i, nil + }, nil } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -141,14 +149,18 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.OCI1Clone(m.m), - } + copy := *m // NOTE: This is not a deep copy, it still shares slices etc. if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err + if len(copy.LayersDescriptors) != len(options.LayerInfos) { + return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) + } + copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) + for i, info := range options.LayerInfos { + copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType + copy.LayersDescriptors[i].Digest = info.Digest + copy.LayersDescriptors[i].Size = info.Size + copy.LayersDescriptors[i].Annotations = info.Annotations + copy.LayersDescriptors[i].URLs = info.URLs } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. @@ -164,26 +176,17 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types. return memoryImageFromManifest(©), nil } -func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { - return manifest.Schema2Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(m.m.Config) + config := m.ConfigDescriptor.descriptor // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. 
config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) + layers := make([]descriptor, len(m.LayersDescriptors)) for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) + layers[idx] = m.LayersDescriptors[idx].descriptor layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType } diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go index 1293f7d3..ef35b3c3 100644 --- a/vendor/github.com/containers/image/image/sourced.go +++ b/vendor/github.com/containers/image/image/sourced.go @@ -88,7 +88,3 @@ func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { func (i *sourcedImage) IsMultiImage() bool { return i.manifestMIMEType == manifest.DockerV2ListMediaType } - -func (i *sourcedImage) UpdatedLayerInfos() []types.BlobInfo { - return i.UnparsedImage.UpdatedLayerInfos() -} diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go index 7bcac06e..483cfd04 100644 --- a/vendor/github.com/containers/image/image/unparsed.go +++ b/vendor/github.com/containers/image/image/unparsed.go @@ -83,10 +83,3 @@ func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { } return i.cachedSignatures, nil } - -// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *UnparsedImage) UpdatedLayerInfos() []types.BlobInfo { - return i.src.UpdatedLayerInfos() -} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go deleted file mode 100644 index f4ce7320..00000000 --- a/vendor/github.com/containers/image/manifest/docker_schema1.go +++ /dev/null @@ -1,212 +0,0 @@ -package manifest - -import ( - "encoding/json" - "regexp" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. -type Schema1FSLayers struct { - BlobSum digest.Digest `json:"blobSum"` -} - -// Schema1History is an entry of the "history" array in docker/distribution schema 1. -type Schema1History struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// Schema1 is a manifest in docker/distribution schema 1. -type Schema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []Schema1FSLayers `json:"fsLayers"` - History []Schema1History `json:"history"` - SchemaVersion int `json:"schemaVersion"` -} - -// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. -type Schema1V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. 
-// (NOTE: The instance is not necessary a literal representation of the original blob, -// layers with duplicate IDs are eliminated.) -func Schema1FromManifest(manifest []byte) (*Schema1, error) { - s1 := Schema1{} - if err := json.Unmarshal(manifest, &s1); err != nil { - return nil, err - } - if s1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) - } - if len(s1.FSLayers) != len(s1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(s1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") - } - if err := s1.fixManifestLayers(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1FromComponents creates an Schema1 manifest instance from the supplied data. -func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - return &Schema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } -} - -// Schema1Clone creates a copy of the supplied Schema1 manifest. -func Schema1Clone(src *Schema1) *Schema1 { - copy := *src - return © -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} - } - return layers -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. - if len(m.FSLayers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) - } - for i, info := range layerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! 
-func (m *Schema1) Serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) - if err != nil { - return nil, err - } - return AddDummyV2S1Signature(unsigned) -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), -// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, -// both from m.History and m.FSLayers). -// Note that even after this succeeds, m.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func (m *Schema1) fixManifestLayers() error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(m.FSLayers) == len(m.History) - imgs := make([]*imageV1, len(m.FSLayers)) - for i := range m.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
-func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - s1 := &Schema1V1Compatibility{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: m.Tag, - Created: s1.Created, - DockerVersion: "", - Labels: make(map[string]string), - Architecture: "", - Os: "", - Layers: []string{}, - }, nil -} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go deleted file mode 100644 index a44e561b..00000000 --- a/vendor/github.com/containers/image/manifest/docker_schema2.go +++ /dev/null @@ -1,241 +0,0 @@ -package manifest - -import ( - "encoding/json" - "time" - - "github.com/containers/image/pkg/strslice" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - -// Schema2 is a manifest in docker/distribution schema 2. -type Schema2 struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor Schema2Descriptor `json:"config"` - LayersDescriptors []Schema2Descriptor `json:"layers"` -} - -// Schema2Port is a Port, a string containing port number and protocol in the -// format "80/tcp", from docker/go-connections/nat. -type Schema2Port string - -// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from -// docker/go-connections/nat. -type Schema2PortSet map[Schema2Port]struct{} - -// Schema2HealthConfig is a HealthConfig, which holds configuration settings -// for the HEALTHCHECK feature, from docker/docker/api/types/container. -type Schema2HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Schema2Config is a Config in docker/docker/api/types/container. -type Schema2Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Schema2V1Image is a V1Image in docker/docker/image. -type Schema2V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Schema2Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Schema2Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. -type Schema2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// Schema2History stores build commands that were used to create an image, from docker/docker/image. 
-type Schema2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Schema2Image is an Image in docker/docker/image. -type Schema2Image struct { - Schema2V1Image - Parent digest.Digest `json:"parent,omitempty"` - RootFS *Schema2RootFS `json:"rootfs,omitempty"` - History []Schema2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. - computedID digest.Digest -} - -// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. -func Schema2FromManifest(manifest []byte) (*Schema2, error) { - s2 := Schema2{} - if err := json.Unmarshal(manifest, &s2); err != nil { - return nil, err - } - return &s2, nil -} - -// Schema2FromComponents creates an Schema2 manifest instance from the supplied data. -func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { - return &Schema2{ - SchemaVersion: 2, - MediaType: DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, - } -} - -// Schema2Clone creates a copy of the supplied Schema2 manifest. -func Schema2Clone(src *Schema2) *Schema2 { - copy := *src - return © -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, MediaType: DockerV2Schema2ConfigMediaType} -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. 
-func (m *Schema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, - MediaType: layer.MediaType, - }) - } - return blobs -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.LayersDescriptors) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) - } - original := m.LayersDescriptors - m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) - for i, info := range layerInfos { - m.LayersDescriptors[i].MediaType = original[i].MediaType - m.LayersDescriptors[i].Digest = info.Digest - m.LayersDescriptors[i].Size = info.Size - m.LayersDescriptors[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema2) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - s2 := &Schema2Image{} - if err := json.Unmarshal(config, s2); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: "", - Created: s2.Created, - DockerVersion: s2.DockerVersion, - Labels: s2.Config.Labels, - Architecture: s2.Architecture, - Os: s2.OS, - Layers: []string{}, - }, nil -} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go index 01f57a32..e329ee57 100644 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ b/vendor/github.com/containers/image/manifest/manifest.go @@ -2,9 +2,7 @@ package manifest import ( "encoding/json" - "fmt" - "github.com/containers/image/types" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -40,33 +38,6 @@ var DefaultRequestedManifestMIMETypes = []string{ // DockerV2ListMediaType, // FIXME: Restore this ASAP } -// Manifest is an interface for parsing, modifying image manifests in isolation. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members -// directly. -// -// See types.Image for functionality not limited to manifests, including format conversions and config parsing. -// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. -type Manifest interface { - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - ConfigInfo() types.BlobInfo - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. 
- // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) - UpdateLayerInfos(layerInfos []types.BlobInfo) error - - // Inspect returns various information for (skopeo inspect) parsed from the manifest, - // incorporating information from a configuration blob returned by configGetter, if - // the underlying image format is expected to include a configuration blob. - Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) - - // Serialize returns the manifest in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! - Serialize() ([]byte, error) -} - // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, // but we may not have such metadata available (e.g. when the manifest is a local file). @@ -171,47 +142,3 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { } return js.PrettySignature("signatures") } - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, -// centralizing various workarounds. -func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } -} - -// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type -func FromBlob(manblob []byte, mt string) (Manifest, error) { - switch NormalizedMIMEType(mt) { - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: - return Schema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return OCI1FromManifest(manblob) - case DockerV2Schema2MediaType: - return Schema2FromManifest(manblob) - case DockerV2ListMediaType: - return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") - default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go deleted file mode 100644 index 18e27d23..00000000 --- a/vendor/github.com/containers/image/manifest/oci.go +++ /dev/null @@ -1,108 +0,0 @@ -package manifest - -import ( - "encoding/json" - "time" - - "github.com/containers/image/types" - "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// OCI1 is a manifest.Manifest implementation for OCI images. -// The underlying data from imgspecv1.Manifest is also available. -type OCI1 struct { - imgspecv1.Manifest -} - -// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifest []byte) (*OCI1, error) { - oci1 := OCI1{} - if err := json.Unmarshal(manifest, &oci1); err != nil { - return nil, err - } - return &oci1, nil -} - -// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. -func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { - return &OCI1{ - imgspecv1.Manifest{ - Versioned: specs.Versioned{SchemaVersion: 2}, - Config: config, - Layers: layers, - }, - } -} - -// OCI1Clone creates a copy of the supplied OCI1 manifest. -func OCI1Clone(src *OCI1) *OCI1 { - return &OCI1{ - Manifest: src.Manifest, - } -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *OCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations, MediaType: imgspecv1.MediaTypeImageConfig} -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *OCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.Layers { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) - } - return blobs -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.Layers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) - } - original := m.Layers - m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) - for i, info := range layerInfos { - m.Layers[i].MediaType = original[i].MediaType - m.Layers[i].Digest = info.Digest - m.Layers[i].Size = info.Size - m.Layers[i].Annotations = info.Annotations - m.Layers[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *OCI1) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
-func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - v1 := &imgspecv1.Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - created := time.Time{} - if v1.Created != nil { - created = *v1.Created - } - return &types.ImageInspectInfo{ - Tag: "", - Created: created, - DockerVersion: "", - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - Layers: []string{}, - }, nil -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index fd437f5a..8644202f 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -86,7 +86,3 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) { return s.unpackedSrc.GetSignatures(c) } - -func (s *ociArchiveImageSource) UpdatedLayerInfos() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index 67f0c3b8..be8a2aa7 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -133,11 +133,6 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e return nil, 0, errWrap } -// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociImageSource) UpdatedLayerInfos() []types.BlobInfo { - return nil -} - func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 794521b1..0117f2e0 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -242,10 +242,6 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err return sigs, nil } -func (s *openshiftImageSource) UpdatedLayerInfos() []types.BlobInfo { - return nil -} - // ensureImageIsResolved sets up s.docker and s.imageStreamImageName func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { if s.docker != nil { diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 185e5f49..08fa71b5 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -4,13 +4,11 @@ import ( "bytes" "context" "encoding/json" - "fmt" "io" "io/ioutil" - "os" - "path/filepath" - "strings" - "sync/atomic" + "time" + + "github.com/pkg/errors" "github.com/containers/image/image" "github.com/containers/image/manifest" @@ -18,15 +16,10 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" - "github.com/docker/docker/api/types/versions" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" + ddigest "github.com/opencontainers/go-digest" 
"github.com/sirupsen/logrus" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - var ( // ErrBlobDigestMismatch is returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. @@ -34,7 +27,8 @@ var ( // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetTargetManifest() is called. + // ErrNoManifestLists is returned when GetTargetManifest() is + // called. ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. @@ -43,23 +37,32 @@ var ( type storageImageSource struct { imageRef storageReference - ID string - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice } type storageImageDestination struct { - imageRef storageReference // The reference we'll use to name the image - publicRef storageReference // The reference we return when asked about the name we'll give to the image - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - signatures []byte // Signature contents, temporary - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary + Manifest []byte `json:"-"` // Manifest contents, temporary + Signatures []byte `json:"-"` // Signature contents, temporary + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageLayerMetadata struct { + Digest string `json:"digest,omitempty"` + Size int64 `json:"size"` + CompressedSize int64 `json:"compressed-size,omitempty"` } type storageImage struct { 
@@ -67,19 +70,19 @@ type storageImage struct { size int64 } -// newImageSource sets up an image for reading. +// newImageSource sets us up to read out an image, which needs to already exist. func newImageSource(imageRef storageReference) (*storageImageSource, error) { - // First, locate the image. img, err := imageRef.resolveImage() if err != nil { return nil, err } - - // Build the reader object. image := &storageImageSource{ imageRef: imageRef, + Created: time.Now(), ID: img.ID, - layerPosition: make(map[digest.Digest]int), + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + LayerPosition: make(map[ddigest.Digest]int), SignatureSizes: []int{}, } if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { @@ -88,266 +91,202 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) { return image, nil } -// Reference returns the image reference that we used to find this image. -func (s storageImageSource) Reference() types.ImageReference { - return s.imageRef -} - -// Close cleans up any resources we tied up while reading the image. -func (s storageImageSource) Close() error { - return nil -} - -// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - var layer storage.Layer - var diffOptions *storage.DiffOptions - // We need a valid digest value. - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) - // If it's not a layer, then it must be a data item. - if len(layers) == 0 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // Step through the list of matching layers. Tests may want to verify that if we have multiple layers - // which claim to have the same contents, that we actually do have multiple layers, otherwise we could - // just go ahead and use the first one every time. - i := s.layerPosition[info.Digest] - s.layerPosition[info.Digest] = i + 1 - if len(layers) > 0 { - layer = layers[i%len(layers)] - } - // Force the storage layer to not try to match any compression that was used when the layer was first - // handed to it. 
- noCompression := archive.Uncompressed - diffOptions = &storage.DiffOptions{ - Compression: &noCompression, - } - if layer.UncompressedSize < 0 { - n = -1 - } else { - n = layer.UncompressedSize - } - logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) - rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) - if err != nil { - return nil, -1, "", err - } - return rc, n, layer.ID, err -} - -// GetManifest() reads the image's manifest. -func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - if len(s.cachedManifest) == 0 { - cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "manifest") - if err != nil { - return nil, "", err - } - s.cachedManifest = cachedBlob - } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err -} - -// UpdatedLayerInfos() returns the list of layer blobs that make up the root filesystem of -// the image, after they've been decompressed. -func (s *storageImageSource) UpdatedLayerInfos() []types.BlobInfo { - simg, err := s.imageRef.transport.store.Image(s.ID) - if err != nil { - logrus.Errorf("error reading image %q: %v", s.ID, err) - return nil - } - updatedBlobInfos := []types.BlobInfo{} - layerID := simg.TopLayer - _, manifestType, err := s.GetManifest() - if err != nil { - logrus.Errorf("error reading image manifest for %q: %v", s.ID, err) - return nil - } - uncompressedLayerType := "" - switch manifestType { - case imgspecv1.MediaTypeImageManifest: - uncompressedLayerType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: - // This is actually a compressed type, but there's no uncompressed type defined - uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType - } - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err) - return nil - } - if layer.UncompressedDigest == "" { - logrus.Errorf("uncompressed digest for layer %q is unknown", layerID) - return nil - } - if layer.UncompressedSize < 0 { - logrus.Errorf("uncompressed size for layer %q is unknown", layerID) - return nil - } - blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, - Size: layer.UncompressedSize, - MediaType: uncompressedLayerType, - } - updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) - layerID = layer.Parent - } - return updatedBlobInfos -} - -// GetTargetManifest() is not supported. -func (s *storageImageSource) GetTargetManifest(d digest.Digest) (manifestBlob []byte, MIMEType string, err error) { - return nil, "", ErrNoManifestLists -} - -// GetSignatures() parses the image's signatures blob into a slice of byte slices. 
-func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { - var offset int - sigslice := [][]byte{} - signature := []byte{} - if len(s.SignatureSizes) > 0 { - signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID) - } - signature = signatureBlob - } - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) - } - return sigslice, nil -} - -// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until -// it's time to Commit() the image +// newImageDestination sets us up to write a new image. func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { - directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage") - if err != nil { - return nil, errors.Wrapf(err, "error creating a temporary directory") - } - // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite - // schema1 image manifests to remove embedded references, since that changes the manifest's - // digest, and that makes the image unusable if we subsequently try to access it using a - // reference that mentions the no-longer-correct digest. - publicRef := imageRef - publicRef.name = nil image := &storageImageDestination{ imageRef: imageRef, - publicRef: publicRef, - directory: directory, - blobDiffIDs: make(map[digest.Digest]digest.Digest), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), + Tag: imageRef.reference, + Created: time.Now(), + ID: imageRef.id, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + BlobData: make(map[ddigest.Digest][]byte), SignatureSizes: []int{}, } return image, nil } -// Reference returns a mostly-usable image reference that can't return a DockerReference, to -// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to -// remove image names that they contain which don't match the value we're using. +func (s storageImageSource) Reference() types.ImageReference { + return s.imageRef +} + func (s storageImageDestination) Reference() types.ImageReference { - return s.publicRef + return s.imageRef } -// Close cleans up the temporary directory. -func (s *storageImageDestination) Close() error { - return os.RemoveAll(s.directory) +func (s storageImageSource) Close() error { + return nil +} + +func (s storageImageDestination) Close() error { + return nil } -// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed -// data when handing it to us. func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't - // bother compressing them before handing them to us, if they're not already compressed. + // We ultimately have to decompress layers to populate trees on disk, + // so callers shouldn't bother compressing them before handing them to + // us, if they're not already compressed. return false } -// PutBlob stores a layer or data blob in our temporary directory, checking that any information -// in the blobinfo matches the incoming data. 
-func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { +// putBlob stores a layer or data blob, optionally enforcing that a digest in +// blobinfo matches the incoming data. +func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) { + blobSize := blobinfo.Size + digest := blobinfo.Digest errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, } - // Set up to digest the blob and count its size while saving it to a file. - hasher := digest.Canonical.Digester() - if blobinfo.Digest.Validate() == nil { - if a := blobinfo.Digest.Algorithm(); a.Available() { + // Try to read an initial snippet of the blob. + buf := [archive.HeaderSize]byte{} + n, err := io.ReadAtLeast(stream, buf[:], len(buf)) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return errorBlobInfo, err + } + // Set up to read the whole blob (the initial snippet, plus the rest) + // while digesting it with either the default, or the passed-in digest, + // if one was specified. + hasher := ddigest.Canonical.Digester() + if digest.Validate() == nil { + if a := digest.Algorithm(); a.Available() { hasher = a.Digester() } } - diffID := digest.Canonical.Digester() - filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) - } - defer file.Close() + hash := "" counter := ioutils.NewWriteCounter(hasher.Hash()) - reader := io.TeeReader(io.TeeReader(stream, counter), file) - decompressed, err := archive.DecompressStream(reader) - if err != nil { - return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") - } - // Copy the data to the file. - _, err = io.Copy(diffID.Hash(), decompressed) - decompressed.Close() - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) - } - // Ensure that any information that we were given about the blob is correct. - if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { - return errorBlobInfo, ErrBlobDigestMismatch - } - if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - return errorBlobInfo, ErrBlobSizeMismatch - } - // Record information about the blob. - s.blobDiffIDs[hasher.Digest()] = diffID.Digest() - s.fileSizes[hasher.Digest()] = counter.Count - s.filenames[hasher.Digest()] = filename - blobDigest := blobinfo.Digest - if blobDigest.Validate() != nil { - blobDigest = hasher.Digest() - } - blobSize := blobinfo.Size - if blobSize < 0 { - blobSize = counter.Count + defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream) + multi := io.TeeReader(defragmented, counter) + if (n > 0) && archive.IsArchive(buf[:n]) { + // It's a filesystem layer. If it's not the first one in the + // image, we assume that the most recently added layer is its + // parent. + parentLayer := "" + for _, blob := range s.BlobList { + if layerList, ok := s.Layers[blob.Digest]; ok { + parentLayer = layerList[len(layerList)-1] + } + } + // If we have an expected content digest, generate a layer ID + // based on the parent's ID and the expected content digest. 
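+		// Deriving the ID as a digest of (parent layer ID + "+" + expected
+		// content digest) keeps layer IDs deterministic: re-applying the
+		// same blob on top of the same parent reproduces the same ID, which
+		// is what lets the ErrDuplicateID case below detect a layer that
+		// was already imported.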
+		id := ""
+		if digest.Validate() == nil {
+			id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
+		}
+		// Attempt to create the identified layer and import its contents.
+		layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
+		if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
+			logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
+			return errorBlobInfo, err
+		}
+		if errors.Cause(err) == storage.ErrDuplicateID {
+			// We specified an ID, and there's already a layer with
+			// the same ID.  Drain the input so that we can look at
+			// its length and digest.
+			_, err := io.Copy(ioutil.Discard, multi)
+			if err != nil && err != io.EOF {
+				logrus.Debugf("error digesting layer blob %q as %q: %v", blobinfo.Digest, id, err)
+				return errorBlobInfo, err
+			}
+			hash = hasher.Digest().String()
+		} else {
+			// Applied the layer with the specified ID.  Note the
+			// size info and computed digest.
+			hash = hasher.Digest().String()
+			layerMeta := storageLayerMetadata{
+				Digest:         hash,
+				CompressedSize: counter.Count,
+				Size:           uncompressedSize,
+			}
+			if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
+				s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
+			}
+			// Hang on to the new layer's ID.
+			id = layer.ID
+		}
+		// Check if the size looks right.
+		if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+			logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
+			if layer != nil {
+				// Something's wrong; delete the newly-created layer.
+				s.imageRef.transport.store.DeleteLayer(layer.ID)
+			}
+			return errorBlobInfo, ErrBlobSizeMismatch
+		}
+		// If the content digest was specified, verify it.
+		if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
+			logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+			if layer != nil {
+				// Something's wrong; delete the newly-created layer.
+				s.imageRef.transport.store.DeleteLayer(layer.ID)
+			}
+			return errorBlobInfo, ErrBlobDigestMismatch
+		}
+		// If we didn't get a blob size, return the one we calculated.
+		if blobSize == -1 {
+			blobSize = counter.Count
+		}
+		// If we didn't get a digest, construct one.
+		if digest == "" {
+			digest = ddigest.Digest(hash)
+		}
+		// Record that this layer blob is a layer, and the layer ID it
+		// ended up having.  This is a list, in case the same blob is
+		// being applied more than once.
+		s.Layers[digest] = append(s.Layers[digest], id)
+		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count})
+		if layer != nil {
+			logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
+		} else {
+			logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
+		}
+	} else {
+		// It's just data.  Finish scanning it in, check that our
+		// computed digest matches the passed-in digest, and store it,
+		// but leave it out of the blob-to-layer-ID map so that we can
+		// tell that it's not a layer.
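+		// (Non-layer "data" blobs are typically the image configuration
+		// and similar small items; they are kept in s.BlobData until
+		// Commit() stores them with SetImageBigData.)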
+ blob, err := ioutil.ReadAll(multi) + if err != nil && err != io.EOF { + return errorBlobInfo, err + } + hash = hasher.Digest().String() + if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { + logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) + return errorBlobInfo, ErrBlobSizeMismatch + } + // If we were given a digest, verify that the content matches + // it. + if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { + logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) + return errorBlobInfo, ErrBlobDigestMismatch + } + // If we didn't get a blob size, return the one we calculated. + if blobSize == -1 { + blobSize = int64(len(blob)) + } + // If we didn't get a digest, construct one. + if digest == "" { + digest = ddigest.Digest(hash) + } + // Save the blob for when we Commit(). + s.BlobData[digest] = blob + s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) + logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) } return types.BlobInfo{ - Digest: blobDigest, - Size: blobSize, - MediaType: blobinfo.MediaType, + Digest: digest, + Size: blobSize, }, nil } -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be -// reapplied using ReapplyBlob. -// +// PutBlob is used to both store filesystem layers and binary data that is part +// of the image. Filesystem layers are assumed to be imported in order, as +// that is required by some of the underlying storage drivers. +func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { + return s.putBlob(stream, blobinfo, true) +} + +// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. @@ -355,373 +294,93 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, if blobinfo.Digest == "" { return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } - if err := blobinfo.Digest.Validate(); err != nil { - return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) + for _, blob := range s.BlobList { + if blob.Digest == blobinfo.Digest { + return true, blob.Size, nil + } } - // Check if we've already cached it in a file. - if size, ok := s.fileSizes[blobinfo.Digest]; ok { - return true, size, nil - } - // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Save this for completeness. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, layers[0].UncompressedSize, nil - } - // Check if we have a was-compressed layer in storage that's based on that blob. 
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, layers[0].CompressedSize, nil - } - // Nope, we don't have it. return false, -1, nil } -// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the -// same one when it walks the list in the manifest. func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - present, size, err := s.HasBlob(blobinfo) - if !present { - return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) - } + err := blobinfo.Digest.Validate() if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) + return types.BlobInfo{}, err } - blobinfo.Size = size - return blobinfo, nil -} - -// computeID computes a recommended image ID based on information we have so far. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { - mb, err := m.Serialize() + if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { + b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) + if err != nil { + return types.BlobInfo{}, err + } + return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil + } + layerList := s.Layers[blobinfo.Digest] + rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) if err != nil { - return "" + return types.BlobInfo{}, err } - switch manifest.GuessMIMEType(mb) { - case manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageManifest: - // For Schema2 and OCI1(?), the ID is just the hex part of the digest of the config blob. - logrus.Debugf("trivial image ID for configured blob") - configInfo := m.ConfigInfo() - if configInfo.Digest.Validate() == nil { - return configInfo.Digest.Hex() - } - return "" - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields - // that aren't directly comparable using info from the manifest. - logrus.Debugf("computing image ID using compat data") - s1, ok := m.(*manifest.Schema1) - if !ok { - logrus.Debugf("schema type was guessed wrong?") - return "" - } - if len(s1.History) == 0 { - logrus.Debugf("image has no layers") - return "" - } - s2 := struct { - manifest.Schema2Image - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - ParentID string `json:"parent_id,omitempty"` - LayerID string `json:"layer_id,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` - Size int64 `json:",omitempty"` - }{} - config := []byte(s1.History[0].V1Compatibility) - if json.Unmarshal(config, &s2) != nil { - logrus.Debugf("error decoding configuration") - return "" - } - // Images created with versions prior to 1.8.3 require us to rebuild the object. 
- if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") { - err = json.Unmarshal(config, &s2) - if err != nil { - logrus.Infof("error decoding compat image config %s: %v", string(config), err) - return "" - } - config, err = json.Marshal(&s2) - if err != nil { - logrus.Infof("error re-encoding compat image config %#v: %v", s2, err) - return "" - } - } - // Build the history. - for _, h := range s1.History { - compat := manifest.Schema1V1Compatibility{} - if json.Unmarshal([]byte(h.V1Compatibility), &compat) != nil { - logrus.Debugf("error decoding history information") - return "" - } - hitem := manifest.Schema2History{ - Created: compat.Created, - CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), - Comment: compat.Comment, - EmptyLayer: compat.ThrowAway, - } - s2.History = append([]manifest.Schema2History{hitem}, s2.History...) - } - // Build the rootfs information. We need the decompressed sums that we've been - // calculating to fill in the DiffIDs. - s2.RootFS = &manifest.Schema2RootFS{ - Type: "layers", - } - for _, fslayer := range s1.FSLayers { - blobSum := fslayer.BlobSum - diffID, ok := s.blobDiffIDs[blobSum] - if !ok { - logrus.Infof("error looking up diffID for blob %q", string(blobSum)) - return "" - } - s2.RootFS.DiffIDs = append([]digest.Digest{diffID}, s2.RootFS.DiffIDs...) - } - // And now for some raw manipulation. - raw := make(map[string]*json.RawMessage) - err = json.Unmarshal(config, &raw) - if err != nil { - logrus.Infof("error re-decoding compat image config %#v: %v", s2, err) - return "" - } - // Drop some fields. - delete(raw, "id") - delete(raw, "parent") - delete(raw, "parent_id") - delete(raw, "layer_id") - delete(raw, "throwaway") - delete(raw, "Size") - // Add the history and rootfs information. - rootfs, err := json.Marshal(s2.RootFS) - if err != nil { - logrus.Infof("error encoding rootfs information %#v: %v", s2.RootFS, err) - return "" - } - rawRootfs := json.RawMessage(rootfs) - raw["rootfs"] = &rawRootfs - history, err := json.Marshal(s2.History) - if err != nil { - logrus.Infof("error encoding history information %#v: %v", s2.History, err) - return "" - } - rawHistory := json.RawMessage(history) - raw["history"] = &rawHistory - // Encode the result, and take the digest of that result. - config, err = json.Marshal(raw) - if err != nil { - logrus.Infof("error re-encoding compat image config %#v: %v", s2, err) - return "" - } - return digest.FromBytes(config).Hex() - case manifest.DockerV2ListMediaType: - logrus.Debugf("no image ID for manifest list") - // FIXME - case imgspecv1.MediaTypeImageIndex: - logrus.Debugf("no image ID for manifest index") - // FIXME - default: - logrus.Debugf("no image ID for unrecognized manifest type %q", manifest.GuessMIMEType(mb)) - // FIXME - } - return "" -} - -// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig -// information out of it for Inspect(). -func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { - if info.Digest == "" { - return nil, errors.Errorf(`no digest supplied when reading blob`) - } - if err := info.Digest.Validate(); err != nil { - return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) - } - // Assume it's a file, since we're only calling this from a place that expects to read files. 
- if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) - if err2 != nil { - return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) - } - return contents, nil - } - // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. - return nil, errors.New("blob not found") + return s.putBlob(rc, blobinfo, false) } func (s *storageImageDestination) Commit() error { - // Find the list of layer blobs. - if len(s.manifest) == 0 { - return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") - } - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) - if err != nil { - return errors.Wrapf(err, "error parsing manifest") - } - layerBlobs := man.LayerInfos() - // Extract or find the layers. + // Create the image record. lastLayer := "" - addedLayers := []string{} - for _, blob := range layerBlobs { - var diff io.ReadCloser - // Check if there's already a layer with the ID that we'd give to the result of applying - // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - diffID, haveDiffID := s.blobDiffIDs[blob.Digest] - if !haveDiffID { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. - logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.HasBlob(blob) - if err != nil { - return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) - } - if !has { - return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) - } - diffID, haveDiffID = s.blobDiffIDs[blob.Digest] - if !haveDiffID { - return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) - } - } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() - } - if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { - // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - continue - } - // Check if we cached a file with that blobsum. If we didn't already have a layer with - // the blob's contents, we should have gotten a copy. - if filename, ok := s.filenames[blob.Digest]; ok { - // Use the file's contents to initialize the layer. - file, err2 := os.Open(filename) - if err2 != nil { - return errors.Wrapf(err2, "error opening file %q", filename) - } - defer file.Close() - diff = file - } - if diff == nil { - // Try to find a layer with contents matching that blobsum. - layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } - } - if layer == "" { - return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) - } - // Use the layer's contents to initialize the new layer. - noCompression := archive.Uncompressed - diffOptions := &storage.DiffOptions{ - Compression: &noCompression, - } - diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) - if err2 != nil { - return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) - } - defer diff.Close() - } - if diff == nil { - // This shouldn't have happened. 
- return errors.Errorf("error applying blob %q: content not found", blob.Digest) - } - // Build the new layer using the diff, regardless of where it came from. - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) - if err != nil { - return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) - } - lastLayer = layer.ID - addedLayers = append([]string{lastLayer}, addedLayers...) - } - // If one of those blobs was a configuration blob, then we can try to dig out the date when the image - // was originally created, in case we're just copying it. If not, no harm done. - var options *storage.ImageOptions - if inspect, err := man.Inspect(s.getConfigBlob); err == nil { - logrus.Debugf("setting image creation date to %s", inspect.Created) - options = &storage.ImageOptions{ - CreationDate: inspect.Created, + for _, blob := range s.BlobList { + if layerList, ok := s.Layers[blob.Digest]; ok { + lastLayer = layerList[len(layerList)-1] } } - // Create the image record, pointing to the most-recently added layer. - intendedID := s.imageRef.id - if intendedID == "" { - intendedID = s.computeID(man) - } - oldNames := []string{} - img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", intendedID) + return errors.Wrapf(err, "error creating image %q", s.ID) } - img, err = s.imageRef.transport.store.Image(intendedID) + img, err = s.imageRef.transport.store.Image(s.ID) if err != nil { - return errors.Wrapf(err, "error reading image %q", intendedID) + return errors.Wrapf(err, "error reading image %q", s.ID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) } logrus.Debugf("reusing image ID %q", img.ID) - oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } - // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. 
- dataBlobs := make(map[digest.Digest]struct{}) - for blob := range s.filenames { - dataBlobs[blob] = struct{}{} + s.ID = img.ID + names := img.Names + if s.Tag != "" { + names = append(names, s.Tag) } - for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) - } - for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) - if err != nil { - return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) - return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) - } - } - // Set the reference's name on the image. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, verboseName(name)) - } - if len(oldNames) > 0 { - names = append(names, oldNames...) - } + // We have names to set, so move those names to this image. + if len(names) > 0 { if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) + logrus.Debugf("error setting names on image %q: %v", img.ID, err) + return err } logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the manifest. - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "manifest", s.manifest); err != nil { + // Save the data blobs to disk, and drop their contents from memory. + keys := []ddigest.Digest{} + for k, v := range s.BlobData { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) + return err + } + keys = append(keys, k) + } + for _, key := range keys { + delete(s.BlobData, key) + } + // Save the manifest, if we have one. + if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -729,14 +388,12 @@ func (s *storageImageDestination) Commit() error { return err } // Save the signatures, if we have any. 
-	if len(s.signatures) > 0 {
-		if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
-			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
-				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
-			}
-			logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
-			return err
+	if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil {
+		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
 		}
+		logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+		return err
 	}
 	// Save our metadata.
 	metadata, err := json.Marshal(s)
@@ -748,7 +405,7 @@ func (s *storageImageDestination) Commit() error {
 		return err
 	}
 	if len(metadata) != 0 {
-		if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+		if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil {
 			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
 				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
 			}
@@ -761,7 +418,7 @@ func (s *storageImageDestination) Commit() error {
 }
 
 var manifestMIMETypes = []string{
-	imgspecv1.MediaTypeImageManifest,
+	// TODO(runcom): we'll add OCI as part of another PR here
 	manifest.DockerV2Schema2MediaType,
 	manifest.DockerV2Schema1SignedMediaType,
 	manifest.DockerV2Schema1MediaType,
@@ -771,20 +428,23 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
 	return manifestMIMETypes
 }
 
-// PutManifest writes the manifest to the destination.
+// PutManifest writes the manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// though it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
 func (s *storageImageDestination) PutManifest(manifest []byte) error {
-	s.manifest = make([]byte, len(manifest))
-	copy(s.manifest, manifest)
+	s.Manifest = make([]byte, len(manifest))
+	copy(s.Manifest, manifest)
 	return nil
 }
 
-// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
-// previously supplied to PutSignatures().
+// SupportsSignatures returns an error if we can't expect GetSignatures() to
+// return data that was previously supplied to PutSignatures().
 func (s *storageImageDestination) SupportsSignatures() error {
 	return nil
 }
 
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
 // uploaded to the image destination, true otherwise.
 func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
 	return false
 }
@@ -795,7 +455,6 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool {
 	return true
 }
 
-// PutSignatures records the image's signatures for committing as a single data blob.
 func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
 	sizes := []int{}
 	sigblob := []byte{}
@@ -806,55 +465,139 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
 		copy(newblob[len(sigblob):], sig)
 		sigblob = newblob
 	}
-	s.signatures = sigblob
+	s.Signatures = sigblob
 	s.SignatureSizes = sizes
 	return nil
 }
 
-// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
-// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	return rc, n, err
+}
+
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+	err = info.Digest.Validate()
+	if err != nil {
+		return nil, -1, "", err
+	}
+	if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 {
+		b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
+		if err != nil {
+			return nil, -1, "", err
+		}
+		r := bytes.NewReader(b)
+		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+		return ioutil.NopCloser(r), int64(r.Len()), "", nil
+	}
+	// If the blob was "put" more than once, we have multiple layer IDs
+	// which should all produce the same diff.  For the sake of tests that
+	// want to make sure we created different layers each time the blob was
+	// "put", though, cycle through the layers.
+	layerList := s.Layers[info.Digest]
+	position, ok := s.LayerPosition[info.Digest]
+	if !ok {
+		position = 0
+	}
+	s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
+	logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
+	rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
+	return rc, n, layerList[position], err
+}
+
+func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
+	layer, err := store.Layer(layerID)
+	if err != nil {
+		return nil, -1, err
+	}
+	layerMeta := storageLayerMetadata{
+		CompressedSize: -1,
+	}
+	if layer.Metadata != "" {
+		if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+			return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
+		}
+	}
+	if layerMeta.CompressedSize <= 0 {
+		n = -1
+	} else {
+		n = layerMeta.CompressedSize
+	}
+	diff, err := store.Diff("", layer.ID, nil)
+	if err != nil {
+		return nil, -1, err
+	}
+	return diff, n, nil
+}
+
+func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
+	manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest")
+	return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
+}
+
+func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+	return nil, "", ErrNoManifestLists
+}
+
+func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) {
+	var offset int
+	signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
+	if err != nil {
+		return nil, err
+	}
+	sigslice := [][]byte{}
+	for _, length := range s.SignatureSizes {
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
 func (s *storageImageSource) getSize() (int64, error) {
 	var sum int64
-	// Size up the data blobs.
-	dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID)
+	names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
 	if err != nil {
-		return -1, errors.Wrapf(err, "error reading image %q", s.ID)
+		return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id)
 	}
-	for _, dataName := range dataNames {
-		bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName)
+	for _, name := range names {
+		bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name)
 		if err != nil {
-			return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID)
+			return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id)
 		}
 		sum += bigSize
 	}
-	// Add the signature sizes.
 	for _, sigSize := range s.SignatureSizes {
 		sum += int64(sigSize)
 	}
-	// Prepare to walk the layer list.
-	img, err := s.imageRef.transport.store.Image(s.ID)
-	if err != nil {
-		return -1, errors.Wrapf(err, "error reading image info %q", s.ID)
-	}
-	// Walk the layer list.
-	layerID := img.TopLayer
-	for layerID != "" {
-		layer, err := s.imageRef.transport.store.Layer(layerID)
-		if err != nil {
-			return -1, err
+	for _, layerList := range s.Layers {
+		for _, layerID := range layerList {
+			layer, err := s.imageRef.transport.store.Layer(layerID)
+			if err != nil {
+				return -1, err
+			}
+			layerMeta := storageLayerMetadata{
+				Size: -1,
+			}
+			if layer.Metadata != "" {
+				if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+					return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
				}
+			}
+			if layerMeta.Size < 0 {
+				return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+			}
+			sum += layerMeta.Size
 		}
-		if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
-			return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
-		}
-		sum += layer.UncompressedSize
-		if layer.Parent == "" {
-			break
-		}
-		layerID = layer.Parent
 	}
 	return sum, nil
 }
 
+func (s *storageImage) Size() (int64, error) {
+	return s.size, nil
+}
+
 // newImage creates an image that also knows its size
 func newImage(s storageReference) (types.Image, error) {
 	src, err := newImageSource(s)
@@ -871,8 +614,3 @@ func newImage(s storageReference) (types.Image, error) {
 	}
 	return &storageImage{Image: img, size: size}, nil
 }
-
-// Size() returns the previously-computed size of the image, with no error.
-func (s storageImage) Size() (int64, error) { - return s.size, nil -} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index fb8b0ccc..ded58705 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -6,7 +6,6 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" - digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -19,11 +18,9 @@ type storageReference struct { reference string id string name reference.Named - tag string - digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by @@ -33,8 +30,6 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, - tag: tag, - digest: digest, } } @@ -81,21 +76,8 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag or digest, if we have either, else return it bare. +// Return a name with a tag, if we have a name to base them on. func (s storageReference) DockerReference() reference.Named { - if s.name == nil { - return nil - } - if s.tag != "" { - if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { - return namedTagged - } - } - if s.digest != "" { - if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { - return canonical - } - } return s.name } @@ -109,7 +91,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.reference == "" { + if s.name == nil { return storeSpec + "@" + s.id } if s.id == "" { @@ -138,8 +120,11 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - name := reference.TrimNamed(s.name) - components := strings.Split(name.String(), "/") + if s.id != "" { + // The reference without the ID is also a valid namespace. 
+ namespaces = append(namespaces, storeSpec+s.reference) + } + components := strings.Split(s.name.Name(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 39c81a58..1a0ebd04 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -11,14 +11,11 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" + ddigest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) -const ( - minimumTruncatedIDLength = 3 -) - func init() { transports.Register(Transport) } @@ -104,124 +101,60 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { var name reference.Named + var sum digest.Digest + var err error if ref == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference") + return nil, ErrInvalidReference } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + return nil, ErrInvalidReference } ref = ref[closeIndex+1:] } - - // The last segment, if there's more than one, is either a digest from a reference, or an image ID. - split := strings.LastIndex(ref, "@") - idOrDigest := "" - if split != -1 { - // Peel off that last bit so that we can work on the rest. - idOrDigest = ref[split+1:] - if idOrDigest == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) + refInfo := strings.SplitN(ref, "@", 2) + if len(refInfo) == 1 { + // A name. + name, err = reference.ParseNormalizedNamed(refInfo[0]) + if err != nil { + return nil, err } - ref = ref[:split] + } else if len(refInfo) == 2 { + // An ID, possibly preceded by a name. + if refInfo[0] != "" { + name, err = reference.ParseNormalizedNamed(refInfo[0]) + if err != nil { + return nil, err + } + } + sum, err = digest.Parse(refInfo[1]) + if err != nil || sum.Validate() != nil { + sum, err = digest.Parse("sha256:" + refInfo[1]) + if err != nil || sum.Validate() != nil { + return nil, err + } + } + } else { // Coverage: len(refInfo) is always 1 or 2 + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference } - - // The middle segment (now the last segment), if there is one, is a digest. - split = strings.LastIndex(ref, "@") - sum := digest.Digest("") - if split != -1 { - sum = digest.Digest(ref[split+1:]) - if sum == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) - } - ref = ref[:split] - } - - // If we have something that unambiguously should be a digest, validate it, and then the third part, - // if we have one, as an ID. 
- id := "" - if sum != "" { - if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) - } - if err := sum.Validate(); err != nil { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) - } - id = idOrDigest - if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength { - // The ID is a truncated version of the ID of an image that's present in local storage, - // so we might as well use the expanded value. - id = img.ID - } - } else if idOrDigest != "" { - // There was no middle portion, so the final portion could be either a digest or an ID. - if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { - // It's an ID. - id = idOrDigest - } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { - // It's a digest. - sum = idSum - } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength { - // It's a truncated version of the ID of an image that's present in local storage, - // and we may need the expanded value. - id = img.ID - } else { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) - } - } - - // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's - // at least of what we guess is a reasonable minimum length, because we don't want a really short value - // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. - if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { - if img, err := store.Image(idOrDigest); err == nil && img != nil { - // It's a truncated version of the ID of an image that's present in local storage; - // we need to expand it. - id = img.ID - ref = "" - } - } - - // The initial portion is probably a name, possibly with a tag. - if ref != "" { - var err error - if name, err = reference.ParseNormalizedNamed(ref); err != nil { - return nil, errors.Wrapf(err, "error parsing named reference %q", ref) - } - } - if name == nil && sum == "" && id == "" { - return nil, errors.Errorf("error parsing reference") - } - - // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - - // Convert the name back into a reference string, if we got a name. 
+	id := ""
+	if sum.Validate() == nil {
+		id = sum.Hex()
+	}
 	refname := ""
-	tag := ""
 	if name != nil {
-		if sum.Validate() == nil {
-			canonical, err := reference.WithDigest(name, sum)
-			if err != nil {
-				return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
-			}
-			refname = verboseName(canonical)
-		} else {
-			name = reference.TagNameOnly(name)
-			tagged, ok := name.(reference.Tagged)
-			if !ok {
-				return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
-			}
-			refname = verboseName(name)
-			tag = tagged.Tag()
-		}
+		name = reference.TagNameOnly(name)
+		refname = verboseName(name)
 	}
 	if refname == "" {
 		logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
@@ -230,7 +163,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
 	} else {
 		logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
 	}
-	return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
+	return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil
 }
 
 func (s *storageTransport) GetStore() (storage.Store, error) {
@@ -249,14 +182,11 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
 	return s.store, nil
 }
 
-// ParseReference takes a name and a tag or digest and/or ID
-// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
+// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
 // possibly prefixed with a store specifier in the form "[_graphroot_]" or
 // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
 // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
 // tries to figure out which it is, and returns it in a reference object.
-// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
-// even be specified as if it were a _name_, value.
 func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
 	var store storage.Store
 	// Check if there's a store location prefix.  If there is, then it
@@ -405,7 +335,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
 		if err != nil {
 			return err
 		}
-		_, err = digest.Parse("sha256:" + scopeInfo[1])
+		_, err = ddigest.Parse("sha256:" + scopeInfo[1])
 		if err != nil {
 			return err
 		}
@@ -415,28 +345,11 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
 	return nil
 }
 
-func verboseName(r reference.Reference) string {
-	if r == nil {
-		return ""
-	}
-	named, isNamed := r.(reference.Named)
-	digested, isDigested := r.(reference.Digested)
-	tagged, isTagged := r.(reference.Tagged)
-	name := ""
+func verboseName(name reference.Named) string {
+	name = reference.TagNameOnly(name)
 	tag := ""
-	sum := ""
-	if isNamed {
-		name = (reference.TrimNamed(named)).String()
+	if tagged, ok := name.(reference.NamedTagged); ok {
+		tag = ":" + tagged.Tag()
 	}
-	if isTagged {
-		if tagged.Tag() != "" {
-			tag = ":" + tagged.Tag()
-		}
-	}
-	if isDigested {
-		if digested.Digest().Validate() == nil {
-			sum = "@" + digested.Digest().String()
-		}
-	}
-	return name + tag + sum
+	return name.Name() + tag
 }
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index bae7319a..4ede907b 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -96,7 +96,6 @@ type BlobInfo struct {
 	Size        int64 // -1 if unknown
 	URLs        []string
 	Annotations map[string]string
-	MediaType   string
 }
 
 // ImageSource is a service, possibly remote (= slow), to download components of a single image.
@@ -119,14 +118,10 @@ type ImageSource interface {
 	// out of a manifest list.
 	GetTargetManifest(digest digest.Digest) ([]byte, string, error)
 	// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-	// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// The Digest field in BlobInfo is guaranteed to be provided; Size may be -1.
 	GetBlob(BlobInfo) (io.ReadCloser, int64, error)
 	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
 	GetSignatures(context.Context) ([][]byte, error)
-	// UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
-	// The Digest field is guaranteed to be provided; Size may be -1.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	UpdatedLayerInfos() []BlobInfo
 }
 
 // ImageDestination is a service, possibly remote (= slow), to store components of a single image.
@@ -158,10 +153,9 @@ type ImageDestination interface {
 	AcceptsForeignLayerURLs() bool
 	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
 	MustMatchRuntimeOS() bool
-	// PutBlob writes contents of stream and returns data representing the result.
+	// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 	// inputInfo.Size is the expected length of stream, if known.
-	// inputInfo.MediaType describes the blob format, if known.
 	// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 	// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. @@ -211,10 +205,6 @@ type UnparsedImage interface { Manifest() ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures(ctx context.Context) ([][]byte, error) - // UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest. - // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // WARNING: The list may contain duplicates, and they are semantically relevant. - UpdatedLayerInfos() []BlobInfo } // Image is the primary API for inspecting properties of images. @@ -225,7 +215,7 @@ type Image interface { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. + // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. ConfigBlob() ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -233,7 +223,7 @@ type Image interface { // old image manifests work (docker v2s1 especially). OCIConfig() (*v1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []BlobInfo // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -259,7 +249,7 @@ type Image interface { // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) EmbeddedDockerReference reference.Named ManifestMIMEType string // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. 
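Illustrative sketch, not part of this change: one way a caller might drive the ImageDestination contract shown above — check HasBlob, reapply blobs the destination already has, stream new ones through PutBlob, then Commit. The package name, copyBlobs, and the open callback are hypothetical.

package imagecopy

import (
	"io"

	"github.com/containers/image/types"
)

// copyBlobs is a hypothetical driver loop: blobs the destination already
// has are reapplied so they end up referenced by the image being written;
// the rest are streamed through PutBlob, which digests and counts them.
func copyBlobs(dest types.ImageDestination, blobs []types.BlobInfo, open func(types.BlobInfo) (io.ReadCloser, error)) error {
	for _, info := range blobs {
		ok, _, err := dest.HasBlob(info)
		if err != nil {
			return err
		}
		if ok {
			if _, err := dest.ReapplyBlob(info); err != nil {
				return err
			}
			continue
		}
		rc, err := open(info)
		if err != nil {
			return err
		}
		_, err = dest.PutBlob(rc, info)
		rc.Close()
		if err != nil {
			return err
		}
	}
	return dest.Commit()
}

Note that PutBlob digests and sizes the stream itself, so the caller does not need to pre-compute either for blobs it opens.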
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index 3263f580..d5bae3b0 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5 +github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 @@ -36,5 +36,4 @@ github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 -github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 -github.com/pquerna/ffjson master +github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8 diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index ae601f43..249f98bc 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -650,21 +650,10 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return err - } mountpoint := path.Join(d.dir(id), "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil { - // If no lower, we used the diff directory, so no work to do - if os.IsNotExist(err) { - return nil - } - return err - } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index f7e3dc34..c84bfaf9 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -1888,16 +1888,10 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye } storeLayers, err := m(store, d) if err != nil { - if errors.Cause(err) != ErrLayerUnknown { - return nil, err - } - continue + return nil, err } layers = append(layers, storeLayers...) } - if len(layers) == 0 { - return nil, ErrLayerUnknown - } return layers, nil } diff --git a/version/version.go b/version/version.go index 67f47db1..7eaa12c4 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "1.8.0-dev" +const Version = "1.9.0-dev"
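As a hypothetical usage note on the simplified reference handling above: verboseName() now reduces to TagNameOnly plus name:tag formatting, so digest and truncated-ID forms no longer appear in stored names. The sketch below uses only the reference helpers already vendored here.

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// Untagged names are defaulted to ":latest" by reference.TagNameOnly
	// before being stored, matching what verboseName() does.
	named, err := reference.ParseNormalizedNamed("busybox")
	if err != nil {
		panic(err)
	}
	named = reference.TagNameOnly(named)
	tag := ""
	if tagged, ok := named.(reference.NamedTagged); ok {
		tag = ":" + tagged.Tag()
	}
	fmt.Println(named.Name() + tag) // docker.io/library/busybox:latest
}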