From 680f7a6106005b8c2a93753b38402bc80a055320 Mon Sep 17 00:00:00 2001 From: Ryan Cole Date: Fri, 16 Jun 2017 13:24:00 -0400 Subject: [PATCH] Add `kpod push` command Push an image to a specified location, such as to an atomic registry or a local directory Signed-off-by: Ryan Cole --- README.md | 1 + cmd/kpod/common.go | 115 +++++++-- cmd/kpod/common_test.go | 12 + cmd/kpod/containerImageRef.go | 449 +++++++++++++++++++++++++++++++++ cmd/kpod/docker/types.go | 271 ++++++++++++++++++++ cmd/kpod/imagePushData.go | 406 +++++++++++++++++++++++++++++ cmd/kpod/imagePushData_test.go | 40 +++ cmd/kpod/main.go | 6 +- cmd/kpod/pull.go | 6 +- cmd/kpod/push.go | 196 ++++++++++++++ cmd/kpod/push_test.go | 72 ++++++ completions/bash/kpod | 102 +++++--- docs/kpod-push.1.md | 104 ++++++++ kpod-push.1.md | 47 ++++ server/server.go | 3 +- test/kpod.bats | 75 +++++- 16 files changed, 1848 insertions(+), 57 deletions(-) create mode 100644 cmd/kpod/containerImageRef.go create mode 100644 cmd/kpod/docker/types.go create mode 100644 cmd/kpod/imagePushData.go create mode 100644 cmd/kpod/imagePushData_test.go create mode 100644 cmd/kpod/push.go create mode 100644 cmd/kpod/push_test.go create mode 100644 docs/kpod-push.1.md create mode 100644 kpod-push.1.md diff --git a/README.md b/README.md index a5d63641..2e5661d3 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,7 @@ It is currently in active development in the Kubernetes community through the [d | [kpod-history(1)](/docs/kpod-history.1.md)] | Shows the history of an image | | [kpod-images(1)](/docs/kpod-images.1.md) | List images in local storage | | [kpod-pull(1)](/docs/kpod-pull.1.md) | Pull an image from a registry | +| [kpod-push(1)](/docs/kpod-push.1.md) | Push an image to a specified destination | | [kpod-rmi(1)](/docs/kpod-rmi.1.md) | Removes one or more images | | [kpod-tag(1)](/docs/kpod-tag.1.md) | Add an additional name to a local image | | [kpod-version(1)](/docs/kpod-version.1.md) | Display the Kpod Version Information | diff --git a/cmd/kpod/common.go b/cmd/kpod/common.go index 17f0a94d..98c9e60d 100644 --- a/cmd/kpod/common.go +++ b/cmd/kpod/common.go @@ -8,6 +8,7 @@ import ( "time" cp "github.com/containers/image/copy" + "github.com/containers/image/signature" is "github.com/containers/image/storage" "github.com/containers/image/types" "github.com/containers/storage" @@ -24,6 +25,33 @@ type imageMetadata struct { SignatureSizes []string `json:"signature-sizes"` } +// DockerRegistryOptions encapsulates settings that affect how we connect or +// authenticate to a remote registry. +type dockerRegistryOptions struct { + // DockerRegistryCreds is the user name and password to supply in case + // we need to pull an image from a registry, and it requires us to + // authenticate. + DockerRegistryCreds *types.DockerAuthConfig + // DockerCertPath is the location of a directory containing CA + // certificates which will be used to verify the registry's certificate + // (all files with names ending in ".crt"), and possibly client + // certificates and private keys (pairs of files with the same name, + // except for ".cert" and ".key" suffixes). + DockerCertPath string + // DockerInsecureSkipTLSVerify turns off verification of TLS + // certificates and allows connecting to registries without encryption. + DockerInsecureSkipTLSVerify bool +} + +// SigningOptions encapsulates settings that control whether or not we strip or +// add signatures to images when writing them. 
+type signingOptions struct { + // RemoveSignatures directs us to remove any signatures which are already present. + RemoveSignatures bool + // SignBy is a key identifier of some kind, indicating that a signature should be generated using the specified private key and stored with the image. + SignBy string +} + func getStore(c *cli.Context) (storage.Store, error) { options := storage.DefaultStoreOptions if c.GlobalIsSet("root") { @@ -50,31 +78,53 @@ func getStore(c *cli.Context) (storage.Store, error) { return store, nil } +func getCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *dockerRegistryOptions, signing signingOptions) *cp.Options { + if srcDockerRegistry == nil { + srcDockerRegistry = &dockerRegistryOptions{} + } + if destDockerRegistry == nil { + destDockerRegistry = &dockerRegistryOptions{} + } + srcContext := srcDockerRegistry.getSystemContext(signaturePolicyPath) + destContext := destDockerRegistry.getSystemContext(signaturePolicyPath) + return &cp.Options{ + RemoveSignatures: signing.RemoveSignatures, + SignBy: signing.SignBy, + ReportWriter: reportWriter, + SourceCtx: srcContext, + DestinationCtx: destContext, + } +} + +func getPolicyContext(path string) (*signature.PolicyContext, error) { + policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path}) + if err != nil { + return nil, err + } + return signature.NewPolicyContext(policy) +} + func findImage(store storage.Store, image string) (*storage.Image, error) { var img *storage.Image ref, err := is.Transport.ParseStoreReference(store, image) if err == nil { - img, err = is.Transport.GetStoreImage(store, ref) - } - if err != nil { - img2, err2 := store.Image(image) - if err2 != nil { - if ref == nil { - return nil, errors.Wrapf(err, "error parsing reference to image %q", image) - } - return nil, errors.Wrapf(err, "unable to locate image %q", image) + img, err := is.Transport.GetStoreImage(store, ref) + if err != nil { + return nil, err } - img = img2 + return img, nil } + img2, err2 := store.Image(image) + if err2 != nil { + if ref == nil { + return nil, errors.Wrapf(err, "error parsing reference to image %q", image) + } + return nil, errors.Wrapf(err, "unable to locate image %q", image) + } + img = img2 return img, nil } -func getCopyOptions(reportWriter io.Writer) *cp.Options { - return &cp.Options{ - ReportWriter: reportWriter, - } -} - func getSystemContext(signaturePolicyPath string) *types.SystemContext { sc := &types.SystemContext{} if signaturePolicyPath != "" { @@ -113,3 +163,36 @@ func getSize(image storage.Image, store storage.Store) (int64, error) { } return imgSize, nil } + +func copyStringStringMap(m map[string]string) map[string]string { + n := map[string]string{} + for k, v := range m { + n[k] = v + } + return n +} + +func (o dockerRegistryOptions) getSystemContext(signaturePolicyPath string) *types.SystemContext { + sc := &types.SystemContext{ + SignaturePolicyPath: signaturePolicyPath, + DockerAuthConfig: o.DockerRegistryCreds, + DockerCertPath: o.DockerCertPath, + DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify, + } + return sc +} + +func parseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { + if creds == "" { + return nil, errors.New("no credentials supplied") + } + if strings.Index(creds, ":") < 0 { + return nil, errors.New("user name supplied, but no password supplied") + } + v := strings.SplitN(creds, ":", 2) + cfg := &types.DockerAuthConfig{ + Username: v[0], + Password: v[1], + } + return cfg, 
nil +} diff --git a/cmd/kpod/common_test.go b/cmd/kpod/common_test.go index 8bb78568..dd1ab62c 100644 --- a/cmd/kpod/common_test.go +++ b/cmd/kpod/common_test.go @@ -85,6 +85,18 @@ func failTestIfNotRoot(t *testing.T) { } } +func getStoreForTests() (storage.Store, error) { + set := flag.NewFlagSet("test", 0) + globalSet := flag.NewFlagSet("test", 0) + globalSet.String("root", "", "path to the root directory in which data, including images, is stored") + globalCtx := cli.NewContext(nil, globalSet, nil) + command := cli.Command{Name: "testCommand"} + c := cli.NewContext(nil, set, globalCtx) + c.Command = command + + return getStore(c) +} + func pullTestImage(name string) error { cmd := exec.Command("crioctl", "image", "pull", name) err := cmd.Run() diff --git a/cmd/kpod/containerImageRef.go b/cmd/kpod/containerImageRef.go new file mode 100644 index 00000000..64ce465e --- /dev/null +++ b/cmd/kpod/containerImageRef.go @@ -0,0 +1,449 @@ +package main + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + is "github.com/containers/image/storage" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/kubernetes-incubator/cri-o/cmd/kpod/docker" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type containerImageRef struct { + store storage.Store + compression archive.Compression + name reference.Named + names []string + layerID string + addHistory bool + oconfig []byte + dconfig []byte + created time.Time + createdBy string + annotations map[string]string + preferredManifestType string + exporting bool +} + +type containerImageSource struct { + path string + ref *containerImageRef + store storage.Store + layerID string + names []string + addHistory bool + compression archive.Compression + config []byte + configDigest digest.Digest + manifest []byte + manifestType string + exporting bool +} + +func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) { + src, err := i.NewImageSource(sc, nil) + if err != nil { + return nil, err + } + return image.FromSource(src) +} + +func selectManifestType(preferred string, acceptable, supported []string) string { + selected := preferred + for _, accept := range acceptable { + if preferred == accept { + return preferred + } + for _, support := range supported { + if accept == support { + selected = accept + } + } + } + return selected +} + +func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) { + // Decide which type of manifest and configuration output we're going to provide. + supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest} + manifestType := selectManifestType(i.preferredManifestType, manifestTypes, supportedManifestTypes) + // If it's not a format we support, return an error. 
+ if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest { + return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", + manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest) + } + // Start building the list of layers using the read-write layer. + layers := []string{} + layerID := i.layerID + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "unable to read layer %q", layerID) + } + // Walk the list of parent layers, prepending each as we go. + for layer != nil { + layers = append(append([]string{}, layerID), layers...) + layerID = layer.Parent + if layerID == "" { + err = nil + break + } + layer, err = i.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "unable to read layer %q", layerID) + } + } + logrus.Debugf("layer list: %q", layers) + + // Make a temporary directory to hold blobs. + path, err := ioutil.TempDir(os.TempDir(), "kpod") + if err != nil { + return nil, err + } + logrus.Debugf("using %q to hold temporary data", path) + defer func() { + if src == nil { + err2 := os.RemoveAll(path) + if err2 != nil { + logrus.Errorf("error removing %q: %v", path, err) + } + } + }() + + // Build fresh copies of the configurations so that we don't mess with the values in the Builder + // object itself. + oimage := v1.Image{} + err = json.Unmarshal(i.oconfig, &oimage) + if err != nil { + return nil, err + } + dimage := docker.V2Image{} + err = json.Unmarshal(i.dconfig, &dimage) + if err != nil { + return nil, err + } + + // Start building manifests. + omanifest := v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + }, + Layers: []v1.Descriptor{}, + Annotations: i.annotations, + } + dmanifest := docker.V2S2Manifest{ + V2Versioned: docker.V2Versioned{ + SchemaVersion: 2, + MediaType: docker.V2S2MediaTypeManifest, + }, + Config: docker.V2S2Descriptor{ + MediaType: docker.V2S2MediaTypeImageConfig, + }, + Layers: []docker.V2S2Descriptor{}, + } + + oimage.RootFS.Type = docker.TypeLayers + oimage.RootFS.DiffIDs = []digest.Digest{} + dimage.RootFS = &docker.V2S2RootFS{} + dimage.RootFS.Type = docker.TypeLayers + dimage.RootFS.DiffIDs = []digest.Digest{} + + // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. + for _, layerID := range layers { + omediaType := v1.MediaTypeImageLayer + dmediaType := docker.V2S2MediaTypeUncompressedLayer + // Figure out which media type we want to call this. Assume no compression. + if i.compression != archive.Uncompressed { + switch i.compression { + case archive.Gzip: + omediaType = v1.MediaTypeImageLayerGzip + dmediaType = docker.V2S2MediaTypeLayer + logrus.Debugf("compressing layer %q with gzip", layerID) + case archive.Bzip2: + // Until the image specs define a media type for bzip2-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with bzip2. + return nil, errors.New("media type for bzip2-compressed layers is not defined") + default: + logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID) + } + } + // If we're not re-exporting the data, just fake up layer and diff IDs for the manifest. + if !i.exporting { + fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID) + // Add a note in the manifest about the layer. 
The blobs should be identified by their + // possibly-compressed blob digests, but just use the layer IDs here. + olayerDescriptor := v1.Descriptor{ + MediaType: omediaType, + Digest: fakeLayerDigest, + Size: -1, + } + omanifest.Layers = append(omanifest.Layers, olayerDescriptor) + dlayerDescriptor := docker.V2S2Descriptor{ + MediaType: dmediaType, + Digest: fakeLayerDigest, + Size: -1, + } + dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) + // Add a note about the diffID, which should be uncompressed digest of the blob, but + // just use the layer ID here. + oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest) + dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest) + continue + } + // Start reading the layer. + rc, err := i.store.Diff("", layerID) + if err != nil { + return nil, errors.Wrapf(err, "error extracting layer %q", layerID) + } + defer rc.Close() + // Set up to decompress the layer, in case it's coming out compressed. Due to implementation + // differences, the result may not match the digest the blob had when it was originally imported, + // so we have to recompute all of this anyway if we want to be sure the digests we use will be + // correct. + uncompressed, err := archive.DecompressStream(rc) + if err != nil { + return nil, errors.Wrapf(err, "error decompressing layer %q", layerID) + } + defer uncompressed.Close() + srcHasher := digest.Canonical.Digester() + reader := io.TeeReader(uncompressed, srcHasher.Hash()) + // Set up to write the possibly-recompressed blob. + layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return nil, errors.Wrapf(err, "error opening file for layer %q", layerID) + } + destHasher := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(layerFile) + multiWriter := io.MultiWriter(counter, destHasher.Hash()) + // Compress the layer, if we're compressing it. + writer, err := archive.CompressStream(multiWriter, i.compression) + if err != nil { + return nil, errors.Wrapf(err, "error compressing layer %q", layerID) + } + size, err := io.Copy(writer, reader) + if err != nil { + return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) + } + writer.Close() + layerFile.Close() + if i.compression == archive.Uncompressed { + if size != counter.Count { + return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count) + } + } else { + size = counter.Count + } + logrus.Debugf("layer %q size is %d bytes", layerID, size) + // Rename the layer so that we can more easily find it by digest later. + err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) + if err != nil { + return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) + } + // Add a note in the manifest about the layer. The blobs are identified by their possibly- + // compressed blob digests. + olayerDescriptor := v1.Descriptor{ + MediaType: omediaType, + Digest: destHasher.Digest(), + Size: size, + } + omanifest.Layers = append(omanifest.Layers, olayerDescriptor) + dlayerDescriptor := docker.V2S2Descriptor{ + MediaType: dmediaType, + Digest: destHasher.Digest(), + Size: size, + } + dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) + // Add a note about the diffID, which is always an uncompressed value. 
+ oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) + dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) + } + + if i.addHistory { + // Build history notes in the image configurations. + onews := v1.History{ + Created: &i.created, + CreatedBy: i.createdBy, + Author: oimage.Author, + EmptyLayer: false, + } + oimage.History = append(oimage.History, onews) + dnews := docker.V2S2History{ + Created: i.created, + CreatedBy: i.createdBy, + Author: dimage.Author, + EmptyLayer: false, + } + dimage.History = append(dimage.History, dnews) + } + + // Encode the image configuration blob. + oconfig, err := json.Marshal(&oimage) + if err != nil { + return nil, err + } + logrus.Debugf("OCIv1 config = %s", oconfig) + + // Add the configuration blob to the manifest. + omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) + omanifest.Config.Size = int64(len(oconfig)) + omanifest.Config.MediaType = v1.MediaTypeImageConfig + + // Encode the manifest. + omanifestbytes, err := json.Marshal(&omanifest) + if err != nil { + return nil, err + } + logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) + + // Encode the image configuration blob. + dconfig, err := json.Marshal(&dimage) + if err != nil { + return nil, err + } + logrus.Debugf("Docker v2s2 config = %s", dconfig) + + // Add the configuration blob to the manifest. + dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) + dmanifest.Config.Size = int64(len(dconfig)) + dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig + + // Encode the manifest. + dmanifestbytes, err := json.Marshal(&dmanifest) + if err != nil { + return nil, err + } + logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) + + // Decide which manifest and configuration blobs we'll actually output. 
+ var config []byte + var manifest []byte + switch manifestType { + case v1.MediaTypeImageManifest: + manifest = omanifestbytes + config = oconfig + case docker.V2S2MediaTypeManifest: + manifest = dmanifestbytes + config = dconfig + default: + panic("unreachable code: unsupported manifest type") + } + src = &containerImageSource{ + path: path, + ref: i, + store: i.store, + layerID: i.layerID, + names: i.names, + addHistory: i.addHistory, + compression: i.compression, + config: config, + configDigest: digest.Canonical.FromBytes(config), + manifest: manifest, + manifestType: manifestType, + exporting: i.exporting, + } + return src, nil +} + +func (i *containerImageRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.Errorf("can't write to a container") +} + +func (i *containerImageRef) DockerReference() reference.Named { + return i.name +} + +func (i *containerImageRef) StringWithinTransport() string { + if len(i.names) > 0 { + return i.names[0] + } + return "" +} + +func (i *containerImageRef) DeleteImage(*types.SystemContext) error { + // we were never here + return nil +} + +func (i *containerImageRef) PolicyConfigurationIdentity() string { + return "" +} + +func (i *containerImageRef) PolicyConfigurationNamespaces() []string { + return nil +} + +func (i *containerImageRef) Transport() types.ImageTransport { + return is.Transport +} + +func (i *containerImageSource) Close() error { + err := os.RemoveAll(i.path) + if err != nil { + logrus.Errorf("error removing %q: %v", i.path, err) + } + return err +} + +func (i *containerImageSource) Reference() types.ImageReference { + return i.ref +} + +func (i *containerImageSource) GetSignatures() ([][]byte, error) { + return nil, nil +} + +func (i *containerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return []byte{}, "", errors.Errorf("TODO") +} + +func (i *containerImageSource) GetManifest() ([]byte, string, error) { + return i.manifest, i.manifestType, nil +} + +func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) { + if blob.Digest == i.configDigest { + logrus.Debugf("start reading config") + reader := bytes.NewReader(i.config) + closer := func() error { + logrus.Debugf("finished reading config") + return nil + } + return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil + } + layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600) + if err != nil { + logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) + return nil, -1, err + } + size = -1 + st, err := layerFile.Stat() + if err != nil { + logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err) + } else { + size = st.Size() + } + logrus.Debugf("reading layer %q", blob.Digest.String()) + closer := func() error { + layerFile.Close() + logrus.Debugf("finished reading layer %q", blob.Digest.String()) + return nil + } + return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil +} diff --git a/cmd/kpod/docker/types.go b/cmd/kpod/docker/types.go new file mode 100644 index 00000000..03757f95 --- /dev/null +++ b/cmd/kpod/docker/types.go @@ -0,0 +1,271 @@ +package docker + +// +// Types extracted from Docker +// + +import ( + "time" + + "github.com/containers/image/pkg/strslice" + "github.com/opencontainers/go-digest" +) + +// TypeLayers github.com/moby/moby/image/rootfs.go +const TypeLayers = "layers" + +// V2S2MediaTypeManifest 
github.com/docker/distribution/manifest/schema2/manifest.go +const V2S2MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + +// V2S2MediaTypeImageConfig github.com/docker/distribution/manifest/schema2/manifest.go +const V2S2MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" + +// V2S2MediaTypeLayer github.com/docker/distribution/manifest/schema2/manifest.go +const V2S2MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + +// V2S2MediaTypeUncompressedLayer github.com/docker/distribution/manifest/schema2/manifest.go +const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" + +// V2S2RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +// github.com/moby/moby/image/rootfs.go +type V2S2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// V2S2History stores build commands that were used to create an image +// github.com/moby/moby/image/image.go +type V2S2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// ID is the content-addressable ID of an image. +// github.com/moby/moby/image/image.go +type ID digest.Digest + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +// github.com/moby/moby/api/types/container/config.go +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// PortSet is a collection of structs indexed by Port +// github.com/docker/go-connections/nat/nat.go +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +// github.com/docker/go-connections/nat/nat.go +type Port string + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. 
+// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +// github.com/moby/moby/api/types/container/config.go +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// V1Compatibility - For non-top-level layers, create fake V1Compatibility +// strings that fit the format and don't collide with anything else, but +// don't result in runnable images on their own. +// github.com/docker/distribution/manifest/schema1/config_builder.go +type V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// V1Image stores the V1 image configuration. 
+// github.com/moby/moby/image/image.go +type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// V2Image stores the image configuration +// github.com/moby/moby/image/image.go +type V2Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *V2S2RootFS `json:"rootfs,omitempty"` + History []V2S2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + //rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + //computedID ID +} + +// V2Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +// github.com/docker/distribution/manifest/versioned.go +type V2Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} + +// V2S1FSLayer is a container struct for BlobSums defined in an image manifest +// github.com/docker/distribution/manifest/schema1/manifest.go +type V2S1FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// V2S1History stores unstructured v1 compatibility information +// github.com/docker/distribution/manifest/schema1/manifest.go +type V2S1History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// V2S1Manifest provides the base accessible fields for working with V2 image +// format in the registry. 
+// github.com/docker/distribution/manifest/schema1/manifest.go +type V2S1Manifest struct { + V2Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []V2S1FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []V2S1History `json:"history"` +} + +// V2S2Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +// github.com/docker/distribution/blobs.go +type V2S2Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// V2S2Manifest defines a schema2 manifest. +// github.com/docker/distribution/manifest/schema2/manifest.go +type V2S2Manifest struct { + V2Versioned + + // Config references the image configuration as a blob. + Config V2S2Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []V2S2Descriptor `json:"layers"` +} diff --git a/cmd/kpod/imagePushData.go b/cmd/kpod/imagePushData.go new file mode 100644 index 00000000..1134f366 --- /dev/null +++ b/cmd/kpod/imagePushData.go @@ -0,0 +1,406 @@ +package main + +import ( + "encoding/json" + "fmt" + "path/filepath" + "runtime" + "time" + + "github.com/containers/image/docker/reference" + is "github.com/containers/image/storage" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/kubernetes-incubator/cri-o/cmd/kpod/docker" // Get rid of this eventually + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" + ociv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, + // suitable for specifying as a value of the PreferredManifestType + // member of a CommitOptions structure. It is also the default. 
+	OCIv1ImageManifest = v1.MediaTypeImageManifest
+)
+
+type imagePushData struct {
+	store storage.Store
+	// Type is used to help a build container's metadata
+	Type string `json:"type"`
+	// FromImage is the name of the source image which was used to create
+	// the container, if one was used
+	FromImage string `json:"image,omitempty"`
+	// FromImageID is the id of the source image
+	FromImageID string `json:"imageid"`
+	// Config is the source image's configuration
+	Config []byte `json:"config,omitempty"`
+	// Manifest is the source image's manifest
+	Manifest []byte `json:"manifest,omitempty"`
+	// ImageAnnotations is a set of key-value pairs which is stored in the
+	// image's manifest
+	ImageAnnotations map[string]string `json:"annotations,omitempty"`
+	// ImageCreatedBy is a description of how this container was built
+	ImageCreatedBy string `json:"created-by,omitempty"`
+
+	// Image metadata and runtime settings, in multiple formats
+	OCIv1  ociv1.Image    `json:"ociv1,omitempty"`
+	Docker docker.V2Image `json:"docker,omitempty"`
+}
+
+func (i *imagePushData) initConfig() {
+	image := ociv1.Image{}
+	dimage := docker.V2Image{}
+	if len(i.Config) > 0 {
+		// Try to parse the image config. If we fail, try to start over from scratch
+		if err := json.Unmarshal(i.Config, &dimage); err == nil && dimage.DockerVersion != "" {
+			image, err = makeOCIv1Image(&dimage)
+			if err != nil {
+				image = ociv1.Image{}
+			}
+		} else {
+			if err := json.Unmarshal(i.Config, &image); err != nil {
+				if dimage, err = makeDockerV2S2Image(&image); err != nil {
+					dimage = docker.V2Image{}
+				}
+			}
+		}
+		i.OCIv1 = image
+		i.Docker = dimage
+	} else {
+		// Try to dig out the image configuration from the manifest
+		manifest := docker.V2S1Manifest{}
+		if err := json.Unmarshal(i.Manifest, &manifest); err == nil && manifest.SchemaVersion == 1 {
+			if dimage, err = makeDockerV2S1Image(manifest); err == nil {
+				if image, err = makeOCIv1Image(&dimage); err != nil {
+					image = ociv1.Image{}
+				}
+			}
+		}
+		i.OCIv1 = image
+		i.Docker = dimage
+	}
+
+	if len(i.Manifest) > 0 {
+		// Attempt to recover format-specific data from the manifest
+		v1Manifest := ociv1.Manifest{}
+		if json.Unmarshal(i.Manifest, &v1Manifest) == nil {
+			i.ImageAnnotations = v1Manifest.Annotations
+		}
+	}
+
+	i.fixupConfig()
+}
+
+func (i *imagePushData) fixupConfig() {
+	if i.Docker.Config != nil {
+		// Prefer image-level settings over those from the container it was built from
+		i.Docker.ContainerConfig = *i.Docker.Config
+	}
+	i.Docker.Config = &i.Docker.ContainerConfig
+	i.Docker.DockerVersion = ""
+	now := time.Now().UTC()
+	if i.Docker.Created.IsZero() {
+		i.Docker.Created = now
+	}
+	if i.OCIv1.Created.IsZero() {
+		i.OCIv1.Created = &now
+	}
+	if i.OS() == "" {
+		i.SetOS(runtime.GOOS)
+	}
+	if i.Architecture() == "" {
+		i.SetArchitecture(runtime.GOARCH)
+	}
+	if i.WorkDir() == "" {
+		i.SetWorkDir(string(filepath.Separator))
+	}
+}
+
+// OS returns the name of the OS on which a container built using this image
+// is intended to be run.
+func (i *imagePushData) OS() string {
+	return i.OCIv1.OS
+}
+
+// SetOS sets the name of the OS on which a container built using this image
+// is intended to be run.
+func (i *imagePushData) SetOS(os string) {
+	i.OCIv1.OS = os
+	i.Docker.OS = os
+}
+
+// Architecture returns the name of the architecture on which a container built
+// using this image is intended to be run.
+func (i *imagePushData) Architecture() string {
+	return i.OCIv1.Architecture
+}
+
+// SetArchitecture sets the name of the architecture on which a container built
+// using this image is intended to be run.
+func (i *imagePushData) SetArchitecture(arch string) {
+	i.OCIv1.Architecture = arch
+	i.Docker.Architecture = arch
+}
+
+// WorkDir returns the default working directory for running commands in a container
+// built using this image.
+func (i *imagePushData) WorkDir() string {
+	return i.OCIv1.Config.WorkingDir
+}
+
+// SetWorkDir sets the location of the default working directory for running commands
+// in a container built using this image.
+func (i *imagePushData) SetWorkDir(there string) {
+	i.OCIv1.Config.WorkingDir = there
+	i.Docker.Config.WorkingDir = there
+}
+
+// makeOCIv1Image builds the best OCIv1 image structure we can from the
+// contents of the docker image structure.
+func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
+	config := dimage.Config
+	if config == nil {
+		config = &dimage.ContainerConfig
+	}
+	dimageCreatedTime := dimage.Created.UTC()
+	image := ociv1.Image{
+		Created:      &dimageCreatedTime,
+		Author:       dimage.Author,
+		Architecture: dimage.Architecture,
+		OS:           dimage.OS,
+		Config: ociv1.ImageConfig{
+			User:         config.User,
+			ExposedPorts: map[string]struct{}{},
+			Env:          config.Env,
+			Entrypoint:   config.Entrypoint,
+			Cmd:          config.Cmd,
+			Volumes:      config.Volumes,
+			WorkingDir:   config.WorkingDir,
+			Labels:       config.Labels,
+		},
+		RootFS: ociv1.RootFS{
+			Type:    "",
+			DiffIDs: []digest.Digest{},
+		},
+		History: []ociv1.History{},
+	}
+	for port, what := range config.ExposedPorts {
+		image.Config.ExposedPorts[string(port)] = what
+	}
+	RootFS := docker.V2S2RootFS{}
+	if dimage.RootFS != nil {
+		RootFS = *dimage.RootFS
+	}
+	if RootFS.Type == docker.TypeLayers {
+		image.RootFS.Type = docker.TypeLayers
+		for _, id := range RootFS.DiffIDs {
+			image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, digest.Digest(id.String()))
+		}
+	}
+	for _, history := range dimage.History {
+		historyCreatedTime := history.Created.UTC()
+		ohistory := ociv1.History{
+			Created:    &historyCreatedTime,
+			CreatedBy:  history.CreatedBy,
+			Author:     history.Author,
+			Comment:    history.Comment,
+			EmptyLayer: history.EmptyLayer,
+		}
+		image.History = append(image.History, ohistory)
+	}
+	return image, nil
+}
+
+// makeDockerV2S2Image builds the best docker image structure we can from the
+// contents of the OCI image structure.
+func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) { + image := docker.V2Image{ + V1Image: docker.V1Image{Created: oimage.Created.UTC(), + Author: oimage.Author, + Architecture: oimage.Architecture, + OS: oimage.OS, + ContainerConfig: docker.Config{ + User: oimage.Config.User, + ExposedPorts: docker.PortSet{}, + Env: oimage.Config.Env, + Entrypoint: oimage.Config.Entrypoint, + Cmd: oimage.Config.Cmd, + Volumes: oimage.Config.Volumes, + WorkingDir: oimage.Config.WorkingDir, + Labels: oimage.Config.Labels, + }, + }, + RootFS: &docker.V2S2RootFS{ + Type: "", + DiffIDs: []digest.Digest{}, + }, + History: []docker.V2S2History{}, + } + for port, what := range oimage.Config.ExposedPorts { + image.ContainerConfig.ExposedPorts[docker.Port(port)] = what + } + if oimage.RootFS.Type == docker.TypeLayers { + image.RootFS.Type = docker.TypeLayers + for _, id := range oimage.RootFS.DiffIDs { + d, err := digest.Parse(id.String()) + if err != nil { + return docker.V2Image{}, err + } + image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, d) + } + } + for _, history := range oimage.History { + dhistory := docker.V2S2History{ + Created: history.Created.UTC(), + CreatedBy: history.CreatedBy, + Author: history.Author, + Comment: history.Comment, + EmptyLayer: history.EmptyLayer, + } + image.History = append(image.History, dhistory) + } + image.Config = &image.ContainerConfig + return image, nil +} + +// makeDockerV2S1Image builds the best docker image structure we can from the +// contents of the V2S1 image structure. +func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) { + // Treat the most recent (first) item in the history as a description of the image. + if len(manifest.History) == 0 { + return docker.V2Image{}, errors.Errorf("error parsing image configuration from manifest") + } + dimage := docker.V2Image{} + err := json.Unmarshal([]byte(manifest.History[0].V1Compatibility), &dimage) + if err != nil { + return docker.V2Image{}, err + } + if dimage.DockerVersion == "" { + return docker.V2Image{}, errors.Errorf("error parsing image configuration from history") + } + // The DiffID list is intended to contain the sums of _uncompressed_ blobs, and these are most + // likely compressed, so leave the list empty to avoid potential confusion later on. We can + // construct a list with the correct values when we prep layers for pushing, so we don't lose. + // information by leaving this part undone. + rootFS := &docker.V2S2RootFS{ + Type: docker.TypeLayers, + DiffIDs: []digest.Digest{}, + } + // Build a filesystem history. + history := []docker.V2S2History{} + for i := range manifest.History { + h := docker.V2S2History{ + Created: time.Now().UTC(), + Author: "", + CreatedBy: "", + Comment: "", + EmptyLayer: false, + } + dcompat := docker.V1Compatibility{} + if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil { + h.Created = dcompat.Created.UTC() + h.Author = dcompat.Author + h.Comment = dcompat.Comment + if len(dcompat.ContainerConfig.Cmd) > 0 { + h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd) + } + h.EmptyLayer = dcompat.ThrowAway + } + // Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order + // compared to v2s2, which lists earlier layers before later ones. + history = append([]docker.V2S2History{h}, history...) 
+	}
+	dimage.RootFS = rootFS
+	dimage.History = history
+	return dimage, nil
+}
+
+func (i *imagePushData) Annotations() map[string]string {
+	return copyStringStringMap(i.ImageAnnotations)
+}
+
+func (i *imagePushData) makeImageRef(manifestType string, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
+	var name reference.Named
+	if len(names) > 0 {
+		if parsed, err := reference.ParseNamed(names[0]); err == nil {
+			name = parsed
+		}
+	}
+	if manifestType == "" {
+		manifestType = OCIv1ImageManifest
+	}
+	oconfig, err := json.Marshal(&i.OCIv1)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error encoding OCI-format image configuration")
+	}
+	dconfig, err := json.Marshal(&i.Docker)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error encoding docker-format image configuration")
+	}
+	created := time.Now().UTC()
+	if historyTimestamp != nil {
+		created = historyTimestamp.UTC()
+	}
+	ref := &containerImageRef{
+		store:                 i.store,
+		compression:           compress,
+		name:                  name,
+		names:                 names,
+		layerID:               layerID,
+		addHistory:            false,
+		oconfig:               oconfig,
+		dconfig:               dconfig,
+		created:               created,
+		createdBy:             i.ImageCreatedBy,
+		annotations:           i.ImageAnnotations,
+		preferredManifestType: manifestType,
+		exporting:             true,
+	}
+	return ref, nil
+}
+
+func importImagePushDataFromImage(store storage.Store, img *storage.Image, systemContext *types.SystemContext) (*imagePushData, error) {
+	manifest := []byte{}
+	config := []byte{}
+	imageName := ""
+
+	if img.ID != "" {
+		ref, err := is.Transport.ParseStoreReference(store, "@"+img.ID)
+		if err != nil {
+			return nil, errors.Wrapf(err, "no such image %q", "@"+img.ID)
+		}
+		src, err2 := ref.NewImage(systemContext)
+		if err2 != nil {
+			return nil, errors.Wrapf(err2, "error reading image configuration")
+		}
+		defer src.Close()
+		config, err = src.ConfigBlob()
+		if err != nil {
+			return nil, errors.Wrapf(err, "error reading image configuration blob")
+		}
+		manifest, _, err = src.Manifest()
+		if err != nil {
+			return nil, errors.Wrapf(err, "error reading image manifest")
+		}
+		if len(img.Names) > 0 {
+			imageName = img.Names[0]
+		}
+	}
+
+	ipd := &imagePushData{
+		store:            store,
+		FromImage:        imageName,
+		FromImageID:      img.ID,
+		Config:           config,
+		Manifest:         manifest,
+		ImageAnnotations: map[string]string{},
+		ImageCreatedBy:   "",
+	}
+
+	ipd.initConfig()
+
+	return ipd, nil
+}
diff --git a/cmd/kpod/imagePushData_test.go b/cmd/kpod/imagePushData_test.go
new file mode 100644
index 00000000..a6c412cc
--- /dev/null
+++ b/cmd/kpod/imagePushData_test.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// We have to compare the structs manually because they contain
+// []byte variables, which cannot be compared with "=="
+func compareImagePushData(a, b *imagePushData) bool {
+	if a.store != b.store {
+		fmt.Println("store")
+		return false
+	} else if a.Type != b.Type {
+		fmt.Println("type")
+		return false
+	} else if a.FromImage != b.FromImage {
+		fmt.Println("FromImage")
+		return false
+	} else if a.FromImageID != b.FromImageID {
+		fmt.Println("FromImageID")
+		return false
+	} else if !bytes.Equal(a.Config, b.Config) {
+		fmt.Println("Config")
+		return false
+	} else if !bytes.Equal(a.Manifest, b.Manifest) {
+		fmt.Println("Manifest")
+		return false
+	} else if fmt.Sprint(a.ImageAnnotations) != fmt.Sprint(b.ImageAnnotations) {
+		fmt.Println("Annotations")
+		return false
+	} else if fmt.Sprintf("%+v", 
a.OCIv1) != fmt.Sprintf("%+v", b.OCIv1) { + fmt.Println("OCIv1") + return false + } + return true +} diff --git a/cmd/kpod/main.go b/cmd/kpod/main.go index 4fc5bb76..6057f85e 100644 --- a/cmd/kpod/main.go +++ b/cmd/kpod/main.go @@ -22,13 +22,14 @@ func main() { app.Version = Version app.Commands = []cli.Command{ + historyCommand, imagesCommand, infoCommand, + pullCommand, + pushCommand, rmiCommand, tagCommand, versionCommand, - pullCommand, - historyCommand, } app.Flags = []cli.Flag{ cli.StringFlag{ @@ -48,7 +49,6 @@ func main() { Usage: "used to pass an option to the storage driver", }, } - if err := app.Run(os.Args); err != nil { logrus.Fatal(err) } diff --git a/cmd/kpod/pull.go b/cmd/kpod/pull.go index 628e5665..a39555d4 100644 --- a/cmd/kpod/pull.go +++ b/cmd/kpod/pull.go @@ -118,8 +118,10 @@ func pullImage(store storage.Store, imgName string, allTags bool, sc *types.Syst if err != nil { return err } + defer policyContext.Destroy() + + copyOptions := getCopyOptions(os.Stdout, "", nil, nil, signingOptions{}) fmt.Println(tag + ": pulling from " + fromName) - - return cp.Image(policyContext, destRef, srcRef, getCopyOptions(os.Stdout)) + return cp.Image(policyContext, destRef, srcRef, copyOptions) } diff --git a/cmd/kpod/push.go b/cmd/kpod/push.go new file mode 100644 index 00000000..e878d4f9 --- /dev/null +++ b/cmd/kpod/push.go @@ -0,0 +1,196 @@ +package main + +import ( + "fmt" + "io" + "os" + "syscall" + + cp "github.com/containers/image/copy" + "github.com/containers/image/manifest" + "github.com/containers/image/transports/alltransports" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var ( + pushFlags = []cli.Flag{ + cli.BoolFlag{ + Name: "disable-compression, D", + Usage: "don't compress layers", + Hidden: true, + }, + cli.StringFlag{ + Name: "signature-policy", + Usage: "`pathname` of signature policy file (not usually used)", + Hidden: true, + }, + cli.StringFlag{ + Name: "creds", + Usage: "`credentials` (USERNAME:PASSWORD) to use for authenticating to a registry", + }, + cli.StringFlag{ + Name: "cert-dir", + Usage: "`pathname` of a directory containing TLS certificates and keys", + }, + cli.BoolTFlag{ + Name: "tls-verify", + Usage: "require HTTPS and verify certificates when contacting registries (default: true)", + }, + cli.BoolFlag{ + Name: "remove-signatures", + Usage: "discard any pre-existing signatures in the image", + }, + cli.StringFlag{ + Name: "sign-by", + Usage: "add a signature at the destination using the specified key", + }, + cli.BoolFlag{ + Name: "quiet, q", + Usage: "don't output progress information when pushing images", + }, + } + pushDescription = fmt.Sprintf(` + Pushes an image to a specified location. + The Image "DESTINATION" uses a "transport":"details" format. + See kpod-push(1) section "DESTINATION" for the expected format`) + + pushCommand = cli.Command{ + Name: "push", + Usage: "push an image to a specified destination", + Description: pushDescription, + Flags: pushFlags, + Action: pushCmd, + ArgsUsage: "IMAGE DESTINATION", + } +) + +type pushOptions struct { + // Compression specifies the type of compression which is applied to + // layer blobs. The default is to not use compression, but + // archive.Gzip is recommended. + Compression archive.Compression + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. 
Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to log the writing + // of the new image. + ReportWriter io.Writer + // Store is the local storage store which holds the source image. + Store storage.Store + // DockerRegistryOptions encapsulates settings that affect how we + // connect or authenticate to a remote registry to which we want to + // push the image. + dockerRegistryOptions + // SigningOptions encapsulates settings that control whether or not we + // strip or add signatures to the image when pushing (uploading) the + // image to a registry. + signingOptions +} + +func pushCmd(c *cli.Context) error { + var registryCreds *types.DockerAuthConfig + + args := c.Args() + if len(args) < 2 { + return errors.New("kpod push requires exactly 2 arguments") + } + srcName := c.Args().Get(0) + destName := c.Args().Get(1) + + signaturePolicy := c.String("signature-policy") + compress := archive.Uncompressed + if !c.Bool("disable-compression") { + compress = archive.Gzip + } + registryCredsString := c.String("creds") + certPath := c.String("cert-dir") + skipVerify := !c.BoolT("tls-verify") + removeSignatures := c.Bool("remove-signatures") + signBy := c.String("sign-by") + + if registryCredsString != "" { + creds, err := parseRegistryCreds(registryCredsString) + if err != nil { + return err + } + registryCreds = creds + } + + store, err := getStore(c) + if err != nil { + return err + } + + options := pushOptions{ + Compression: compress, + SignaturePolicyPath: signaturePolicy, + Store: store, + dockerRegistryOptions: dockerRegistryOptions{ + DockerRegistryCreds: registryCreds, + DockerCertPath: certPath, + DockerInsecureSkipTLSVerify: skipVerify, + }, + signingOptions: signingOptions{ + RemoveSignatures: removeSignatures, + SignBy: signBy, + }, + } + if !c.Bool("quiet") { + options.ReportWriter = os.Stderr + } + return pushImage(srcName, destName, options) +} + +func pushImage(srcName, destName string, options pushOptions) error { + if srcName == "" || destName == "" { + return errors.Wrapf(syscall.EINVAL, "source and destination image names must be specified") + } + + // Get the destination Image Reference + dest, err := alltransports.ParseImageName(destName) + if err != nil { + return errors.Wrapf(err, "error getting destination imageReference for %q", destName) + } + + policyContext, err := getPolicyContext(options.SignaturePolicyPath) + if err != nil { + return errors.Wrapf(err, "Could not get default policy context for signature policy path %q", options.SignaturePolicyPath) + } + defer policyContext.Destroy() + // Look up the image name and its layer, then build the imagePushData from + // the image + img, err := findImage(options.Store, srcName) + if err != nil { + return errors.Wrapf(err, "error locating image %q for importing settings", srcName) + } + systemContext := getSystemContext(options.SignaturePolicyPath) + ipd, err := importImagePushDataFromImage(options.Store, img, systemContext) + if err != nil { + return err + } + // Give the image we're producing the same ancestors as its source image + ipd.FromImage = ipd.Docker.ContainerConfig.Image + ipd.FromImageID = string(ipd.Docker.Parent) + + // Prep the layers and manifest for export + src, err := ipd.makeImageRef(manifest.GuessMIMEType(ipd.Manifest), options.Compression, img.Names, img.TopLayer, nil) + if err != nil { + return errors.Wrapf(err, "error 
copying layers and metadata") + } + + copyOptions := getCopyOptions(options.ReportWriter, options.SignaturePolicyPath, nil, &options.dockerRegistryOptions, options.signingOptions) + + // Copy the image to the remote destination + err = cp.Image(policyContext, dest, src, copyOptions) + if err != nil { + return errors.Wrapf(err, "Error copying image to the remote destination") + } + return nil +} diff --git a/cmd/kpod/push_test.go b/cmd/kpod/push_test.go new file mode 100644 index 00000000..2705ebe6 --- /dev/null +++ b/cmd/kpod/push_test.go @@ -0,0 +1,72 @@ +package main + +import ( + "os/user" + "testing" + + is "github.com/containers/image/storage" +) + +func TestImportImagePushDataFromImage(t *testing.T) { + u, err := user.Current() + if err != nil { + t.Log("Could not determine user. Running as root may cause tests to fail") + } else if u.Uid != "0" { + t.Fatal("tests will fail unless run as root") + } + // Get Store + store, err := getStoreForTests() + if err != nil { + t.Fatalf("could not get store: %q", err) + } + // Pull an image and save it to the store + testImageName := "docker.io/library/busybox:1.26" + err = pullTestImage(testImageName) + if err != nil { + t.Fatalf("could not pull test image: %q", err) + } + img, err := findImage(store, testImageName) + if err != nil { + t.Fatalf("could not find image in store: %q", err) + } + // Get System Context + systemContext := getSystemContext("") + // Call importImagePushDataFromImage + ipd, err := importImagePushDataFromImage(store, img, systemContext) + if err != nil { + t.Fatalf("could not get ImagePushData: %q", err) + } + // Get ref and from it, get the config and the manifest + ref, err := is.Transport.ParseStoreReference(store, "@"+img.ID) + if err != nil { + t.Fatalf("no such image %q", "@"+img.ID) + } + src, err := ref.NewImage(systemContext) + if err != nil { + t.Fatalf("error creating new image from system context: %q", err) + } + defer src.Close() + config, err := src.ConfigBlob() + if err != nil { + t.Fatalf("error reading image config: %q", err) + } + manifest, _, err := src.Manifest() + if err != nil { + t.Fatalf("error reading image manifest: %q", err) + } + //Create "expected" ipd struct + expectedIpd := &imagePushData{ + store: store, + FromImage: testImageName, + FromImageID: img.ID, + Config: config, + Manifest: manifest, + ImageAnnotations: map[string]string{}, + ImageCreatedBy: "", + } + expectedIpd.initConfig() + //Compare structs, error if they are not the same + if !compareImagePushData(ipd, expectedIpd) { + t.Errorf("imagePushData did not match expected imagePushData") + } +} diff --git a/completions/bash/kpod b/completions/bash/kpod index f24ccdd2..687f7e48 100644 --- a/completions/bash/kpod +++ b/completions/bash/kpod @@ -19,6 +19,47 @@ _kpod_images() { --format --filter -f + " + local options_with_args=" + " + + local all_options="$options_with_args $boolean_options" + + case "$cur" in + -*) + COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur")) + ;; + esac + } + +_kpod_launch() { + local options_with_args=" + " + local boolean_options=" + " + _complete_ "$options_with_args" "$boolean_options" +} + +_kpod_pull() { + local options_with_args=" + " + local boolean_options=" + --all-tags -a + " + _complete_ "$options_with_args" "$boolean_options" +} + +_kpod_push() { + local boolean_options=" + --disable-compression + -D + --quiet + -q + --signature-policy + --certs + --tls-verify + --remove-signatures + --sign-by " local options_with_args=" @@ -33,23 +74,6 @@ _kpod_images() { esac } 
-_complete_() {
-	local options_with_args=$1
-	local boolean_options="$2 -h --help"
-
-	case "$prev" in
-		$options_with_args)
-			return
-			;;
-	esac
-
-	case "$cur" in
-		-*)
-			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
-			;;
-	esac
-}
-
 _kpod_rmi() {
 	local boolean_options="
 	--help
@@ -68,14 +92,6 @@ _kpod_rmi() {
 	esac
 }
 
-_kpod_version() {
-	local options_with_args="
-	"
-	local boolean_options="
-	"
-	_complete_ "$options_with_args" "$boolean_options"
-}
-
 kpod_tag() {
 	local options_with_args="
 	"
@@ -84,13 +100,29 @@ kpod_tag() {
 	_complete_ "$options_with_args" "$boolean_options"
 }
 
-_kpod_pull() {
-	local options_with_args="
-	"
-	local boolean_options="
-	--all-tags -a
-	"
-	_complete_ "$options_with_args" "$boolean_options"
+_kpod_version() {
+	local options_with_args="
+	"
+	local boolean_options="
+	"
+	_complete_ "$options_with_args" "$boolean_options"
+}
+
+_complete_() {
+	local options_with_args=$1
+	local boolean_options="$2 -h --help"
+
+	case "$prev" in
+		$options_with_args)
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
+			;;
+	esac
 }
 
 _kpod_history() {
@@ -123,8 +155,10 @@ _kpod_kpod() {
 	--help -h
 	"
 	commands="
-	images
-	rmi
+	images
+	launch
+	push
+	rmi
 	tag
 	version
 	pull
diff --git a/docs/kpod-push.1.md b/docs/kpod-push.1.md
new file mode 100644
index 00000000..6a2fc1c3
--- /dev/null
+++ b/docs/kpod-push.1.md
@@ -0,0 +1,104 @@
+## kpod-push "1" "June 2017" "kpod"
+
+## NAME
+kpod push - Push an image from local storage to a specified destination.
+
+## SYNOPSIS
+**kpod** **push** [*options* [...]] **imageID** **DESTINATION**
+
+## DESCRIPTION
+Pushes an image from local storage to a specified destination, decompressing
+and recompressing layers as needed.
+
+## imageID
+Image stored in local containers/storage
+
+## DESTINATION
+
+  The DESTINATION is a location to store container images.
+  The DESTINATION uses a "transport":"details" format.
+
+  Multiple transports are supported:
+
+  **atomic:**_hostname_**/**_namespace_**/**_stream_**:**_tag_
+  An image served by an OpenShift(Atomic) Registry server. The current OpenShift project and OpenShift Registry instance are by default read from `$HOME/.kube/config`, which is set e.g. using `(oc login)`.
+
+  **dir:**_path_
+  An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
+
+  **docker://**_docker-reference_
+  An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`.
+
+  **docker-archive:**_path_[**:**_docker-reference_]
+  An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
+
+  **docker-daemon:**_docker-reference_
+  An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
+
+  **oci:**_path_**:**_tag_
+  An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
+
+  **ostree:**_image_[**@**_/absolute/repo/path_]
+  An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
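+
+  **containers-storage:**_image-name_
+  An image stored in a local containers/storage image store, such as the one kpod itself reads and writes. An optional storage driver and graph root may be given in square brackets, as in `containers-storage:[overlay2@/var/lib/containers/storage]fedora`; when they are omitted, the default storage configuration is used.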
+
+## OPTIONS
+
+**--creds="CREDENTIALS"**
+
+Credentials (USERNAME:PASSWORD) to use for authenticating to a registry
+
+**--cert-dir="PATHNAME"**
+
+Pathname of a directory containing TLS certificates and keys
+
+**--disable-compression, -D**
+
+Don't compress copies of filesystem layers which will be pushed
+
+**--quiet, -q**
+
+When writing the output image, suppress progress output
+
+**--remove-signatures**
+
+Discard any pre-existing signatures in the image
+
+**--signature-policy="PATHNAME"**
+
+Pathname of a signature policy file to use. It is not recommended that this
+option be used, as the default behavior of using the system-wide default policy
+(frequently */etc/containers/policy.json*) is most often preferred.
+
+**--sign-by="KEY"**
+
+Add a signature at the destination using the specified key
+
+**--tls-verify**
+
+Require HTTPS and verify certificates when contacting registries (default: true)
+
+## EXAMPLE
+
+This example pushes the image specified by the imageID to a local directory in docker format.
+
+ `# kpod push imageID dir:/path/to/image`
+
+This example pushes the image specified by the imageID to a local directory in oci format.
+
+ `# kpod push imageID oci:/path/to/layout`
+
+This example pushes the image specified by the imageID to a container registry named registry.example.com.
+
+ `# kpod push imageID docker://registry.example.com/repository:tag`
+
+This example pushes the image specified by the imageID into the local docker daemon's image store.
+
+ `# kpod push imageID docker-daemon:image:tag`
+
+This example pushes the image specified by the imageID to an OpenShift(Atomic) registry.
+
+ `# kpod push imageID atomic:registry.example.com/company/image:tag`
+
+
+## SEE ALSO
+kpod(1)
diff --git a/kpod-push.1.md b/kpod-push.1.md
new file mode 100644
index 00000000..fc99f215
--- /dev/null
+++ b/kpod-push.1.md
@@ -0,0 +1,47 @@
+## kpod-push "1" "June 2017" "kpod"
+
+## NAME
+kpod push - push an image to a specified location
+
+## SYNOPSIS
+**kpod** **push** [*options* [...]] **imageID** **TRANSPORT:REFERENCE**
+
+## DESCRIPTION
+Pushes an image to a specified location.
+
+## OPTIONS
+
+**disable-compression, D**
+  Don't compress layers
+
+**signature-policy**=""
+  Pathname of signature policy file (not usually used)
+
+**creds**=""
+  Credentials (USERNAME:PASSWORD) to use for authenticating to a registry
+
+**cert-dir**=""
+  Pathname of a directory containing TLS certificates and keys
+
+**tls-verify**=[true|false]
+  Require HTTPS and verify certificates when contacting registries (default: true)
+
+**remove-signatures**
+  Discard any pre-existing signatures in the image
+
+**sign-by**=""
+  Add a signature at the destination using the specified key
+
+**quiet, q**
+  Don't output progress information when pushing images
+
+## EXAMPLE
+
+kpod push fedora:25 containers-storage:[overlay2@/var/lib/containers/storage]fedora
+
+kpod push --disable-compression busybox:latest dir:/tmp/busybox
+
+kpod push --creds=myusername:password123 redis:alpine docker://myusername/redis:alpine
+
+## SEE ALSO
+kpod(1)
diff --git a/server/server.go b/server/server.go
index d55564e9..c18bff5d 100644
--- a/server/server.go
+++ b/server/server.go
@@ -54,7 +54,8 @@ type streamService struct {
 // Server implements the RuntimeService and ImageService
 type Server struct {
 	libkpod.ContainerServer
-	config Config
+	config               Config
+
 	storageRuntimeServer storage.RuntimeServer
 	stateLock            sync.Locker
 	updateLock           sync.RWMutex
diff --git a/test/kpod.bats b/test/kpod.bats
index acd31d0d..7e8fce32 100644
--- a/test/kpod.bats
+++ b/test/kpod.bats
@@ -2,11 +2,15 @@
 
 load helpers
 
-IMAGE="alpine"
+IMAGE="alpine:latest"
 ROOT="$TESTDIR/crio"
 RUNROOT="$TESTDIR/crio-run"
 KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT --storage-driver vfs"
 
+function teardown() {
+	cleanup_test
+}
+
 @test "kpod version test" {
 	run ${KPOD_BINARY} version
 	echo "$output"
@@ -117,3 +121,72 @@ KPOD_OPTIONS="--root $ROOT --runroot $RUNROOT --storage-driver vfs"
 	run ${KPOD_BINARY} $KPOD_OPTIONS rmi $IMAGE
 	[ "$status" -eq 0 ]
 }
+
+@test "kpod push to containers/storage" {
+	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" containers-storage:[$ROOT]busybox:test
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi busybox:test
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+	echo "$output"
+}
+
+@test "kpod push to directory" {
+	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run mkdir /tmp/busybox
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" dir:/tmp/busybox
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+	run rm -rf /tmp/busybox
+	echo "$output"
+}
+
+@test "kpod push to docker archive" {
+	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS push "$IMAGE" docker-archive:/tmp/busybox-archive:1.26
+	echo "$output"
+	[ "$status" -eq 0 ]
+	rm /tmp/busybox-archive
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+	echo "$output"
+}
+
+@test "kpod push to oci without compression" {
+	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run mkdir /tmp/oci-busybox
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS push --disable-compression "$IMAGE" oci:/tmp/oci-busybox
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run rm -rf /tmp/oci-busybox
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+	echo "$output"
+}
+
+@test "kpod push without signatures" {
+	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run mkdir /tmp/busybox
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS push --remove-signatures "$IMAGE" dir:/tmp/busybox
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run rm -rf /tmp/busybox
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+	echo "$output"
+}
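+
+# A minimal additional smoke test, sketched along the lines of the tests above:
+# it reuses the same helpers, $IMAGE, dir: transport, and cleanup pattern, and
+# only checks that the --quiet flag is accepted and the push still succeeds.
+# The /tmp path below is an arbitrary scratch location.
+@test "kpod push to directory with --quiet" {
+	run ${KPOD_BINARY} $KPOD_OPTIONS pull "$IMAGE"
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run mkdir /tmp/quiet-alpine
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run ${KPOD_BINARY} $KPOD_OPTIONS push --quiet "$IMAGE" dir:/tmp/quiet-alpine
+	echo "$output"
+	[ "$status" -eq 0 ]
+	run rm -rf /tmp/quiet-alpine
+	run ${KPOD_BINARY} $KPOD_OPTIONS rmi "$IMAGE"
+	echo "$output"
+}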