Move libkpod/image libkpod/layer to libpod/images and libpod/layers
Begin moving image and layer handling out of libkpod into libpod. Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
parent
47ef2f66df
commit
e18e962238
26 changed files with 272 additions and 104 deletions
|
@ -1,98 +0,0 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
cp "github.com/containers/image/copy"
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
	// ErrNoPassword is returned if the user did not supply a password
	// in a "USERNAME:PASSWORD" credentials string; it wraps EINVAL so
	// callers can still treat it as an invalid-argument error.
	ErrNoPassword = errors.Wrapf(syscall.EINVAL, "password was not supplied")
)
|
||||
|
||||
// GetCopyOptions constructs a new containers/image/copy.Options{} struct from the given parameters
|
||||
func GetCopyOptions(reportWriter io.Writer, signaturePolicyPath string, srcDockerRegistry, destDockerRegistry *DockerRegistryOptions, signing SigningOptions) *cp.Options {
|
||||
if srcDockerRegistry == nil {
|
||||
srcDockerRegistry = &DockerRegistryOptions{}
|
||||
}
|
||||
if destDockerRegistry == nil {
|
||||
destDockerRegistry = &DockerRegistryOptions{}
|
||||
}
|
||||
srcContext := srcDockerRegistry.GetSystemContext(signaturePolicyPath)
|
||||
destContext := destDockerRegistry.GetSystemContext(signaturePolicyPath)
|
||||
return &cp.Options{
|
||||
RemoveSignatures: signing.RemoveSignatures,
|
||||
SignBy: signing.SignBy,
|
||||
ReportWriter: reportWriter,
|
||||
SourceCtx: srcContext,
|
||||
DestinationCtx: destContext,
|
||||
}
|
||||
}
|
||||
|
||||
// GetSystemContext Constructs a new containers/image/types.SystemContext{} struct from the given signaturePolicy path
|
||||
func GetSystemContext(signaturePolicyPath string) *types.SystemContext {
|
||||
sc := &types.SystemContext{}
|
||||
if signaturePolicyPath != "" {
|
||||
sc.SignaturePolicyPath = signaturePolicyPath
|
||||
}
|
||||
return sc
|
||||
}
|
||||
|
||||
// CopyStringStringMap deep copies a map[string]string and returns the result.
// The returned map is always non-nil, even for a nil input.
func CopyStringStringMap(m map[string]string) map[string]string {
	copied := make(map[string]string, len(m))
	for key, value := range m {
		copied[key] = value
	}
	return copied
}
|
||||
|
||||
// IsTrue determines whether the given string equals "true".
// The comparison is exact and case-sensitive.
func IsTrue(str string) bool {
	const trueLiteral = "true"
	return str == trueLiteral
}
|
||||
|
||||
// IsFalse determines whether the given string equals "false".
// The comparison is exact and case-sensitive.
func IsFalse(str string) bool {
	const falseLiteral = "false"
	return str == falseLiteral
}
|
||||
|
||||
// IsValidBool determines whether the given string equals "true" or "false"
// (exact, case-sensitive match — any other spelling is invalid).
func IsValidBool(str string) bool {
	switch str {
	case "true", "false":
		return true
	}
	return false
}
|
||||
|
||||
// GetPolicyContext creates a signature policy context for the given signature policy path
|
||||
func GetPolicyContext(path string) (*signature.PolicyContext, error) {
|
||||
policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signature.NewPolicyContext(policy)
|
||||
}
|
||||
|
||||
// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD
|
||||
// and returns a DockerAuthConfig
|
||||
func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
|
||||
if creds == "" {
|
||||
return nil, errors.New("no credentials supplied")
|
||||
}
|
||||
if !strings.Contains(creds, ":") {
|
||||
return &types.DockerAuthConfig{
|
||||
Username: creds,
|
||||
Password: "",
|
||||
}, ErrNoPassword
|
||||
}
|
||||
v := strings.SplitN(creds, ":", 2)
|
||||
cfg := &types.DockerAuthConfig{
|
||||
Username: v[0],
|
||||
Password: v[1],
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
package common
|
||||
|
||||
import "github.com/containers/image/types"
|
||||
|
||||
// DockerRegistryOptions encapsulates settings that affect how we connect or
|
||||
// authenticate to a remote registry.
|
||||
type DockerRegistryOptions struct {
|
||||
// DockerRegistryCreds is the user name and password to supply in case
|
||||
// we need to pull an image from a registry, and it requires us to
|
||||
// authenticate.
|
||||
DockerRegistryCreds *types.DockerAuthConfig
|
||||
// DockerCertPath is the location of a directory containing CA
|
||||
// certificates which will be used to verify the registry's certificate
|
||||
// (all files with names ending in ".crt"), and possibly client
|
||||
// certificates and private keys (pairs of files with the same name,
|
||||
// except for ".cert" and ".key" suffixes).
|
||||
DockerCertPath string
|
||||
// DockerInsecureSkipTLSVerify turns off verification of TLS
|
||||
// certificates and allows connecting to registries without encryption.
|
||||
DockerInsecureSkipTLSVerify bool
|
||||
}
|
||||
|
||||
// GetSystemContext constructs a new system context from the given signaturePolicy path and the
|
||||
// values in the DockerRegistryOptions
|
||||
func (o DockerRegistryOptions) GetSystemContext(signaturePolicyPath string) *types.SystemContext {
|
||||
sc := &types.SystemContext{
|
||||
SignaturePolicyPath: signaturePolicyPath,
|
||||
DockerAuthConfig: o.DockerRegistryCreds,
|
||||
DockerCertPath: o.DockerCertPath,
|
||||
DockerInsecureSkipTLSVerify: o.DockerInsecureSkipTLSVerify,
|
||||
}
|
||||
return sc
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
package common
|
|
@ -1,10 +0,0 @@
|
|||
package common
|
||||
|
||||
// SigningOptions encapsulates settings that control whether or not we strip or
// add signatures to images when writing them.
type SigningOptions struct {
	// RemoveSignatures directs us to remove any signatures which are already present.
	RemoveSignatures bool
	// SignBy is a key identifier of some kind, indicating that a signature should be
	// generated using the specified private key and stored with the image. An empty
	// value means no signing is requested.
	SignBy string
}
|
|
@ -7,8 +7,8 @@ import (
|
|||
"k8s.io/apimachinery/pkg/fields"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/driver"
|
||||
libkpodimage "github.com/kubernetes-incubator/cri-o/libkpod/image"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/driver"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/images"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
@ -77,7 +77,7 @@ func (c *ContainerServer) GetContainerData(name string, size bool) (*ContainerDa
|
|||
if container.ImageID == "" {
|
||||
return nil, errors.Errorf("error reading container image data: container is not based on an image")
|
||||
}
|
||||
imageData, err := libkpodimage.GetData(c.store, container.ImageID)
|
||||
imageData, err := images.GetData(c.store, container.ImageID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading container image data")
|
||||
}
|
||||
|
|
|
@ -2,8 +2,8 @@ package libkpod
|
|||
|
||||
import (
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/image"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/layer"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/images"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/layers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -28,11 +28,11 @@ func (c *ContainerServer) GetDiff(from, to string) ([]archive.Change, error) {
|
|||
// If the id matches a layer, the top layer id is returned
|
||||
func (c *ContainerServer) getLayerID(id string) (string, error) {
|
||||
var toLayer string
|
||||
toImage, err := image.FindImage(c.store, id)
|
||||
toImage, err := images.FindImage(c.store, id)
|
||||
if err != nil {
|
||||
toCtr, err := c.store.Container(id)
|
||||
if err != nil {
|
||||
toLayer, err = layer.FullID(c.store, id)
|
||||
toLayer, err = layers.FullID(c.store, id)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("layer, image, or container %s does not exist", id)
|
||||
}
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
package driver
|
||||
|
||||
import cstorage "github.com/containers/storage"
|
||||
|
||||
// Data handles the data for a storage driver: the driver's name plus the
// free-form key/value metadata the driver reports for a layer.
type Data struct {
	// Name is the storage (graph) driver's name, e.g. as reported by the store.
	Name string
	// Data is driver-specific metadata for a layer, as returned by the
	// driver's Metadata call.
	Data map[string]string
}
|
||||
|
||||
// GetDriverName returns the name of the driver for the given store
|
||||
func GetDriverName(store cstorage.Store) (string, error) {
|
||||
driver, err := store.GraphDriver()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return driver.String(), nil
|
||||
}
|
||||
|
||||
// GetDriverMetadata returns the metadata regarding the driver for the layer in the given store
|
||||
func GetDriverMetadata(store cstorage.Store, layerID string) (map[string]string, error) {
|
||||
driver, err := store.GraphDriver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return driver.Metadata(layerID)
|
||||
}
|
|
@ -1,185 +0,0 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
cp "github.com/containers/image/copy"
|
||||
"github.com/containers/image/docker/tarfile"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/signature"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/common"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// DefaultRegistry is a prefix that we apply to an image name
	// to check docker hub first for the image; it is a transport
	// prefix ("docker://"), not a registry hostname.
	DefaultRegistry = "docker://"
)
|
||||
|
||||
// CopyOptions contains the options given when pushing or pulling images.
type CopyOptions struct {
	// Compression specifies the type of compression which is applied to
	// layer blobs. The default is to not use compression, but
	// archive.Gzip is recommended.
	Compression archive.Compression
	// SignaturePolicyPath specifies an override location for the signature
	// policy which should be used for verifying the new image as it is
	// being written. Except in specific circumstances, no value should be
	// specified, indicating that the shared, system-wide default policy
	// should be used.
	SignaturePolicyPath string
	// ReportWriter is an io.Writer which will be used to log the writing
	// of the new image.
	ReportWriter io.Writer
	// Store is the local storage store which holds the source image.
	Store storage.Store
	// DockerRegistryOptions encapsulates settings that affect how we
	// connect or authenticate to a remote registry to which we want to
	// push the image.
	common.DockerRegistryOptions
	// SigningOptions encapsulates settings that control whether or not we
	// strip or add signatures to the image when pushing (uploading) the
	// image to a registry.
	common.SigningOptions
}
|
||||
|
||||
// PushImage pushes the src image to the destination.
// srcName must name an image present in options.Store; destName is any
// transport-qualified image reference understood by alltransports.
// Returns a wrapped error if either name is empty, if the source image or
// policy context cannot be resolved, or if the copy itself fails.
func PushImage(srcName, destName string, options CopyOptions) error {
	if srcName == "" || destName == "" {
		return errors.Wrapf(syscall.EINVAL, "source and destination image names must be specified")
	}

	// Get the destination Image Reference
	dest, err := alltransports.ParseImageName(destName)
	if err != nil {
		return errors.Wrapf(err, "error getting destination imageReference for %q", destName)
	}

	policyContext, err := common.GetPolicyContext(options.SignaturePolicyPath)
	if err != nil {
		return errors.Wrapf(err, "Could not get default policy context for signature policy path %q", options.SignaturePolicyPath)
	}
	defer policyContext.Destroy()
	// Look up the image name and its layer, then build the imagePushData from
	// the image
	img, err := FindImage(options.Store, srcName)
	if err != nil {
		return errors.Wrapf(err, "error locating image %q for importing settings", srcName)
	}
	systemContext := common.GetSystemContext(options.SignaturePolicyPath)
	cd, err := ImportCopyDataFromImage(options.Store, systemContext, img.ID, "", "")
	if err != nil {
		return err
	}
	// Give the image we're producing the same ancestors as its source image
	cd.FromImage = cd.Docker.ContainerConfig.Image
	cd.FromImageID = string(cd.Docker.Parent)

	// Prep the layers and manifest for export.
	// NOTE(review): MakeImageRef is defined elsewhere; presumably it builds a
	// source reference over the local layers — confirm against its definition.
	src, err := cd.MakeImageRef(manifest.GuessMIMEType(cd.Manifest), options.Compression, img.Names, img.TopLayer, nil)
	if err != nil {
		return errors.Wrapf(err, "error copying layers and metadata")
	}

	copyOptions := common.GetCopyOptions(options.ReportWriter, options.SignaturePolicyPath, nil, &options.DockerRegistryOptions, options.SigningOptions)

	// Copy the image to the remote destination
	err = cp.Image(policyContext, dest, src, copyOptions)
	if err != nil {
		return errors.Wrapf(err, "Error copying image to the remote destination")
	}
	return nil
}
|
||||
|
||||
// PullImage copies the image from the source to the destination (the local
// store). imgName may be a transport-qualified reference; a bare name is
// retried with the DefaultRegistry ("docker://") prefix. Supports pulling
// from docker-archive tarballs and registries; "oci" sources are rejected.
// When quiet is true, progress output is suppressed.
// NOTE(review): the allTags parameter is accepted but never used in this body.
func PullImage(store storage.Store, imgName string, allTags, quiet bool, sc *types.SystemContext) error {
	var (
		images []string
		output io.Writer
	)

	if quiet {
		output = nil
	} else {
		output = os.Stdout
	}

	srcRef, err := alltransports.ParseImageName(imgName)
	if err != nil {
		// Retry with the default "docker://" transport prefix.
		defaultName := DefaultRegistry + imgName
		srcRef2, err2 := alltransports.ParseImageName(defaultName)
		if err2 != nil {
			return errors.Errorf("error parsing image name %q: %v", defaultName, err2)
		}
		srcRef = srcRef2
	}

	// The transport is everything before the first ":".
	splitArr := strings.Split(imgName, ":")

	// supports pulling from docker-archive, oci, and registries
	if splitArr[0] == "docker-archive" {
		tarSource := tarfile.NewSource(splitArr[len(splitArr)-1])
		manifest, err := tarSource.LoadTarManifest()
		if err != nil {
			return errors.Errorf("error retrieving manifest.json: %v", err)
		}
		// to pull all the images stored in one tar file
		for i := range manifest {
			if manifest[i].RepoTags != nil {
				images = append(images, manifest[i].RepoTags[0])
			} else {
				// create an image object and use the hex value of the digest as the image ID
				// for parsing the store reference
				newImg, err := srcRef.NewImage(sc)
				if err != nil {
					return err
				}
				// NOTE(review): defer inside a loop — every opened image stays
				// open until the function returns; consider closing per
				// iteration if archives with many untagged entries occur.
				defer newImg.Close()
				digest := newImg.ConfigInfo().Digest
				if err := digest.Validate(); err == nil {
					images = append(images, "@"+digest.Hex())
				} else {
					return errors.Wrapf(err, "error getting config info")
				}
			}
		}
	} else if splitArr[0] == "oci" {
		// needs to be implemented in future
		return errors.Errorf("oci not supported")
	} else {
		images = append(images, imgName)
	}

	policy, err := signature.DefaultPolicy(sc)
	if err != nil {
		return err
	}

	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer policyContext.Destroy()

	copyOptions := common.GetCopyOptions(output, "", nil, nil, common.SigningOptions{})

	// Copy each resolved image name into local storage.
	for _, image := range images {
		destRef, err := is.Transport.ParseStoreReference(store, image)
		if err != nil {
			return errors.Errorf("error parsing dest reference name: %v", err)
		}
		if err = cp.Image(policyContext, destRef, srcRef, copyOptions); err != nil {
			return errors.Errorf("error loading image %q: %v", image, err)
		}
	}
	return nil
}
|
|
@ -1,552 +0,0 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/kubernetes-incubator/cri-o/cmd/kpod/docker"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/common"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// Package is used to identify working containers
	Package = "kpod"
	// containerType tags the per-container saved state as belonging to kpod.
	containerType = Package + " 0.0.1"
	// stateFile is the name of the JSON state file written into a
	// container's directory by CopyData.Save.
	stateFile = Package + ".json"
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
	// suitable for specifying as a value of the PreferredManifestType
	// member of a CommitOptions structure. It is also the default.
	OCIv1ImageManifest = v1.MediaTypeImageManifest
)
|
||||
|
||||
// CopyData stores the basic data used when copying a container or image.
// It is serialized to/from the container's state file (see Save/openCopyData).
type CopyData struct {
	// store is the storage backend; not serialized.
	store storage.Store

	// Type is used to help identify a build container's metadata. It
	// should not be modified.
	Type string `json:"type"`
	// FromImage is the name of the source image which was used to create
	// the container, if one was used. It should not be modified.
	FromImage string `json:"image,omitempty"`
	// FromImageID is the ID of the source image which was used to create
	// the container, if one was used. It should not be modified.
	FromImageID string `json:"image-id"`
	// Config is the source image's configuration. It should not be
	// modified.
	Config []byte `json:"config,omitempty"`
	// Manifest is the source image's manifest. It should not be modified.
	Manifest []byte `json:"manifest,omitempty"`

	// Container is the name of the build container. It should not be modified.
	Container string `json:"container-name,omitempty"`
	// ContainerID is the ID of the build container. It should not be modified.
	ContainerID string `json:"container-id,omitempty"`
	// MountPoint is the last location where the container's root
	// filesystem was mounted. It should not be modified.
	MountPoint string `json:"mountpoint,omitempty"`

	// ImageAnnotations is a set of key-value pairs which is stored in the
	// image's manifest.
	ImageAnnotations map[string]string `json:"annotations,omitempty"`
	// ImageCreatedBy is a description of how this container was built.
	ImageCreatedBy string `json:"created-by,omitempty"`

	// Image metadata and runtime settings, in multiple formats.
	OCIv1  v1.Image       `json:"ociv1,omitempty"`
	Docker docker.V2Image `json:"docker,omitempty"`
}
|
||||
|
||||
// initConfig populates c.OCIv1 and c.Docker from whatever configuration data
// is available: first from c.Config (trying Docker then OCI formats, deriving
// the other representation from whichever parses), and failing that from a
// schema-1 manifest in c.Manifest. It then recovers annotations from an OCI
// manifest if present and normalizes everything via fixupConfig.
func (c *CopyData) initConfig() {
	image := ociv1.Image{}
	dimage := docker.V2Image{}
	if len(c.Config) > 0 {
		// Try to parse the image config. If we fail, try to start over from scratch
		if err := json.Unmarshal(c.Config, &dimage); err == nil && dimage.DockerVersion != "" {
			// Config parsed as a Docker image; derive the OCI form from it.
			image, err = makeOCIv1Image(&dimage)
			if err != nil {
				image = ociv1.Image{}
			}
		} else {
			// Otherwise try OCI, deriving the Docker form on success.
			if err := json.Unmarshal(c.Config, &image); err != nil {
				if dimage, err = makeDockerV2S2Image(&image); err != nil {
					dimage = docker.V2Image{}
				}
			}
		}
		c.OCIv1 = image
		c.Docker = dimage
	} else {
		// Try to dig out the image configuration from the manifest
		manifest := docker.V2S1Manifest{}
		if err := json.Unmarshal(c.Manifest, &manifest); err == nil && manifest.SchemaVersion == 1 {
			if dimage, err = makeDockerV2S1Image(manifest); err == nil {
				if image, err = makeOCIv1Image(&dimage); err != nil {
					image = ociv1.Image{}
				}
			}
		}
		c.OCIv1 = image
		c.Docker = dimage
	}

	if len(c.Manifest) > 0 {
		// Attempt to recover format-specific data from the manifest
		v1Manifest := ociv1.Manifest{}
		if json.Unmarshal(c.Manifest, &v1Manifest) == nil {
			c.ImageAnnotations = v1Manifest.Annotations
		}
	}

	c.fixupConfig()
}
|
||||
|
||||
// fixupConfig normalizes the parsed configuration: it promotes the
// image-level Docker config over the container config, clears the recorded
// Docker version, and backfills creation time, OS, architecture, and working
// directory with sensible defaults when they are unset.
func (c *CopyData) fixupConfig() {
	if c.Docker.Config != nil {
		// Prefer image-level settings over those from the container it was built from
		c.Docker.ContainerConfig = *c.Docker.Config
	}
	// After this, Config always aliases ContainerConfig.
	c.Docker.Config = &c.Docker.ContainerConfig
	c.Docker.DockerVersion = ""
	now := time.Now().UTC()
	if c.Docker.Created.IsZero() {
		c.Docker.Created = now
	}
	if c.OCIv1.Created.IsZero() {
		c.OCIv1.Created = &now
	}
	// Default OS/arch to the values of the running process.
	if c.OS() == "" {
		c.SetOS(runtime.GOOS)
	}
	if c.Architecture() == "" {
		c.SetArchitecture(runtime.GOARCH)
	}
	// Default workdir is the filesystem root ("/").
	if c.WorkDir() == "" {
		c.SetWorkDir(string(filepath.Separator))
	}
}
|
||||
|
||||
// OS returns a name of the OS on which a container built using this image
|
||||
//is intended to be run.
|
||||
func (c *CopyData) OS() string {
|
||||
return c.OCIv1.OS
|
||||
}
|
||||
|
||||
// SetOS sets the name of the OS on which a container built using this image
|
||||
// is intended to be run.
|
||||
func (c *CopyData) SetOS(os string) {
|
||||
c.OCIv1.OS = os
|
||||
c.Docker.OS = os
|
||||
}
|
||||
|
||||
// Architecture returns a name of the architecture on which a container built
|
||||
// using this image is intended to be run.
|
||||
func (c *CopyData) Architecture() string {
|
||||
return c.OCIv1.Architecture
|
||||
}
|
||||
|
||||
// SetArchitecture sets the name of the architecture on which ta container built
|
||||
// using this image is intended to be run.
|
||||
func (c *CopyData) SetArchitecture(arch string) {
|
||||
c.OCIv1.Architecture = arch
|
||||
c.Docker.Architecture = arch
|
||||
}
|
||||
|
||||
// WorkDir returns the default working directory for running commands in a container
|
||||
// built using this image.
|
||||
func (c *CopyData) WorkDir() string {
|
||||
return c.OCIv1.Config.WorkingDir
|
||||
}
|
||||
|
||||
// SetWorkDir sets the location of the default working directory for running commands
|
||||
// in a container built using this image.
|
||||
func (c *CopyData) SetWorkDir(there string) {
|
||||
c.OCIv1.Config.WorkingDir = there
|
||||
c.Docker.Config.WorkingDir = there
|
||||
}
|
||||
|
||||
// makeOCIv1Image builds the best OCIv1 image structure we can from the
// contents of the docker image structure. Ports, rootfs diff IDs, and
// history entries are converted element by element; the current
// implementation never returns a non-nil error.
func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
	config := dimage.Config
	if config == nil {
		// Fall back to the container-level config when no image-level one exists.
		config = &dimage.ContainerConfig
	}
	dimageCreatedTime := dimage.Created.UTC()
	image := ociv1.Image{
		Created:      &dimageCreatedTime,
		Author:       dimage.Author,
		Architecture: dimage.Architecture,
		OS:           dimage.OS,
		Config: ociv1.ImageConfig{
			User:         config.User,
			ExposedPorts: map[string]struct{}{},
			Env:          config.Env,
			Entrypoint:   config.Entrypoint,
			Cmd:          config.Cmd,
			Volumes:      config.Volumes,
			WorkingDir:   config.WorkingDir,
			Labels:       config.Labels,
		},
		RootFS: ociv1.RootFS{
			Type:    "",
			DiffIDs: []digest.Digest{},
		},
		History: []ociv1.History{},
	}
	// Docker ports are a typed map; convert keys to plain strings.
	for port, what := range config.ExposedPorts {
		image.Config.ExposedPorts[string(port)] = what
	}
	RootFS := docker.V2S2RootFS{}
	if dimage.RootFS != nil {
		RootFS = *dimage.RootFS
	}
	// Only copy diff IDs for the "layers" rootfs type.
	if RootFS.Type == docker.TypeLayers {
		image.RootFS.Type = docker.TypeLayers
		for _, id := range RootFS.DiffIDs {
			image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, digest.Digest(id.String()))
		}
	}
	// Translate each Docker history entry into its OCI equivalent.
	for _, history := range dimage.History {
		historyCreatedTime := history.Created.UTC()
		ohistory := ociv1.History{
			Created:    &historyCreatedTime,
			CreatedBy:  history.CreatedBy,
			Author:     history.Author,
			Comment:    history.Comment,
			EmptyLayer: history.EmptyLayer,
		}
		image.History = append(image.History, ohistory)
	}
	return image, nil
}
|
||||
|
||||
// makeDockerV2S2Image builds the best docker image structure we can from the
// contents of the OCI image structure — the inverse of makeOCIv1Image.
// Returns an error only when an OCI diff ID fails to parse as a digest.
func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) {
	image := docker.V2Image{
		V1Image: docker.V1Image{Created: oimage.Created.UTC(),
			Author:       oimage.Author,
			Architecture: oimage.Architecture,
			OS:           oimage.OS,
			ContainerConfig: docker.Config{
				User:         oimage.Config.User,
				ExposedPorts: docker.PortSet{},
				Env:          oimage.Config.Env,
				Entrypoint:   oimage.Config.Entrypoint,
				Cmd:          oimage.Config.Cmd,
				Volumes:      oimage.Config.Volumes,
				WorkingDir:   oimage.Config.WorkingDir,
				Labels:       oimage.Config.Labels,
			},
		},
		RootFS: &docker.V2S2RootFS{
			Type:    "",
			DiffIDs: []digest.Digest{},
		},
		History: []docker.V2S2History{},
	}
	// OCI ports are plain strings; convert keys to Docker's typed ports.
	for port, what := range oimage.Config.ExposedPorts {
		image.ContainerConfig.ExposedPorts[docker.Port(port)] = what
	}
	// Only copy diff IDs for the "layers" rootfs type; each must re-parse.
	if oimage.RootFS.Type == docker.TypeLayers {
		image.RootFS.Type = docker.TypeLayers
		for _, id := range oimage.RootFS.DiffIDs {
			d, err := digest.Parse(id.String())
			if err != nil {
				return docker.V2Image{}, err
			}
			image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, d)
		}
	}
	// Translate each OCI history entry into its Docker equivalent.
	for _, history := range oimage.History {
		dhistory := docker.V2S2History{
			Created:    history.Created.UTC(),
			CreatedBy:  history.CreatedBy,
			Author:     history.Author,
			Comment:    history.Comment,
			EmptyLayer: history.EmptyLayer,
		}
		image.History = append(image.History, dhistory)
	}
	// Mirror fixupConfig: Config aliases ContainerConfig.
	image.Config = &image.ContainerConfig
	return image, nil
}
|
||||
|
||||
// makeDockerV2S1Image builds the best docker image structure we can from the
// contents of the V2S1 image structure. The newest (first) history entry's
// V1Compatibility JSON supplies the image configuration; the remaining
// entries are converted into a filesystem history in v2s2 (oldest-first) order.
func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) {
	// Treat the most recent (first) item in the history as a description of the image.
	if len(manifest.History) == 0 {
		return docker.V2Image{}, errors.Errorf("error parsing image configuration from manifest")
	}
	dimage := docker.V2Image{}
	err := json.Unmarshal([]byte(manifest.History[0].V1Compatibility), &dimage)
	if err != nil {
		return docker.V2Image{}, err
	}
	if dimage.DockerVersion == "" {
		return docker.V2Image{}, errors.Errorf("error parsing image configuration from history")
	}
	// The DiffID list is intended to contain the sums of _uncompressed_ blobs, and these are most
	// likely compressed, so leave the list empty to avoid potential confusion later on. We can
	// construct a list with the correct values when we prep layers for pushing, so we don't lose.
	// information by leaving this part undone.
	rootFS := &docker.V2S2RootFS{
		Type:    docker.TypeLayers,
		DiffIDs: []digest.Digest{},
	}
	// Build a filesystem history.
	history := []docker.V2S2History{}
	for i := range manifest.History {
		// Start from neutral defaults; best-effort fill from V1Compatibility below.
		h := docker.V2S2History{
			Created:    time.Now().UTC(),
			Author:     "",
			CreatedBy:  "",
			Comment:    "",
			EmptyLayer: false,
		}
		dcompat := docker.V1Compatibility{}
		if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil {
			h.Created = dcompat.Created.UTC()
			h.Author = dcompat.Author
			h.Comment = dcompat.Comment
			if len(dcompat.ContainerConfig.Cmd) > 0 {
				h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd)
			}
			h.EmptyLayer = dcompat.ThrowAway
		}
		// Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order
		// compared to v2s2, which lists earlier layers before later ones.
		history = append([]docker.V2S2History{h}, history...)
	}
	dimage.RootFS = rootFS
	dimage.History = history
	return dimage, nil
}
|
||||
|
||||
// Annotations gets the anotations of the container or image
|
||||
func (c *CopyData) Annotations() map[string]string {
|
||||
return common.CopyStringStringMap(c.ImageAnnotations)
|
||||
}
|
||||
|
||||
// Save the CopyData to disk
|
||||
func (c *CopyData) Save() error {
|
||||
buildstate, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cdir, err := c.store.ContainerDirectory(c.ContainerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600)
|
||||
|
||||
}
|
||||
|
||||
// GetContainerCopyData gets the copy data for a container
|
||||
func GetContainerCopyData(store storage.Store, name string) (*CopyData, error) {
|
||||
var data *CopyData
|
||||
var err error
|
||||
if name != "" {
|
||||
data, err = openCopyData(store, name)
|
||||
if os.IsNotExist(errors.Cause(err)) {
|
||||
data, err = importCopyData(store, name, "")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading build container")
|
||||
}
|
||||
if data == nil {
|
||||
return nil, errors.Errorf("error finding build container")
|
||||
}
|
||||
return data, nil
|
||||
|
||||
}
|
||||
|
||||
// GetImageCopyData gets the copy data for an image
|
||||
func GetImageCopyData(store storage.Store, image string) (*CopyData, error) {
|
||||
if image == "" {
|
||||
return nil, errors.Errorf("image name must be specified")
|
||||
}
|
||||
img, err := FindImage(store, image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating image %q for importing settings", image)
|
||||
}
|
||||
|
||||
systemContext := common.GetSystemContext("")
|
||||
data, err := ImportCopyDataFromImage(store, systemContext, img.ID, "", "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading image")
|
||||
}
|
||||
if data == nil {
|
||||
return nil, errors.Errorf("error mocking up build configuration")
|
||||
}
|
||||
return data, nil
|
||||
|
||||
}
|
||||
|
||||
// importCopyData builds CopyData for the named container from its image,
// records the image as the container's ancestor (parsing FromImageID into a
// digest-based parent ID), and persists the result to the container's state
// file before returning it.
func importCopyData(store storage.Store, container, signaturePolicyPath string) (*CopyData, error) {
	if container == "" {
		return nil, errors.Errorf("container name must be specified")
	}

	c, err := store.Container(container)
	if err != nil {
		return nil, err
	}

	systemContext := common.GetSystemContext(signaturePolicyPath)

	data, err := ImportCopyDataFromImage(store, systemContext, c.ImageID, container, c.ID)
	if err != nil {
		return nil, err
	}

	if data.FromImageID != "" {
		// Use the image ID as the Docker parent; if it is not already a
		// well-formed digest, treat it as a bare hex value of the canonical
		// algorithm.
		if d, err2 := digest.Parse(data.FromImageID); err2 == nil {
			data.Docker.Parent = docker.ID(d)
		} else {
			data.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), data.FromImageID))
		}
	}
	if data.FromImage != "" {
		data.Docker.ContainerConfig.Image = data.FromImage
	}

	// Persist the imported state so subsequent opens find it on disk.
	err = data.Save()
	if err != nil {
		return nil, errors.Wrapf(err, "error saving CopyData state")
	}

	return data, nil
}
|
||||
|
||||
func openCopyData(store storage.Store, container string) (*CopyData, error) {
|
||||
cdir, err := store.ContainerDirectory(container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &CopyData{}
|
||||
err = json.Unmarshal(buildstate, &c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.Type != containerType {
|
||||
return nil, errors.Errorf("container is not a %s container", Package)
|
||||
}
|
||||
c.store = store
|
||||
c.fixupConfig()
|
||||
return c, nil
|
||||
|
||||
}
|
||||
|
||||
// ImportCopyDataFromImage creates copy data for an image with the given parameters
//
// If imageID is non-empty, the image's config blob, manifest, and (first)
// name are read from the store and recorded in the returned CopyData;
// otherwise those fields are left empty.  containerName and containerID are
// recorded verbatim.  The result always has its config initialized via
// initConfig().
func ImportCopyDataFromImage(store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*CopyData, error) {
	manifest := []byte{}
	config := []byte{}
	imageName := ""

	if imageID != "" {
		// "@" prefix makes the storage transport treat imageID as an ID,
		// not a name.
		ref, err := is.Transport.ParseStoreReference(store, "@"+imageID)
		if err != nil {
			return nil, errors.Wrapf(err, "no such image %q", "@"+imageID)
		}
		src, err2 := ref.NewImage(systemContext)
		if err2 != nil {
			return nil, errors.Wrapf(err2, "error instantiating image")
		}
		// Release the image once the config and manifest are copied out.
		defer src.Close()
		config, err = src.ConfigBlob()
		if err != nil {
			return nil, errors.Wrapf(err, "error reading image configuration")
		}
		manifest, _, err = src.Manifest()
		if err != nil {
			return nil, errors.Wrapf(err, "error reading image manifest")
		}
		// Name lookup is best-effort: on failure imageName stays empty.
		if img, err3 := store.Image(imageID); err3 == nil {
			if len(img.Names) > 0 {
				imageName = img.Names[0]
			}
		}
	}

	data := &CopyData{
		store:            store,
		Type:             containerType,
		FromImage:        imageName,
		FromImageID:      imageID,
		Config:           config,
		Manifest:         manifest,
		Container:        containerName,
		ContainerID:      containerID,
		ImageAnnotations: map[string]string{},
		ImageCreatedBy:   "",
	}

	data.initConfig()

	return data, nil
}
|
||||
|
||||
// MakeImageRef converts a CopyData struct into a types.ImageReference
//
// manifestType selects the output manifest format (OCIv1ImageManifest when
// empty), compress controls layer compression on export, names supplies the
// candidate image names (the first parseable one becomes the docker-format
// name), layerID identifies the read-write layer, and historyTimestamp, when
// non-nil, overrides the creation time recorded in the image.
func (c *CopyData) MakeImageRef(manifestType string, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
	var name reference.Named
	if len(names) > 0 {
		// Best-effort: an unparseable first name simply leaves name nil.
		if parsed, err := reference.ParseNamed(names[0]); err == nil {
			name = parsed
		}
	}
	if manifestType == "" {
		manifestType = OCIv1ImageManifest
	}
	oconfig, err := json.Marshal(&c.OCIv1)
	if err != nil {
		return nil, errors.Wrapf(err, "error encoding OCI-format image configuration")
	}
	dconfig, err := json.Marshal(&c.Docker)
	if err != nil {
		return nil, errors.Wrapf(err, "error encoding docker-format image configuration")
	}
	created := time.Now().UTC()
	if historyTimestamp != nil {
		created = historyTimestamp.UTC()
	}
	ref := &CopyRef{
		store:                 c.store,
		compression:           compress,
		name:                  name,
		names:                 names,
		layerID:               layerID,
		addHistory:            false,
		oconfig:               oconfig,
		dconfig:               dconfig,
		created:               created,
		createdBy:             c.ImageCreatedBy,
		annotations:           c.ImageAnnotations,
		preferredManifestType: manifestType,
		exporting:             true,
	}
	return ref, nil
}
|
|
@ -1,460 +0,0 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/image"
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/kubernetes-incubator/cri-o/cmd/kpod/docker"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// CopyRef handles image references used for copying images to/from remotes
type CopyRef struct {
	store                 storage.Store       // image/layer store backing the reference
	compression           archive.Compression // layer compression to apply on export
	name                  reference.Named     // parsed docker reference, may be nil
	names                 []string            // candidate names; names[0] is used by StringWithinTransport
	layerID               string              // ID of the topmost (read-write) layer
	addHistory            bool                // whether NewImageSource appends history entries
	oconfig               []byte              // OCIv1 image configuration, JSON-encoded
	dconfig               []byte              // docker-format image configuration, JSON-encoded
	created               time.Time           // creation timestamp for history entries
	createdBy             string              // "created by" string for history entries
	annotations           map[string]string   // annotations copied into the OCI manifest
	preferredManifestType string              // manifest MIME type used when the caller states no preference
	exporting             bool                // when false, layer digests are faked from layer IDs
}
|
||||
|
||||
// copySource is the image source produced by CopyRef.NewImageSource; it
// serves the manifest, config, and layer blobs staged under a temporary
// directory.
type copySource struct {
	path         string              // temporary directory holding layer blobs
	ref          *CopyRef            // reference this source was created from
	store        storage.Store       // backing store (copied from ref)
	layerID      string              // topmost layer ID (copied from ref)
	names        []string            // candidate names (copied from ref)
	addHistory   bool                // whether history entries were added
	compression  archive.Compression // compression used when blobs were written
	config       []byte              // image configuration blob served by GetBlob
	configDigest digest.Digest       // digest of config, used to recognize config requests
	manifest     []byte              // serialized manifest returned by GetManifest
	manifestType string              // MIME type of manifest
	exporting    bool                // whether real layer data was written under path
}
|
||||
|
||||
// NewImage creates a new image from the given system context
|
||||
func (c *CopyRef) NewImage(sc *types.SystemContext) (types.Image, error) {
|
||||
src, err := c.NewImageSource(sc, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return image.FromSource(src)
|
||||
}
|
||||
|
||||
// selectManifestType picks the manifest type to emit.  The preferred type
// wins outright if the caller listed it as acceptable; otherwise the last
// acceptable type that we also support wins; failing both, the preferred
// type is returned unchanged.
func selectManifestType(preferred string, acceptable, supported []string) string {
	selected := preferred
	for _, candidate := range acceptable {
		if candidate == preferred {
			return preferred
		}
		for _, known := range supported {
			if candidate == known {
				selected = candidate
			}
		}
	}
	return selected
}
|
||||
|
||||
// NewImageSource creates a new image source from the given system context and manifest
//
// It walks the layer chain under c.layerID, stages each layer blob (possibly
// recompressed) in a fresh temporary directory, builds matching OCIv1 and
// docker-v2s2 manifests and configs, and returns a copySource serving the
// format selected from manifestTypes.
func (c *CopyRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
	// Decide which type of manifest and configuration output we're going to provide.
	supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest}
	manifestType := selectManifestType(c.preferredManifestType, manifestTypes, supportedManifestTypes)
	// If it's not a format we support, return an error.
	if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
		return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest)
	}
	// Start building the list of layers using the read-write layer.
	layers := []string{}
	layerID := c.layerID
	layer, err := c.store.Layer(layerID)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
	}
	// Walk the list of parent layers, prepending each as we go, so that
	// layers ends up ordered base-first.
	for layer != nil {
		layers = append(append([]string{}, layerID), layers...)
		layerID = layer.Parent
		if layerID == "" {
			err = nil
			break
		}
		layer, err = c.store.Layer(layerID)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
		}
	}
	logrus.Debugf("layer list: %q", layers)

	// Make a temporary directory to hold blobs.
	path, err := ioutil.TempDir(os.TempDir(), "kpod")
	if err != nil {
		return nil, err
	}
	logrus.Debugf("using %q to hold temporary data", path)
	// Clean up the temporary directory on any error path (src is only
	// non-nil once everything has succeeded).
	defer func() {
		if src == nil {
			err2 := os.RemoveAll(path)
			if err2 != nil {
				// NOTE(review): this logs the outer err, not err2 — possibly
				// intended to be err2; confirm before changing.
				logrus.Errorf("error removing %q: %v", path, err)
			}
		}
	}()

	// Build fresh copies of the configurations so that we don't mess with the values in the Builder
	// object itself.
	oimage := v1.Image{}
	err = json.Unmarshal(c.oconfig, &oimage)
	if err != nil {
		return nil, err
	}
	dimage := docker.V2Image{}
	err = json.Unmarshal(c.dconfig, &dimage)
	if err != nil {
		return nil, err
	}

	// Start building manifests.
	omanifest := v1.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
		},
		Layers:      []v1.Descriptor{},
		Annotations: c.annotations,
	}
	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType:     docker.V2S2MediaTypeManifest,
		},
		Config: docker.V2S2Descriptor{
			MediaType: docker.V2S2MediaTypeImageConfig,
		},
		Layers: []docker.V2S2Descriptor{},
	}

	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []digest.Digest{}
	dimage.RootFS = &docker.V2S2RootFS{}
	dimage.RootFS.Type = docker.TypeLayers
	dimage.RootFS.DiffIDs = []digest.Digest{}

	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
	for _, layerID := range layers {
		omediaType := v1.MediaTypeImageLayer
		dmediaType := docker.V2S2MediaTypeUncompressedLayer
		// Figure out which media type we want to call this.  Assume no compression.
		if c.compression != archive.Uncompressed {
			switch c.compression {
			case archive.Gzip:
				omediaType = v1.MediaTypeImageLayerGzip
				dmediaType = docker.V2S2MediaTypeLayer
				logrus.Debugf("compressing layer %q with gzip", layerID)
			case archive.Bzip2:
				// Until the image specs define a media type for bzip2-compressed layers, even if we know
				// how to decompress them, we can't try to compress layers with bzip2.
				return nil, errors.New("media type for bzip2-compressed layers is not defined")
			default:
				logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
			}
		}
		// If we're not re-exporting the data, just fake up layer and diff IDs for the manifest.
		if !c.exporting {
			fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID)
			// Add a note in the manifest about the layer.  The blobs should be identified by their
			// possibly-compressed blob digests, but just use the layer IDs here.
			olayerDescriptor := v1.Descriptor{
				MediaType: omediaType,
				Digest:    fakeLayerDigest,
				Size:      -1,
			}
			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
			dlayerDescriptor := docker.V2S2Descriptor{
				MediaType: dmediaType,
				Digest:    fakeLayerDigest,
				Size:      -1,
			}
			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
			// Add a note about the diffID, which should be uncompressed digest of the blob, but
			// just use the layer ID here.
			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest)
			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest)
			continue
		}
		// Start reading the layer.
		// NOTE(review): the defers below fire at function return, not per
		// iteration, so all layer readers stay open until NewImageSource
		// returns.
		rc, err := c.store.Diff("", layerID, nil)
		if err != nil {
			return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
		}
		defer rc.Close()
		// Set up to decompress the layer, in case it's coming out compressed.  Due to implementation
		// differences, the result may not match the digest the blob had when it was originally imported,
		// so we have to recompute all of this anyway if we want to be sure the digests we use will be
		// correct.
		uncompressed, err := archive.DecompressStream(rc)
		if err != nil {
			return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
		}
		defer uncompressed.Close()
		srcHasher := digest.Canonical.Digester()
		reader := io.TeeReader(uncompressed, srcHasher.Hash())
		// Set up to write the possibly-recompressed blob.
		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return nil, errors.Wrapf(err, "error opening file for layer %q", layerID)
		}
		destHasher := digest.Canonical.Digester()
		counter := ioutils.NewWriteCounter(layerFile)
		multiWriter := io.MultiWriter(counter, destHasher.Hash())
		// Compress the layer, if we're compressing it.
		writer, err := archive.CompressStream(multiWriter, c.compression)
		if err != nil {
			return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
		}
		size, err := io.Copy(writer, reader)
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		writer.Close()
		layerFile.Close()
		if c.compression == archive.Uncompressed {
			if size != counter.Count {
				return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count)
			}
		} else {
			// For compressed output, the on-disk (compressed) byte count is
			// the size recorded in the manifest.
			size = counter.Count
		}
		logrus.Debugf("layer %q size is %d bytes", layerID, size)
		// Rename the layer so that we can more easily find it by digest later.
		err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		// Add a note in the manifest about the layer.  The blobs are identified by their possibly-
		// compressed blob digests.
		olayerDescriptor := v1.Descriptor{
			MediaType: omediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
		dlayerDescriptor := docker.V2S2Descriptor{
			MediaType: dmediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
		// Add a note about the diffID, which is always an uncompressed value.
		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
	}

	if c.addHistory {
		// Build history notes in the image configurations.
		onews := v1.History{
			Created:    &c.created,
			CreatedBy:  c.createdBy,
			Author:     oimage.Author,
			EmptyLayer: false,
		}
		oimage.History = append(oimage.History, onews)
		dnews := docker.V2S2History{
			Created:    c.created,
			CreatedBy:  c.createdBy,
			Author:     dimage.Author,
			EmptyLayer: false,
		}
		dimage.History = append(dimage.History, dnews)
	}

	// Encode the image configuration blob.
	oconfig, err := json.Marshal(&oimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)

	// Add the configuration blob to the manifest.
	omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
	omanifest.Config.Size = int64(len(oconfig))
	omanifest.Config.MediaType = v1.MediaTypeImageConfig

	// Encode the manifest.
	omanifestbytes, err := json.Marshal(&omanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)

	// Encode the image configuration blob.
	dconfig, err := json.Marshal(&dimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 config = %s", dconfig)

	// Add the configuration blob to the manifest.
	dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
	dmanifest.Config.Size = int64(len(dconfig))
	dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig

	// Encode the manifest.
	dmanifestbytes, err := json.Marshal(&dmanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)

	// Decide which manifest and configuration blobs we'll actually output.
	var config []byte
	var manifest []byte
	switch manifestType {
	case v1.MediaTypeImageManifest:
		manifest = omanifestbytes
		config = oconfig
	case docker.V2S2MediaTypeManifest:
		manifest = dmanifestbytes
		config = dconfig
	default:
		// manifestType was validated at the top of the function.
		panic("unreachable code: unsupported manifest type")
	}
	src = &copySource{
		path:         path,
		ref:          c,
		store:        c.store,
		layerID:      c.layerID,
		names:        c.names,
		addHistory:   c.addHistory,
		compression:  c.compression,
		config:       config,
		configDigest: digest.Canonical.FromBytes(config),
		manifest:     manifest,
		manifestType: manifestType,
		exporting:    c.exporting,
	}
	return src, nil
}
|
||||
|
||||
// NewImageDestination creates a new image destination from the given system context
|
||||
func (c *CopyRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) {
|
||||
return nil, errors.Errorf("can't write to a container")
|
||||
}
|
||||
|
||||
// DockerReference gets the docker reference for the given CopyRef
|
||||
func (c *CopyRef) DockerReference() reference.Named {
|
||||
return c.name
|
||||
}
|
||||
|
||||
// StringWithinTransport returns the first name of the copyRef
|
||||
func (c *CopyRef) StringWithinTransport() string {
|
||||
if len(c.names) > 0 {
|
||||
return c.names[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// DeleteImage deletes an image in the CopyRef
|
||||
func (c *CopyRef) DeleteImage(*types.SystemContext) error {
|
||||
// we were never here
|
||||
return nil
|
||||
}
|
||||
|
||||
// PolicyConfigurationIdentity returns the policy configuration for the CopyRef
|
||||
func (c *CopyRef) PolicyConfigurationIdentity() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// PolicyConfigurationNamespaces returns the policy configuration namespace for the CopyRef
|
||||
func (c *CopyRef) PolicyConfigurationNamespaces() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Transport returns an ImageTransport for the given CopyRef
|
||||
func (c *CopyRef) Transport() types.ImageTransport {
|
||||
return is.Transport
|
||||
}
|
||||
|
||||
func (cs *copySource) Close() error {
|
||||
err := os.RemoveAll(cs.path)
|
||||
if err != nil {
|
||||
logrus.Errorf("error removing %q: %v", cs.path, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (cs *copySource) Reference() types.ImageReference {
|
||||
return cs.ref
|
||||
}
|
||||
|
||||
func (cs *copySource) GetSignatures(context.Context) ([][]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (cs *copySource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
|
||||
return []byte{}, "", errors.Errorf("TODO")
|
||||
}
|
||||
|
||||
func (cs *copySource) GetManifest() ([]byte, string, error) {
|
||||
return cs.manifest, cs.manifestType, nil
|
||||
}
|
||||
|
||||
// GetBlob returns a reader for the blob with the requested digest: the
// in-memory configuration blob when the digest matches configDigest,
// otherwise the on-disk layer file of that digest under cs.path.  size is -1
// when it cannot be determined.
func (cs *copySource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
	if blob.Digest == cs.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(cs.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	// Layer blobs were renamed to their digest when they were staged, so the
	// digest doubles as the file name.
	layerFile, err := os.OpenFile(filepath.Join(cs.path, blob.Digest.String()), os.O_RDONLY, 0600)
	if err != nil {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, err
	}
	size = -1
	st, err := layerFile.Stat()
	if err != nil {
		// Stat failure is non-fatal: the blob is still readable, just of
		// unknown size.
		logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err)
	} else {
		size = st.Size()
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	closer := func() error {
		layerFile.Close()
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		return nil
	}
	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
|
|
@ -1,288 +0,0 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
is "github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/common"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// FilterParams contains the filter options that may be given when outputting images
type FilterParams struct {
	dangling         string    // "true"/"false": match only untagged/tagged images
	label            string    // "key" or "key=value" a matching image must carry
	beforeImage      time.Time // matching images must have been created before this time
	sinceImage       time.Time // matching images must have been created after this time
	referencePattern string    // name (or name suffix) a matching image must have
}
|
||||
|
||||
// ParseFilter takes a set of images and a filter string as input, and returns the
|
||||
func ParseFilter(store storage.Store, filter string) (*FilterParams, error) {
|
||||
images, err := store.Images()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params := new(FilterParams)
|
||||
filterStrings := strings.Split(filter, ",")
|
||||
for _, param := range filterStrings {
|
||||
pair := strings.SplitN(param, "=", 2)
|
||||
switch strings.TrimSpace(pair[0]) {
|
||||
case "dangling":
|
||||
if common.IsValidBool(pair[1]) {
|
||||
params.dangling = pair[1]
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1])
|
||||
}
|
||||
case "label":
|
||||
params.label = pair[1]
|
||||
case "before":
|
||||
if img, err := findImageInSlice(images, pair[1]); err == nil {
|
||||
info, err := getImageInspectInfo(store, img)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params.beforeImage = info.Created
|
||||
} else {
|
||||
return nil, fmt.Errorf("no such id: %s", pair[0])
|
||||
}
|
||||
case "since":
|
||||
if img, err := findImageInSlice(images, pair[1]); err == nil {
|
||||
info, err := getImageInspectInfo(store, img)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params.sinceImage = info.Created
|
||||
} else {
|
||||
return nil, fmt.Errorf("no such id: %s``", pair[0])
|
||||
}
|
||||
case "reference":
|
||||
params.referencePattern = pair[1]
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid filter: '%s'", pair[0])
|
||||
}
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
func matchesFilter(store storage.Store, image storage.Image, name string, params *FilterParams) bool {
|
||||
if params == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
info, err := getImageInspectInfo(store, image)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if params.dangling != "" && !matchesDangling(name, params.dangling) {
|
||||
return false
|
||||
} else if params.label != "" && !matchesLabel(info, store, params.label) {
|
||||
return false
|
||||
} else if !params.beforeImage.IsZero() && !matchesBeforeImage(info, name, params) {
|
||||
return false
|
||||
} else if !params.sinceImage.IsZero() && !matchesSinceImage(info, name, params) {
|
||||
return false
|
||||
} else if params.referencePattern != "" && !MatchesReference(name, params.referencePattern) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func matchesDangling(name string, dangling string) bool {
|
||||
if common.IsFalse(dangling) && name != "<none>" {
|
||||
return true
|
||||
} else if common.IsTrue(dangling) && name == "<none>" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func matchesLabel(info *types.ImageInspectInfo, store storage.Store, label string) bool {
|
||||
pair := strings.SplitN(label, "=", 2)
|
||||
for key, value := range info.Labels {
|
||||
if key == pair[0] {
|
||||
if len(pair) == 2 {
|
||||
if value == pair[1] {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the image was created since the filter image. Returns
|
||||
// false otherwise
|
||||
func matchesBeforeImage(info *types.ImageInspectInfo, name string, params *FilterParams) bool {
|
||||
return info.Created.Before(params.beforeImage)
|
||||
}
|
||||
|
||||
// Returns true if the image was created since the filter image. Returns
|
||||
// false otherwise
|
||||
func matchesSinceImage(info *types.ImageInspectInfo, name string, params *FilterParams) bool {
|
||||
return info.Created.After(params.sinceImage)
|
||||
}
|
||||
|
||||
// MatchesID returns true if argID is a full or partial (prefix) match for id.
func MatchesID(id, argID string) bool {
	// The user-supplied argID must be a prefix of the full image ID.  The
	// previous HasPrefix(argID, id) had the operands reversed, so partial
	// IDs could never match.
	return strings.HasPrefix(id, argID)
}
|
||||
|
||||
// MatchesReference returns true if argName is a full or partial match for name
// Partial matches will register only if they match the most specific part of the name available
// For example, take the image docker.io/library/redis:latest
// redis, library/redis, docker.io/library/redis, redis:latest, etc. will match
// But redis:alpine, ry/redis, library, and io/library/redis will not
func MatchesReference(name, argName string) bool {
	if argName == "" {
		return false
	}
	splitName := strings.Split(name, ":")
	// If the arg contains a tag, we handle it differently than if it does not
	if strings.Contains(argName, ":") {
		// A name without a tag cannot match an argument that requires one;
		// previously splitName[1] panicked in this case.
		if len(splitName) < 2 {
			return false
		}
		splitArg := strings.Split(argName, ":")
		return strings.HasSuffix(splitName[0], splitArg[0]) && (splitName[1] == splitArg[1])
	}
	return strings.HasSuffix(splitName[0], argName)
}
|
||||
|
||||
// FormattedSize renders a byte count as a human-readable string, dividing by
// 1024 until the value fits the largest unit up to TB.
func FormattedSize(size float64) string {
	units := []string{"B", "KB", "MB", "GB", "TB"}
	idx := 0
	for size >= 1024 && idx < len(units)-1 {
		size /= 1024
		idx++
	}
	return fmt.Sprintf("%.4g %s", size, units[idx])
}
|
||||
|
||||
// FindImage searches for a *storage.Image with a matching the given name or ID in the given store.
func FindImage(store storage.Store, image string) (*storage.Image, error) {
	var img *storage.Image
	// First attempt: resolve the argument as a storage-transport reference.
	ref, err := is.Transport.ParseStoreReference(store, image)
	if err == nil {
		img, err = is.Transport.GetStoreImage(store, ref)
	}
	if err != nil {
		// Second attempt: direct store lookup by name or ID.
		img2, err2 := store.Image(image)
		if err2 != nil {
			// Both lookups failed; a nil ref means the argument never even
			// parsed, so report the parse error in that case.
			if ref == nil {
				return nil, errors.Wrapf(err, "error parsing reference to image %q", image)
			}
			return nil, errors.Wrapf(err, "unable to locate image %q", image)
		}
		img = img2
	}
	return img, nil
}
|
||||
|
||||
// FindImageRef searches for and returns a new types.Image matching the given name or ID in the given store.
|
||||
func FindImageRef(store storage.Store, image string) (types.Image, error) {
|
||||
img, err := FindImage(store, image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to locate image %q", image)
|
||||
}
|
||||
ref, err := is.Transport.ParseStoreReference(store, "@"+img.ID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
|
||||
}
|
||||
imgRef, err := ref.NewImage(nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading image %q", img.ID)
|
||||
}
|
||||
return imgRef, nil
|
||||
}
|
||||
|
||||
func findImageInSlice(images []storage.Image, ref string) (storage.Image, error) {
|
||||
for _, image := range images {
|
||||
if MatchesID(image.ID, ref) {
|
||||
return image, nil
|
||||
}
|
||||
for _, name := range image.Names {
|
||||
if MatchesReference(name, ref) {
|
||||
return image, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return storage.Image{}, errors.New("could not find image")
|
||||
}
|
||||
|
||||
// InfoAndDigestAndSize returns the inspection info and size of the image in the given
// store and the digest of its manifest, if it has one, or "" if it doesn't.
func InfoAndDigestAndSize(store storage.Store, img storage.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) {
	// The "@" prefix presumably makes the lookup resolve img.ID as an image
	// ID rather than a name (matching the ParseStoreReference usage
	// elsewhere in this package) — confirm against FindImageRef's handling.
	imgRef, err := FindImageRef(store, "@"+img.ID)
	if err != nil {
		return nil, "", -1, errors.Wrapf(err, "error reading image %q", img.ID)
	}
	defer imgRef.Close()
	return infoAndDigestAndSize(imgRef)
}
|
||||
|
||||
func infoAndDigestAndSize(imgRef types.Image) (*types.ImageInspectInfo, digest.Digest, int64, error) {
|
||||
imgSize, err := imgRef.Size()
|
||||
if err != nil {
|
||||
return nil, "", -1, errors.Wrapf(err, "error reading size of image %q", transports.ImageName(imgRef.Reference()))
|
||||
}
|
||||
manifest, _, err := imgRef.Manifest()
|
||||
if err != nil {
|
||||
return nil, "", -1, errors.Wrapf(err, "error reading manifest for image %q", transports.ImageName(imgRef.Reference()))
|
||||
}
|
||||
manifestDigest := digest.Digest("")
|
||||
if len(manifest) > 0 {
|
||||
manifestDigest = digest.Canonical.FromBytes(manifest)
|
||||
}
|
||||
info, err := imgRef.Inspect()
|
||||
if err != nil {
|
||||
return nil, "", -1, errors.Wrapf(err, "error inspecting image %q", transports.ImageName(imgRef.Reference()))
|
||||
}
|
||||
return info, manifestDigest, imgSize, nil
|
||||
}
|
||||
|
||||
// GetImagesMatchingFilter returns a slice of all images in the store that match the provided FilterParams.
|
||||
// Images with more than one name matching the filter will be in the slice once for each name
|
||||
func GetImagesMatchingFilter(store storage.Store, filter *FilterParams, argName string) ([]storage.Image, error) {
|
||||
images, err := store.Images()
|
||||
filteredImages := []storage.Image{}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, image := range images {
|
||||
names := []string{}
|
||||
if len(image.Names) > 0 {
|
||||
names = image.Names
|
||||
} else {
|
||||
names = append(names, "<none>")
|
||||
}
|
||||
for _, name := range names {
|
||||
if (filter == nil && argName == "") || (filter != nil && matchesFilter(store, image, name, filter)) || MatchesReference(name, argName) {
|
||||
newImage := image
|
||||
newImage.Names = []string{name}
|
||||
filteredImages = append(filteredImages, newImage)
|
||||
}
|
||||
}
|
||||
}
|
||||
return filteredImages, nil
|
||||
}
|
||||
|
||||
func getImageInspectInfo(store storage.Store, image storage.Image) (*types.ImageInspectInfo, error) {
|
||||
storeRef, err := is.Transport.ParseStoreReference(store, "@"+image.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
img, err := storeRef.NewImage(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer img.Close()
|
||||
return img.Inspect()
|
||||
}
|
|
@ -1,164 +0,0 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/storage"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/driver"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Data handles the data used when inspecting a container
// nolint
type Data struct {
	ID      string   // image ID in the store
	Tags    []string // tagged names recorded for the image (see ParseImageNames)
	Digests []string // digest-carrying names recorded for the image (see ParseImageNames)
	Digest  digest.Digest
	Comment string
	Created *time.Time
	// Container the image was derived from, per the image configuration.
	Container    string
	Author       string
	Config       ociv1.ImageConfig
	Architecture string
	OS           string
	Annotations  map[string]string
	CreatedBy    string
	Size         uint
	VirtualSize  uint
	GraphDriver  driver.Data
	RootFS       ociv1.RootFS
}
|
||||
|
||||
// ParseImageNames parses the names we've stored with an image into a list of
|
||||
// tagged references and a list of references which contain digests.
|
||||
func ParseImageNames(names []string) (tags, digests []string, err error) {
|
||||
for _, name := range names {
|
||||
if named, err := reference.ParseNamed(name); err == nil {
|
||||
if digested, ok := named.(reference.Digested); ok {
|
||||
canonical, err := reference.WithDigest(named, digested.Digest())
|
||||
if err == nil {
|
||||
digests = append(digests, canonical.String())
|
||||
}
|
||||
} else {
|
||||
if reference.IsNameOnly(named) {
|
||||
named = reference.TagNameOnly(named)
|
||||
}
|
||||
if tagged, ok := named.(reference.Tagged); ok {
|
||||
namedTagged, err := reference.WithTag(named, tagged.Tag())
|
||||
if err == nil {
|
||||
tags = append(tags, namedTagged.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return tags, digests, nil
|
||||
}
|
||||
|
||||
func annotations(manifest []byte, manifestType string) map[string]string {
|
||||
annotations := make(map[string]string)
|
||||
switch manifestType {
|
||||
case ociv1.MediaTypeImageManifest:
|
||||
var m ociv1.Manifest
|
||||
if err := json.Unmarshal(manifest, &m); err == nil {
|
||||
for k, v := range m.Annotations {
|
||||
annotations[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return annotations
|
||||
}
|
||||
|
||||
// GetData gets the Data for a container with the given name in the given store.
// It resolves the image, reads its manifest, OCI config, and layer/driver
// information, and assembles everything into a single Data value.
func GetData(store storage.Store, name string) (*Data, error) {
	// Resolve the name to a stored image record.
	img, err := FindImage(store, name)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image %q", name)
	}

	// Open an image reference for the store ("@ID" form) so we can read
	// the manifest, config, and size below.
	imgRef, err := FindImageRef(store, "@"+img.ID)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image %q", img.ID)
	}
	defer imgRef.Close()

	// Split the stored names into tagged references and digest references.
	tags, digests, err := ParseImageNames(img.Names)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing image names for %q", name)
	}

	driverName, err := driver.GetDriverName(store)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading name of storage driver")
	}

	topLayerID := img.TopLayer

	driverMetadata, err := driver.GetDriverMetadata(store, topLayerID)
	if err != nil {
		return nil, errors.Wrapf(err, "error asking storage driver %q for metadata", driverName)
	}

	// Size is the diff size of the top layer against its parent.
	layer, err := store.Layer(topLayerID)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading information about layer %q", topLayerID)
	}
	size, err := store.DiffSize(layer.Parent, layer.ID)
	if err != nil {
		return nil, errors.Wrapf(err, "error determining size of layer %q", layer.ID)
	}

	imgSize, err := imgRef.Size()
	if err != nil {
		return nil, errors.Wrapf(err, "error determining size of image %q", transports.ImageName(imgRef.Reference()))
	}

	// Digest the raw manifest bytes, if any, and pull annotations from it.
	manifest, manifestType, err := imgRef.Manifest()
	if err != nil {
		return nil, errors.Wrapf(err, "error reading manifest for image %q", img.ID)
	}
	manifestDigest := digest.Digest("")
	if len(manifest) > 0 {
		manifestDigest = digest.Canonical.FromBytes(manifest)
	}
	annotations := annotations(manifest, manifestType)

	config, err := imgRef.OCIConfig()
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image configuration for %q", img.ID)
	}
	// Comment/CreatedBy come from the most recent history entry, when present.
	historyComment := ""
	historyCreatedBy := ""
	if len(config.History) > 0 {
		historyComment = config.History[len(config.History)-1].Comment
		historyCreatedBy = config.History[len(config.History)-1].CreatedBy
	}

	return &Data{
		ID:           img.ID,
		Tags:         tags,
		Digests:      digests,
		Digest:       manifestDigest,
		Comment:      historyComment,
		Created:      config.Created,
		Author:       config.Author,
		Config:       config.Config,
		Architecture: config.Architecture,
		OS:           config.OS,
		Annotations:  annotations,
		CreatedBy:    historyCreatedBy,
		Size:         uint(size),
		// VirtualSize adds the image reference's own size on top of the
		// top-layer diff size.
		VirtualSize: uint(size + imgSize),
		GraphDriver: driver.Data{
			Name: driverName,
			Data: driverMetadata,
		},
		RootFS: config.RootFS,
	}, nil
}
|
|
@ -1,35 +0,0 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// UntagImage removes the tag from the given image
|
||||
func UntagImage(store storage.Store, image *storage.Image, imgArg string) (string, error) {
|
||||
// Remove name from image.Names and set the new names
|
||||
newNames := []string{}
|
||||
removedName := ""
|
||||
for _, name := range image.Names {
|
||||
if MatchesReference(name, imgArg) {
|
||||
removedName = name
|
||||
continue
|
||||
}
|
||||
newNames = append(newNames, name)
|
||||
}
|
||||
if removedName != "" {
|
||||
if err := store.SetNames(image.ID, newNames); err != nil {
|
||||
return "", errors.Wrapf(err, "error removing name %q from image %q", removedName, image.ID)
|
||||
}
|
||||
}
|
||||
return removedName, nil
|
||||
}
|
||||
|
||||
// RemoveImage removes the given image from storage
|
||||
func RemoveImage(image *storage.Image, store storage.Store) (string, error) {
|
||||
_, err := store.DeleteImage(image.ID, true)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "could not remove image %q", image.ID)
|
||||
}
|
||||
return image.ID, nil
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
package layer
|
||||
|
||||
import cstorage "github.com/containers/storage"
|
||||
|
||||
// FullID gets the full id of a layer given a partial id or name
|
||||
func FullID(store cstorage.Store, id string) (string, error) {
|
||||
layer, err := store.Layer(id)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return layer.ID, nil
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue