Remove dead code
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
parent 88deb3934f
commit 9dab0eee24
2 changed files with 0 additions and 1109 deletions
@@ -1,664 +0,0 @@
package libpod

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"time"

	"github.com/containers/image/docker/reference"
	is "github.com/containers/image/storage"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/archive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/kubernetes-incubator/cri-o/cmd/kpod/docker"
	"github.com/kubernetes-incubator/cri-o/libpod/common"
	"github.com/kubernetes-incubator/cri-o/libpod/driver"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go/v1"
	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

const (
	// Package is used to identify working containers
	Package = "kpod"
	containerType = Package + " 0.0.1"
	stateFile = Package + ".json"
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
	// suitable for specifying as a value of the PreferredManifestType
	// member of a CommitOptions structure. It is also the default.
	OCIv1ImageManifest = v1.MediaTypeImageManifest
)

// Data handles the data used when inspecting a container
// nolint
type Data struct {
	ID string
	Tags []string
	Digests []string
	ManifestDigest digest.Digest
	Comment string
	Created *time.Time
	Container string
	Author string
	Config ociv1.ImageConfig
	Architecture string
	OS string
	Annotations map[string]string
	CreatedBy string
	Size uint
	VirtualSize uint
	GraphDriver driver.Data
	RootFS ociv1.RootFS
}

// CopyData stores the basic data used when copying a container or image
type CopyData struct {
	store storage.Store

	// Type is used to help identify a build container's metadata. It
	// should not be modified.
	Type string `json:"type"`
	// FromImage is the name of the source image which was used to create
	// the container, if one was used. It should not be modified.
	FromImage string `json:"image,omitempty"`
	// FromImageID is the ID of the source image which was used to create
	// the container, if one was used. It should not be modified.
	FromImageID string `json:"image-id"`
	// Config is the source image's configuration. It should not be
	// modified.
	Config []byte `json:"config,omitempty"`
	// Manifest is the source image's manifest. It should not be modified.
	Manifest []byte `json:"manifest,omitempty"`

	// Container is the name of the build container. It should not be modified.
	Container string `json:"container-name,omitempty"`
	// ContainerID is the ID of the build container. It should not be modified.
	ContainerID string `json:"container-id,omitempty"`
	// MountPoint is the last location where the container's root
	// filesystem was mounted. It should not be modified.
	MountPoint string `json:"mountpoint,omitempty"`

	// ImageAnnotations is a set of key-value pairs which is stored in the
	// image's manifest.
	ImageAnnotations map[string]string `json:"annotations,omitempty"`
	// ImageCreatedBy is a description of how this container was built.
	ImageCreatedBy string `json:"created-by,omitempty"`

	// Image metadata and runtime settings, in multiple formats.
	OCIv1 v1.Image `json:"ociv1,omitempty"`
	Docker docker.V2Image `json:"docker,omitempty"`
}

func (c *CopyData) initConfig() {
	image := ociv1.Image{}
	dimage := docker.V2Image{}
	if len(c.Config) > 0 {
		// Try to parse the image config. If we fail, try to start over from scratch
		if err := json.Unmarshal(c.Config, &dimage); err == nil && dimage.DockerVersion != "" {
			image, err = makeOCIv1Image(&dimage)
			if err != nil {
				image = ociv1.Image{}
			}
		} else {
			if err := json.Unmarshal(c.Config, &image); err != nil {
				if dimage, err = makeDockerV2S2Image(&image); err != nil {
					dimage = docker.V2Image{}
				}
			}
		}
		c.OCIv1 = image
		c.Docker = dimage
	} else {
		// Try to dig out the image configuration from the manifest
		manifest := docker.V2S1Manifest{}
		if err := json.Unmarshal(c.Manifest, &manifest); err == nil && manifest.SchemaVersion == 1 {
			if dimage, err = makeDockerV2S1Image(manifest); err == nil {
				if image, err = makeOCIv1Image(&dimage); err != nil {
					image = ociv1.Image{}
				}
			}
		}
		c.OCIv1 = image
		c.Docker = dimage
	}

	if len(c.Manifest) > 0 {
		// Attempt to recover format-specific data from the manifest
		v1Manifest := ociv1.Manifest{}
		if json.Unmarshal(c.Manifest, &v1Manifest) == nil {
			c.ImageAnnotations = v1Manifest.Annotations
		}
	}

	c.fixupConfig()
}

func (c *CopyData) fixupConfig() {
	if c.Docker.Config != nil {
		// Prefer image-level settings over those from the container it was built from
		c.Docker.ContainerConfig = *c.Docker.Config
	}
	c.Docker.Config = &c.Docker.ContainerConfig
	c.Docker.DockerVersion = ""
	now := time.Now().UTC()
	if c.Docker.Created.IsZero() {
		c.Docker.Created = now
	}
	if c.OCIv1.Created.IsZero() {
		c.OCIv1.Created = &now
	}
	if c.OS() == "" {
		c.SetOS(runtime.GOOS)
	}
	if c.Architecture() == "" {
		c.SetArchitecture(runtime.GOARCH)
	}
	if c.WorkDir() == "" {
		c.SetWorkDir(string(filepath.Separator))
	}
}
// OS returns a name of the OS on which a container built using this image
// is intended to be run.
func (c *CopyData) OS() string {
	return c.OCIv1.OS
}

// SetOS sets the name of the OS on which a container built using this image
// is intended to be run.
func (c *CopyData) SetOS(os string) {
	c.OCIv1.OS = os
	c.Docker.OS = os
}

// Architecture returns a name of the architecture on which a container built
// using this image is intended to be run.
func (c *CopyData) Architecture() string {
	return c.OCIv1.Architecture
}

// SetArchitecture sets the name of the architecture on which a container built
// using this image is intended to be run.
func (c *CopyData) SetArchitecture(arch string) {
	c.OCIv1.Architecture = arch
	c.Docker.Architecture = arch
}

// WorkDir returns the default working directory for running commands in a container
// built using this image.
func (c *CopyData) WorkDir() string {
	return c.OCIv1.Config.WorkingDir
}

// SetWorkDir sets the location of the default working directory for running commands
// in a container built using this image.
func (c *CopyData) SetWorkDir(there string) {
	c.OCIv1.Config.WorkingDir = there
	c.Docker.Config.WorkingDir = there
}
// makeOCIv1Image builds the best OCIv1 image structure we can from the
// contents of the docker image structure.
func makeOCIv1Image(dimage *docker.V2Image) (ociv1.Image, error) {
	config := dimage.Config
	if config == nil {
		config = &dimage.ContainerConfig
	}
	dimageCreatedTime := dimage.Created.UTC()
	image := ociv1.Image{
		Created: &dimageCreatedTime,
		Author: dimage.Author,
		Architecture: dimage.Architecture,
		OS: dimage.OS,
		Config: ociv1.ImageConfig{
			User: config.User,
			ExposedPorts: map[string]struct{}{},
			Env: config.Env,
			Entrypoint: config.Entrypoint,
			Cmd: config.Cmd,
			Volumes: config.Volumes,
			WorkingDir: config.WorkingDir,
			Labels: config.Labels,
		},
		RootFS: ociv1.RootFS{
			Type: "",
			DiffIDs: []digest.Digest{},
		},
		History: []ociv1.History{},
	}
	for port, what := range config.ExposedPorts {
		image.Config.ExposedPorts[string(port)] = what
	}
	RootFS := docker.V2S2RootFS{}
	if dimage.RootFS != nil {
		RootFS = *dimage.RootFS
	}
	if RootFS.Type == docker.TypeLayers {
		image.RootFS.Type = docker.TypeLayers
		for _, id := range RootFS.DiffIDs {
			image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, digest.Digest(id.String()))
		}
	}
	for _, history := range dimage.History {
		historyCreatedTime := history.Created.UTC()
		ohistory := ociv1.History{
			Created: &historyCreatedTime,
			CreatedBy: history.CreatedBy,
			Author: history.Author,
			Comment: history.Comment,
			EmptyLayer: history.EmptyLayer,
		}
		image.History = append(image.History, ohistory)
	}
	return image, nil
}

// makeDockerV2S2Image builds the best docker image structure we can from the
// contents of the OCI image structure.
func makeDockerV2S2Image(oimage *ociv1.Image) (docker.V2Image, error) {
	image := docker.V2Image{
		V1Image: docker.V1Image{Created: oimage.Created.UTC(),
			Author: oimage.Author,
			Architecture: oimage.Architecture,
			OS: oimage.OS,
			ContainerConfig: docker.Config{
				User: oimage.Config.User,
				ExposedPorts: docker.PortSet{},
				Env: oimage.Config.Env,
				Entrypoint: oimage.Config.Entrypoint,
				Cmd: oimage.Config.Cmd,
				Volumes: oimage.Config.Volumes,
				WorkingDir: oimage.Config.WorkingDir,
				Labels: oimage.Config.Labels,
			},
		},
		RootFS: &docker.V2S2RootFS{
			Type: "",
			DiffIDs: []digest.Digest{},
		},
		History: []docker.V2S2History{},
	}
	for port, what := range oimage.Config.ExposedPorts {
		image.ContainerConfig.ExposedPorts[docker.Port(port)] = what
	}
	if oimage.RootFS.Type == docker.TypeLayers {
		image.RootFS.Type = docker.TypeLayers
		for _, id := range oimage.RootFS.DiffIDs {
			d, err := digest.Parse(id.String())
			if err != nil {
				return docker.V2Image{}, err
			}
			image.RootFS.DiffIDs = append(image.RootFS.DiffIDs, d)
		}
	}
	for _, history := range oimage.History {
		dhistory := docker.V2S2History{
			Created: history.Created.UTC(),
			CreatedBy: history.CreatedBy,
			Author: history.Author,
			Comment: history.Comment,
			EmptyLayer: history.EmptyLayer,
		}
		image.History = append(image.History, dhistory)
	}
	image.Config = &image.ContainerConfig
	return image, nil
}
// makeDockerV2S1Image builds the best docker image structure we can from the
// contents of the V2S1 image structure.
func makeDockerV2S1Image(manifest docker.V2S1Manifest) (docker.V2Image, error) {
	// Treat the most recent (first) item in the history as a description of the image.
	if len(manifest.History) == 0 {
		return docker.V2Image{}, errors.Errorf("error parsing image configuration from manifest")
	}
	dimage := docker.V2Image{}
	err := json.Unmarshal([]byte(manifest.History[0].V1Compatibility), &dimage)
	if err != nil {
		return docker.V2Image{}, err
	}
	if dimage.DockerVersion == "" {
		return docker.V2Image{}, errors.Errorf("error parsing image configuration from history")
	}
	// The DiffID list is intended to contain the sums of _uncompressed_ blobs, and these are most
	// likely compressed, so leave the list empty to avoid potential confusion later on. We can
	// construct a list with the correct values when we prep layers for pushing, so we don't lose
	// information by leaving this part undone.
	rootFS := &docker.V2S2RootFS{
		Type: docker.TypeLayers,
		DiffIDs: []digest.Digest{},
	}
	// Build a filesystem history.
	history := []docker.V2S2History{}
	for i := range manifest.History {
		h := docker.V2S2History{
			Created: time.Now().UTC(),
			Author: "",
			CreatedBy: "",
			Comment: "",
			EmptyLayer: false,
		}
		dcompat := docker.V1Compatibility{}
		if err2 := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), &dcompat); err2 == nil {
			h.Created = dcompat.Created.UTC()
			h.Author = dcompat.Author
			h.Comment = dcompat.Comment
			if len(dcompat.ContainerConfig.Cmd) > 0 {
				h.CreatedBy = fmt.Sprintf("%v", dcompat.ContainerConfig.Cmd)
			}
			h.EmptyLayer = dcompat.ThrowAway
		}
		// Prepend this layer to the list, because a v2s1 format manifest's list is in reverse order
		// compared to v2s2, which lists earlier layers before later ones.
		history = append([]docker.V2S2History{h}, history...)
	}
	dimage.RootFS = rootFS
	dimage.History = history
	return dimage, nil
}
// Annotations gets the annotations of the container or image
func (c *CopyData) Annotations() map[string]string {
	return common.CopyStringStringMap(c.ImageAnnotations)
}

// Save the CopyData to disk
func (c *CopyData) Save() error {
	buildstate, err := json.Marshal(c)
	if err != nil {
		return err
	}
	cdir, err := c.store.ContainerDirectory(c.ContainerID)
	if err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600)
}
// GetContainerCopyData gets the copy data for a container
func (r *Runtime) GetContainerCopyData(name string) (*CopyData, error) {
	var data *CopyData
	var err error
	if name != "" {
		data, err = openCopyData(r.store, name)
		if os.IsNotExist(errors.Cause(err)) {
			data, err = r.importCopyData(r.store, name, "")
		}
	}
	if err != nil {
		return nil, errors.Wrapf(err, "error reading build container")
	}
	if data == nil {
		return nil, errors.Errorf("error finding build container")
	}
	return data, nil
}

// GetImageCopyData gets the copy data for an image
func (r *Runtime) GetImageCopyData(image string) (*CopyData, error) {
	if image == "" {
		return nil, errors.Errorf("image name must be specified")
	}
	img, err := r.GetImage(image)
	if err != nil {
		return nil, errors.Wrapf(err, "error locating image %q for importing settings", image)
	}

	systemContext := common.GetSystemContext("", "")
	data, err := r.ImportCopyDataFromImage(systemContext, img.ID, "", "")
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image")
	}
	if data == nil {
		return nil, errors.Errorf("error mocking up build configuration")
	}
	return data, nil
}

func (r *Runtime) importCopyData(store storage.Store, container, signaturePolicyPath string) (*CopyData, error) {
	if container == "" {
		return nil, errors.Errorf("container name must be specified")
	}

	c, err := store.Container(container)
	if err != nil {
		return nil, err
	}

	systemContext := common.GetSystemContext(signaturePolicyPath, "")

	data, err := r.ImportCopyDataFromImage(systemContext, c.ImageID, container, c.ID)
	if err != nil {
		return nil, err
	}

	if data.FromImageID != "" {
		if d, err2 := digest.Parse(data.FromImageID); err2 == nil {
			data.Docker.Parent = docker.ID(d)
		} else {
			data.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), data.FromImageID))
		}
	}
	if data.FromImage != "" {
		data.Docker.ContainerConfig.Image = data.FromImage
	}

	err = data.Save()
	if err != nil {
		return nil, errors.Wrapf(err, "error saving CopyData state")
	}

	return data, nil
}

func openCopyData(store storage.Store, container string) (*CopyData, error) {
	cdir, err := store.ContainerDirectory(container)
	if err != nil {
		return nil, err
	}
	buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
	if err != nil {
		return nil, err
	}
	c := &CopyData{}
	err = json.Unmarshal(buildstate, &c)
	if err != nil {
		return nil, err
	}
	if c.Type != containerType {
		return nil, errors.Errorf("container is not a %s container", Package)
	}
	c.store = store
	c.fixupConfig()
	return c, nil
}

// ImportCopyDataFromImage creates copy data for an image with the given parameters
func (r *Runtime) ImportCopyDataFromImage(systemContext *types.SystemContext, imageID, containerName, containerID string) (*CopyData, error) {
	manifest := []byte{}
	config := []byte{}
	imageName := ""

	if imageID != "" {
		ref, err := is.Transport.ParseStoreReference(r.store, "@"+imageID)
		if err != nil {
			return nil, errors.Wrapf(err, "no such image %q", "@"+imageID)
		}
		src, err2 := ref.NewImage(systemContext)
		if err2 != nil {
			return nil, errors.Wrapf(err2, "error instantiating image")
		}
		defer src.Close()
		config, err = src.ConfigBlob()
		if err != nil {
			return nil, errors.Wrapf(err, "error reading image configuration")
		}
		manifest, _, err = src.Manifest()
		if err != nil {
			return nil, errors.Wrapf(err, "error reading image manifest")
		}
		if img, err3 := r.store.Image(imageID); err3 == nil {
			if len(img.Names) > 0 {
				imageName = img.Names[0]
			}
		}
	}

	data := &CopyData{
		store: r.store,
		Type: containerType,
		FromImage: imageName,
		FromImageID: imageID,
		Config: config,
		Manifest: manifest,
		Container: containerName,
		ContainerID: containerID,
		ImageAnnotations: map[string]string{},
		ImageCreatedBy: "",
	}

	data.initConfig()

	return data, nil
}

// MakeImageRef converts a CopyData struct into a types.ImageReference
func (c *CopyData) MakeImageRef(manifestType string, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
	var name reference.Named
	if len(names) > 0 {
		if parsed, err := reference.ParseNamed(names[0]); err == nil {
			name = parsed
		}
	}
	if manifestType == "" {
		manifestType = OCIv1ImageManifest
	}
	oconfig, err := json.Marshal(&c.OCIv1)
	if err != nil {
		return nil, errors.Wrapf(err, "error encoding OCI-format image configuration")
	}
	dconfig, err := json.Marshal(&c.Docker)
	if err != nil {
		return nil, errors.Wrapf(err, "error encoding docker-format image configuration")
	}
	created := time.Now().UTC()
	if historyTimestamp != nil {
		created = historyTimestamp.UTC()
	}
	ref := &CopyRef{
		store: c.store,
		compression: compress,
		name: name,
		names: names,
		layerID: layerID,
		addHistory: false,
		oconfig: oconfig,
		dconfig: dconfig,
		created: created,
		createdBy: c.ImageCreatedBy,
		annotations: c.ImageAnnotations,
		preferredManifestType: manifestType,
		exporting: true,
	}
	return ref, nil
}

// GetData gets the Data for a container with the given name in the given store.
func (r *Runtime) GetData(name string) (*Data, error) {
	img, err := r.GetImage(name)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image %q", name)
	}

	imgRef, err := r.GetImageRef("@" + img.ID)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image reference %q", img.ID)
	}
	defer imgRef.Close()

	tags, digests, err := ParseImageNames(img.Names)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing image names for %q", name)
	}

	driverName, err := driver.GetDriverName(r.store)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading name of storage driver")
	}

	topLayerID := img.TopLayer

	driverMetadata, err := driver.GetDriverMetadata(r.store, topLayerID)
	if err != nil {
		return nil, errors.Wrapf(err, "error asking storage driver %q for metadata", driverName)
	}

	layer, err := r.store.Layer(topLayerID)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading information about layer %q", topLayerID)
	}
	size, err := r.store.DiffSize(layer.Parent, layer.ID)
	if err != nil {
		return nil, errors.Wrapf(err, "error determining size of layer %q", layer.ID)
	}

	imgSize, err := imgRef.Size()
	if err != nil {
		return nil, errors.Wrapf(err, "error determining size of image %q", transports.ImageName(imgRef.Reference()))
	}

	manifest, manifestType, err := imgRef.Manifest()
	if err != nil {
		return nil, errors.Wrapf(err, "error reading manifest for image %q", img.ID)
	}
	manifestDigest := digest.Digest("")
	if len(manifest) > 0 {
		manifestDigest = digest.Canonical.FromBytes(manifest)
	}
	annotations := annotations(manifest, manifestType)

	config, err := imgRef.OCIConfig()
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image configuration for %q", img.ID)
	}
	historyComment := ""
	historyCreatedBy := ""
	if len(config.History) > 0 {
		historyComment = config.History[len(config.History)-1].Comment
		historyCreatedBy = config.History[len(config.History)-1].CreatedBy
	}

	return &Data{
		ID: img.ID,
		Tags: tags,
		Digests: digests,
		ManifestDigest: manifestDigest,
		Comment: historyComment,
		Created: config.Created,
		Author: config.Author,
		Config: config.Config,
		Architecture: config.Architecture,
		OS: config.OS,
		Annotations: annotations,
		CreatedBy: historyCreatedBy,
		Size: uint(size),
		VirtualSize: uint(size + imgSize),
		GraphDriver: driver.Data{
			Name: driverName,
			Data: driverMetadata,
		},
		RootFS: config.RootFS,
	}, nil
}
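Note: the Save/openCopyData pair above persists CopyData as a JSON state file in the container's storage directory, written atomically (via ioutils.AtomicWriteFile) so a crash cannot leave a half-written file behind. The following is a minimal, self-contained sketch of that same write-temp-then-rename pattern using only the standard library; the state struct, directory, and file name here are illustrative placeholders, not the ones from the removed file.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// exampleState stands in for a small piece of per-container metadata.
type exampleState struct {
	Type      string `json:"type"`
	FromImage string `json:"image,omitempty"`
}

// saveAtomically marshals state to JSON and writes it via a temporary file
// plus rename, so readers never observe a partially written state file.
func saveAtomically(dir, name string, state *exampleState) error {
	data, err := json.Marshal(state)
	if err != nil {
		return err
	}
	tmp, err := os.CreateTemp(dir, name+".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // harmless no-op once the rename succeeds
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filepath.Join(dir, name))
}

// loadState reads the file back and unmarshals it.
func loadState(dir, name string) (*exampleState, error) {
	data, err := os.ReadFile(filepath.Join(dir, name))
	if err != nil {
		return nil, err
	}
	state := &exampleState{}
	if err := json.Unmarshal(data, state); err != nil {
		return nil, err
	}
	return state, nil
}

func main() {
	dir, err := os.MkdirTemp("", "state-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := saveAtomically(dir, "kpod.json", &exampleState{Type: "kpod 0.0.1", FromImage: "alpine"}); err != nil {
		panic(err)
	}
	state, err := loadState(dir, "kpod.json")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *state)
}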
@@ -1,445 +0,0 @@
package libpod

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/image"
	is "github.com/containers/image/storage"
	"github.com/containers/image/types"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/archive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/kubernetes-incubator/cri-o/cmd/kpod/docker"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	"github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
// CopyRef handles image references used for copying images to/from remotes
type CopyRef struct {
	store storage.Store
	compression archive.Compression
	name reference.Named
	names []string
	layerID string
	addHistory bool
	oconfig []byte
	dconfig []byte
	created time.Time
	createdBy string
	annotations map[string]string
	preferredManifestType string
	exporting bool
}

type copySource struct {
	path string
	ref *CopyRef
	store storage.Store
	layerID string
	names []string
	addHistory bool
	compression archive.Compression
	config []byte
	configDigest digest.Digest
	manifest []byte
	manifestType string
	exporting bool
}
// NewImage creates a new image from the given system context
func (c *CopyRef) NewImage(sc *types.SystemContext) (types.Image, error) {
	src, err := c.NewImageSource(sc)
	if err != nil {
		return nil, err
	}
	return image.FromSource(src)
}

// NewImageSource creates a new image source from the given system context and manifest
func (c *CopyRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) {
	// Decide which type of manifest and configuration output we're going to provide.
	manifestType := c.preferredManifestType
	// If it's not a format we support, return an error.
	// Try to provide a manifest and configuration in the same format the current ones are in.
	if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
		return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			manifestType, v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest)
	}
	// Start building the list of layers using the read-write layer.
	layers := []string{}
	layerID := c.layerID
	layer, err := c.store.Layer(layerID)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
	}
	// Walk the list of parent layers, prepending each as we go.
	for layer != nil {
		layers = append(append([]string{}, layerID), layers...)
		layerID = layer.Parent
		if layerID == "" {
			err = nil
			break
		}
		layer, err = c.store.Layer(layerID)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
		}
	}
	logrus.Debugf("layer list: %q", layers)

	// Make a temporary directory to hold blobs.
	path, err := ioutil.TempDir(os.TempDir(), "kpod")
	if err != nil {
		return nil, err
	}
	logrus.Debugf("using %q to hold temporary data", path)
	defer func() {
		if src == nil {
			err2 := os.RemoveAll(path)
			if err2 != nil {
				logrus.Errorf("error removing %q: %v", path, err)
			}
		}
	}()

	// Build fresh copies of the configurations so that we don't mess with the values in the Builder
	// object itself.
	oimage := v1.Image{}
	err = json.Unmarshal(c.oconfig, &oimage)
	if err != nil {
		return nil, err
	}
	dimage := docker.V2Image{}
	err = json.Unmarshal(c.dconfig, &dimage)
	if err != nil {
		return nil, err
	}

	// Start building manifests.
	omanifest := v1.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
		},
		Layers: []v1.Descriptor{},
		Annotations: c.annotations,
	}
	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType: docker.V2S2MediaTypeManifest,
		},
		Config: docker.V2S2Descriptor{
			MediaType: docker.V2S2MediaTypeImageConfig,
		},
		Layers: []docker.V2S2Descriptor{},
	}

	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []digest.Digest{}
	dimage.RootFS = &docker.V2S2RootFS{}
	dimage.RootFS.Type = docker.TypeLayers
	dimage.RootFS.DiffIDs = []digest.Digest{}

	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
	for _, layerID := range layers {
		omediaType := v1.MediaTypeImageLayer
		dmediaType := docker.V2S2MediaTypeUncompressedLayer
		// Figure out which media type we want to call this. Assume no compression.
		if c.compression != archive.Uncompressed {
			switch c.compression {
			case archive.Gzip:
				omediaType = v1.MediaTypeImageLayerGzip
				dmediaType = docker.V2S2MediaTypeLayer
				logrus.Debugf("compressing layer %q with gzip", layerID)
			case archive.Bzip2:
				// Until the image specs define a media type for bzip2-compressed layers, even if we know
				// how to decompress them, we can't try to compress layers with bzip2.
				return nil, errors.New("media type for bzip2-compressed layers is not defined")
			default:
				logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
			}
		}
		// If we're not re-exporting the data, just fake up layer and diff IDs for the manifest.
		if !c.exporting {
			fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID)
			// Add a note in the manifest about the layer. The blobs should be identified by their
			// possibly-compressed blob digests, but just use the layer IDs here.
			olayerDescriptor := v1.Descriptor{
				MediaType: omediaType,
				Digest: fakeLayerDigest,
				Size: -1,
			}
			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
			dlayerDescriptor := docker.V2S2Descriptor{
				MediaType: dmediaType,
				Digest: fakeLayerDigest,
				Size: -1,
			}
			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
			// Add a note about the diffID, which should be uncompressed digest of the blob, but
			// just use the layer ID here.
			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest)
			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest)
			continue
		}
		// Start reading the layer.
		rc, err := c.store.Diff("", layerID, nil)
		if err != nil {
			return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
		}
		defer rc.Close()
		// Set up to decompress the layer, in case it's coming out compressed. Due to implementation
		// differences, the result may not match the digest the blob had when it was originally imported,
		// so we have to recompute all of this anyway if we want to be sure the digests we use will be
		// correct.
		uncompressed, err := archive.DecompressStream(rc)
		if err != nil {
			return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
		}
		defer uncompressed.Close()
		srcHasher := digest.Canonical.Digester()
		reader := io.TeeReader(uncompressed, srcHasher.Hash())
		// Set up to write the possibly-recompressed blob.
		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return nil, errors.Wrapf(err, "error opening file for layer %q", layerID)
		}
		destHasher := digest.Canonical.Digester()
		counter := ioutils.NewWriteCounter(layerFile)
		multiWriter := io.MultiWriter(counter, destHasher.Hash())
		// Compress the layer, if we're compressing it.
		writer, err := archive.CompressStream(multiWriter, c.compression)
		if err != nil {
			return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
		}
		size, err := io.Copy(writer, reader)
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		writer.Close()
		layerFile.Close()
		if c.compression == archive.Uncompressed {
			if size != counter.Count {
				return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count)
			}
		} else {
			size = counter.Count
		}
		logrus.Debugf("layer %q size is %d bytes", layerID, size)
		// Rename the layer so that we can more easily find it by digest later.
		err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
		if err != nil {
			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
		}
		// Add a note in the manifest about the layer. The blobs are identified by their possibly-
		// compressed blob digests.
		olayerDescriptor := v1.Descriptor{
			MediaType: omediaType,
			Digest: destHasher.Digest(),
			Size: size,
		}
		omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
		dlayerDescriptor := docker.V2S2Descriptor{
			MediaType: dmediaType,
			Digest: destHasher.Digest(),
			Size: size,
		}
		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
		// Add a note about the diffID, which is always an uncompressed value.
		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
	}

	if c.addHistory {
		// Build history notes in the image configurations.
		onews := v1.History{
			Created: &c.created,
			CreatedBy: c.createdBy,
			Author: oimage.Author,
			EmptyLayer: false,
		}
		oimage.History = append(oimage.History, onews)
		dnews := docker.V2S2History{
			Created: c.created,
			CreatedBy: c.createdBy,
			Author: dimage.Author,
			EmptyLayer: false,
		}
		dimage.History = append(dimage.History, dnews)
	}

	// Encode the image configuration blob.
	oconfig, err := json.Marshal(&oimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)

	// Add the configuration blob to the manifest.
	omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
	omanifest.Config.Size = int64(len(oconfig))
	omanifest.Config.MediaType = v1.MediaTypeImageConfig

	// Encode the manifest.
	omanifestbytes, err := json.Marshal(&omanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)

	// Encode the image configuration blob.
	dconfig, err := json.Marshal(&dimage)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 config = %s", dconfig)

	// Add the configuration blob to the manifest.
	dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
	dmanifest.Config.Size = int64(len(dconfig))
	dmanifest.Config.MediaType = docker.V2S2MediaTypeImageConfig

	// Encode the manifest.
	dmanifestbytes, err := json.Marshal(&dmanifest)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)

	// Decide which manifest and configuration blobs we'll actually output.
	var config []byte
	var manifest []byte
	switch manifestType {
	case v1.MediaTypeImageManifest:
		manifest = omanifestbytes
		config = oconfig
	case docker.V2S2MediaTypeManifest:
		manifest = dmanifestbytes
		config = dconfig
	default:
		panic("unreachable code: unsupported manifest type")
	}
	src = &copySource{
		path: path,
		ref: c,
		store: c.store,
		layerID: c.layerID,
		names: c.names,
		addHistory: c.addHistory,
		compression: c.compression,
		config: config,
		configDigest: digest.Canonical.FromBytes(config),
		manifest: manifest,
		manifestType: manifestType,
		exporting: c.exporting,
	}
	return src, nil
}
// NewImageDestination creates a new image destination from the given system context
func (c *CopyRef) NewImageDestination(sc *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.Errorf("can't write to a container")
}

// DockerReference gets the docker reference for the given CopyRef
func (c *CopyRef) DockerReference() reference.Named {
	return c.name
}

// StringWithinTransport returns the first name of the CopyRef
func (c *CopyRef) StringWithinTransport() string {
	if len(c.names) > 0 {
		return c.names[0]
	}
	return ""
}

// DeleteImage deletes an image in the CopyRef
func (c *CopyRef) DeleteImage(*types.SystemContext) error {
	// we were never here
	return nil
}

// PolicyConfigurationIdentity returns the policy configuration for the CopyRef
func (c *CopyRef) PolicyConfigurationIdentity() string {
	return ""
}

// PolicyConfigurationNamespaces returns the policy configuration namespace for the CopyRef
func (c *CopyRef) PolicyConfigurationNamespaces() []string {
	return nil
}

// Transport returns an ImageTransport for the given CopyRef
func (c *CopyRef) Transport() types.ImageTransport {
	return is.Transport
}

func (cs *copySource) Close() error {
	err := os.RemoveAll(cs.path)
	if err != nil {
		logrus.Errorf("error removing %q: %v", cs.path, err)
	}
	return err
}

func (cs *copySource) Reference() types.ImageReference {
	return cs.ref
}

func (cs *copySource) GetSignatures(context.Context) ([][]byte, error) {
	return nil, nil
}

func (cs *copySource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
	return []byte{}, "", errors.Errorf("TODO")
}

func (cs *copySource) GetManifest() ([]byte, string, error) {
	return cs.manifest, cs.manifestType, nil
}

func (cs *copySource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
	if blob.Digest == cs.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(cs.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	layerFile, err := os.OpenFile(filepath.Join(cs.path, blob.Digest.String()), os.O_RDONLY, 0600)
	if err != nil {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, err
	}
	size = -1
	st, err := layerFile.Stat()
	if err != nil {
		logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err)
	} else {
		size = st.Size()
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	closer := func() error {
		layerFile.Close()
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		return nil
	}
	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
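Note: in NewImageSource above, each layer is streamed once while two digests are computed in parallel: the uncompressed bytes are hashed through an io.TeeReader to produce the diffID, and the (re)compressed output is hashed through an io.MultiWriter to produce the blob digest and size recorded in the manifest. The following is a self-contained sketch of that pattern, substituting the standard library's gzip for the storage archive helpers and an in-memory buffer for the temporary layer file; the function and variable names are illustrative.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// compressAndDigest streams a layer once while computing both values a
// manifest needs: the diffID (digest of the uncompressed bytes) and the
// digest plus size of the compressed blob that would actually be uploaded.
func compressAndDigest(layer io.Reader) (diffID, blobDigest digest.Digest, blobSize int64, err error) {
	srcHasher := digest.Canonical.Digester()
	// Everything read from the layer also feeds the uncompressed hasher.
	reader := io.TeeReader(layer, srcHasher.Hash())

	compressed := &bytes.Buffer{}
	destHasher := digest.Canonical.Digester()
	// Everything written by the compressor feeds both the buffer and the
	// compressed-blob hasher.
	multiWriter := io.MultiWriter(compressed, destHasher.Hash())
	gzWriter := gzip.NewWriter(multiWriter)

	if _, err = io.Copy(gzWriter, reader); err != nil {
		return "", "", 0, err
	}
	if err = gzWriter.Close(); err != nil {
		return "", "", 0, err
	}
	return srcHasher.Digest(), destHasher.Digest(), int64(compressed.Len()), nil
}

func main() {
	layer := strings.NewReader("pretend this is an uncompressed layer tarball")
	diffID, blobDigest, size, err := compressAndDigest(layer)
	if err != nil {
		panic(err)
	}
	fmt.Println("diffID:     ", diffID)
	fmt.Println("blob digest:", blobDigest)
	fmt.Println("blob size:  ", size)
}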