Merge 803fbbfb56 into 6a43d07bae
Commit 8a1f3882db
45 changed files with 1823 additions and 1682 deletions
@@ -87,7 +87,7 @@ func historyCmd(c *cli.Context) error {
 	runtime, err := getRuntime(c)
 	if err != nil {
-		return errors.Wrapf(err, "Could not get config")
+		return errors.Wrapf(err, "Could not get runtime")
 	}
 	defer runtime.Shutdown(false)

cmd/kpod/import.go (new file, 190 lines)
@@ -0,0 +1,190 @@
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strings"

	"github.com/kubernetes-incubator/cri-o/libpod"

	"github.com/opencontainers/image-spec/specs-go/v1"

	"github.com/pkg/errors"
	"github.com/urfave/cli"
)

var (
	importFlags = []cli.Flag{
		cli.StringFlag{
			Name:  "change, c",
			Usage: "Apply imgspecv1 configurations to the created image",
		},
		cli.StringFlag{
			Name:  "message, m",
			Usage: "Set commit message for image imported",
		},
	}
	importDescription = "Imports a tarball and saves it as a root filesystem image.\n" +
		"The commit message and image config can be modified by the user."
	importCommand = cli.Command{
		Name:        "import",
		Usage:       "Import a tarball to create a filesystem image",
		Description: importDescription,
		Flags:       importFlags,
		Action:      importCmd,
		ArgsUsage:   "TARBALL",
	}
)

func importCmd(c *cli.Context) error {
	if err := validateFlags(c, historyFlags); err != nil {
		return err
	}

	runtime, err := getRuntime(c)
	if err != nil {
		return errors.Wrapf(err, "Could not get runtime")
	}
	defer runtime.Shutdown(false)

	var opts libpod.CopyOptions
	var source string
	args := c.Args()
	switch len(args) {
	case 0:
		return errors.Errorf("need to give the path to the tarball")
	case 1:
		source = args[0]
	case 2:
		source = args[0]
		opts.Reference = args[1]
	default:
		return errors.Errorf("too many arguments, need 2 only")
	}

	if _, err := url.ParseRequestURI(source); err == nil {
		file, err := downloadFromURL(source)
		if err != nil {
			return nil
		}
		defer os.Remove(file)
		source = file
	}

	changes := v1.ImageConfig{}
	if c.IsSet("change") {
		changes, err = getImageConfig(c.String("change"))
		if err != nil {
			return errors.Wrapf(err, "error adding config changes to image %q", source)
		}
	}

	history := []v1.History{
		{Comment: c.String("message")},
	}

	config := v1.Image{
		Config:  changes,
		History: history,
	}

	opts.ImageConfig = config

	return runtime.ImportImage(source, opts)
}

// downloadFromURL downloads an image in the format "https://example.com/myimage.tar"
// and temporarily saves it in /var/tmp/importxyz, which is deleted after the image is imported
func downloadFromURL(source string) (string, error) {
	fmt.Printf("Downloading from %q\n", source)

	outFile, err := ioutil.TempFile("/var/tmp", "import")
	if err != nil {
		return "", errors.Wrap(err, "error creating file")
	}
	defer outFile.Close()

	response, err := http.Get(source)
	if err != nil {
		return "", errors.Wrapf(err, "error downloading %q", source)
	}
	defer response.Body.Close()

	_, err = io.Copy(outFile, response.Body)
	if err != nil {
		return "", errors.Wrapf(err, "error saving %q to %q", source, outFile)
	}

	return outFile.Name(), nil
}

// getImageConfig converts the --change flag values in the format "CMD=/bin/bash USER=example"
// to a type v1.ImageConfig
func getImageConfig(change string) (v1.ImageConfig, error) {
	// USER=value | EXPOSE=value | ENV=value | ENTRYPOINT=value |
	// CMD=value | VOLUME=value | WORKDIR=value | LABEL=key=value | STOPSIGNAL=value

	var (
		user       string
		env        []string
		entrypoint []string
		cmd        []string
		workingDir string
		stopSignal string
	)

	exposedPorts := make(map[string]struct{})
	volumes := make(map[string]struct{})
	labels := make(map[string]string)

	changes := strings.Split(change, " ")

	for _, ch := range changes {
		pair := strings.Split(ch, "=")
		if len(pair) == 1 {
			return v1.ImageConfig{}, errors.Errorf("no value given for instruction %q", ch)
		}
		switch pair[0] {
		case "USER":
			user = pair[1]
		case "EXPOSE":
			var st struct{}
			exposedPorts[pair[1]] = st
		case "ENV":
			env = append(env, pair[1])
		case "ENTRYPOINT":
			entrypoint = append(entrypoint, pair[1])
		case "CMD":
			cmd = append(cmd, pair[1])
		case "VOLUME":
			var st struct{}
			volumes[pair[1]] = st
		case "WORKDIR":
			workingDir = pair[1]
		case "LABEL":
			if len(pair) == 3 {
				labels[pair[1]] = pair[2]
			} else {
				labels[pair[1]] = ""
			}
		case "STOPSIGNAL":
			stopSignal = pair[1]
		}
	}

	return v1.ImageConfig{
		User:         user,
		ExposedPorts: exposedPorts,
		Env:          env,
		Entrypoint:   entrypoint,
		Cmd:          cmd,
		Volumes:      volumes,
		WorkingDir:   workingDir,
		Labels:       labels,
		StopSignal:   stopSignal,
	}, nil
}
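
For reference, a minimal, self-contained sketch (not part of this commit) of the v1.Image that importCmd assembles for a hypothetical invocation with --change "CMD=/bin/bash LABEL=blue=image" and --message "initial import", assuming the string-splitting behavior of getImageConfig above; the literal values are illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Roughly what importCmd hands to CopyOptions.ImageConfig for
	//   kpod import --change "CMD=/bin/bash LABEL=blue=image" --message "initial import" ctr.tar
	// (getImageConfig splits each space-separated KEY=value pair; LABEL takes key=value).
	config := v1.Image{
		Config: v1.ImageConfig{
			Cmd:    []string{"/bin/bash"},
			Labels: map[string]string{"blue": "image"},
		},
		History: []v1.History{
			{Comment: "initial import"},
		},
	}
	out, _ := json.MarshalIndent(config, "", "  ")
	fmt.Println(string(out))
}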

@@ -36,6 +36,7 @@ func main() {
 		exportCommand,
 		historyCommand,
 		imagesCommand,
+		importCommand,
 		infoCommand,
 		inspectCommand,
 		killCommand,

@@ -711,6 +711,28 @@ _kpod_history() {
 	esac
 }
+
+_kpod_import() {
+	local options_with_args="
+	--change
+	-c
+	--message
+	-m
+	"
+	local boolean_options="
+	"
+	_complete_ "$options_with_args" "$boolean_options"
+
+	case "$cur" in
+		-*)
+			COMPREPLY=($(compgen -W "$boolean_options $options_with_args" -- "$cur"))
+			;;
+		*)
+			__kpod_list_images
+			;;
+	esac
+}
+
 _kpod_info() {
 	local boolean_options="
 	--help

@@ -1402,6 +1424,7 @@ _kpod_kpod() {
 		export
 		history
 		images
+		import
 		info
 		inspect
 		kill

docs/kpod-import.1.md (new file, 87 lines)
@@ -0,0 +1,87 @@
% kpod(1) kpod-import - Simple tool to import a tarball as an image
% Urvashi Mohnani
# kpod-import "1" "November 2017" "kpod"

## NAME
kpod-import - import a tarball and save it as a filesystem image

## SYNOPSIS
**kpod import**
**TARBALL**
[**--change**|**-c**]
[**--message**|**-m**]
[**--help**|**-h**]

## DESCRIPTION
**kpod import** imports a tarball and saves it as a filesystem image.
The image configuration can be modified with the **--change** flag and
a commit message can be set using the **--message** flag.

**kpod [GLOBAL OPTIONS]**

**kpod import [GLOBAL OPTIONS]**

**kpod import [OPTIONS] TARBALL**

## OPTIONS

**--change, -c**
Apply imgspecv1 configurations to the created image
Possible configurations include:
**USER** | **EXPOSE** | **ENV** | **ENTRYPOINT** | **CMD** | **VOLUME** | **WORKDIR** | **LABEL** | **STOPSIGNAL**

**--message, -m**
Set commit message for image imported

## EXAMPLES

```
# kpod import --change "CMD=/bin/bash ENTRYPOINT=/bin/sh LABEL=blue=image" ctr.tar image-imported
Getting image source signatures
Copying blob sha256:b41deda5a2feb1f03a5c1bb38c598cbc12c9ccd675f438edc6acd815f7585b86
25.80 MB / 25.80 MB [======================================================] 0s
Copying config sha256:c16a6d30f3782288ec4e7521c754acc29d37155629cb39149756f486dae2d4cd
448 B / 448 B [============================================================] 0s
Writing manifest to image destination
Storing signatures
```

```
# cat ctr.tar | kpod import --message "importing the ctr.tar tarball" - image-imported
Getting image source signatures
Copying blob sha256:b41deda5a2feb1f03a5c1bb38c598cbc12c9ccd675f438edc6acd815f7585b86
25.80 MB / 25.80 MB [======================================================] 0s
Copying config sha256:af376cdda5c0ac1d9592bf56567253d203f8de6a8edf356c683a645d75221540
376 B / 376 B [============================================================] 0s
Writing manifest to image destination
Storing signatures
```

```
# cat ctr.tar | kpod import -
Getting image source signatures
Copying blob sha256:b41deda5a2feb1f03a5c1bb38c598cbc12c9ccd675f438edc6acd815f7585b86
25.80 MB / 25.80 MB [======================================================] 0s
Copying config sha256:d61387b4d5edf65edee5353e2340783703074ffeaaac529cde97a8357eea7645
378 B / 378 B [============================================================] 0s
Writing manifest to image destination
Storing signatures
```

```
kpod import http://lacrosse.redhat.com/~umohnani/ctr.tar url-image
Downloading from "http://lacrosse.redhat.com/~umohnani/ctr.tar"
Getting image source signatures
Copying blob sha256:b41deda5a2feb1f03a5c1bb38c598cbc12c9ccd675f438edc6acd815f7585b86
25.80 MB / 25.80 MB [======================================================] 0s
Copying config sha256:5813fe8a3b18696089fd09957a12e88bda43dc1745b5240879ffffe93240d29a
419 B / 419 B [============================================================] 0s
Writing manifest to image destination
Storing signatures
```

## SEE ALSO
kpod(1), kpod-export(1), crio(8), crio.conf(5)

## HISTORY
November 2017, Originally compiled by Urvashi Mohnani <umohnani@redhat.com>

@@ -18,6 +18,7 @@ import (
 	"github.com/containers/image/pkg/sysregistries"
 	"github.com/containers/image/signature"
 	is "github.com/containers/image/storage"
+	"github.com/containers/image/tarball"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/transports/alltransports"
 	"github.com/containers/image/types"

@@ -49,6 +50,9 @@ var (
 	DirTransport = "dir"
 	// TransportNames are the supported transports in string form
 	TransportNames = [...]string{DefaultRegistry, DockerArchive, OCIArchive, "ostree:", "dir:"}
+	// TarballTransport is the transport for importing a tar archive
+	// and creating a filesystem image
+	TarballTransport = "tarball"
 )

 // CopyOptions contains the options given when pushing or pulling images

@@ -72,6 +76,10 @@ type CopyOptions struct {
 	AuthFile string
 	// Writer is the reportWriter for the output
 	Writer io.Writer
+	// Reference is the name for the image created when a tar archive is imported
+	Reference string
+	// ImageConfig is the Image spec for the image created when a tar archive is imported
+	ImageConfig ociv1.Image
 }

 // Image API

@@ -877,8 +885,70 @@ func (r *Runtime) GetHistory(image string) ([]ociv1.History, []types.BlobInfo, s
 }

 // ImportImage imports an OCI format image archive into storage as an image
-func (r *Runtime) ImportImage(path string) (*storage.Image, error) {
-	return nil, ErrNotImplemented
+func (r *Runtime) ImportImage(path string, options CopyOptions) error {
+	r.lock.RLock()
+	defer r.lock.RUnlock()
+
+	if !r.valid {
+		return ErrRuntimeStopped
+	}
+
+	file := TarballTransport + ":" + path
+	src, err := alltransports.ParseImageName(file)
+	if err != nil {
+		return errors.Wrapf(err, "error parsing image name %q", path)
+	}
+
+	updater, ok := src.(tarball.ConfigUpdater)
+	if !ok {
+		return errors.Wrapf(err, "unexpected type, a tarball reference should implement tarball.ConfigUpdater")
+	}
+
+	annotations := make(map[string]string)
+	annotations[ociv1.AnnotationDescription] = "test image built"
+
+	err = updater.ConfigUpdate(options.ImageConfig, annotations)
+	if err != nil {
+		return errors.Wrapf(err, "error updating image config")
+	}
+
+	var reference = options.Reference
+
+	sc := common.GetSystemContext("", "")
+
+	if reference == "" {
+		newImg, err := src.NewImage(sc)
+		if err != nil {
+			return err
+		}
+		defer newImg.Close()
+
+		digest := newImg.ConfigInfo().Digest
+		if err = digest.Validate(); err != nil {
+			return errors.Wrapf(err, "error getting config info")
+		}
+		reference = "@" + digest.Hex()
+	}
+
+	policy, err := signature.DefaultPolicy(sc)
+	if err != nil {
+		return err
+	}
+
+	policyContext, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		return err
+	}
+	defer policyContext.Destroy()
+
+	copyOptions := common.GetCopyOptions(os.Stdout, "", nil, nil, common.SigningOptions{}, "")
+
+	dest, err := is.Transport.ParseStoreReference(r.store, reference)
+	if err != nil {
+		errors.Wrapf(err, "error getting image reference for %q", options.Reference)
+	}
+
+	return cp.Image(policyContext, dest, src, copyOptions)
 }

 // GetImageInspectInfo returns the inspect information of an image
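
ImportImage builds its containers/image source by prefixing the tarball path with the TarballTransport name and letting alltransports resolve it. A minimal sketch of that reference form (not part of this commit), assuming the vendored containers/image wires the tarball transport into alltransports, which the new import in runtime_img.go suggests; the temp file stands in for a real tarball:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Placeholder path; ImportImage receives a real tarball path or a downloaded temp file.
	f, err := ioutil.TempFile("", "import-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	// "tarball:" + path is the same reference string ImportImage builds via
	// TarballTransport before handing it to ParseImageName.
	ref, err := alltransports.ParseImageName("tarball:" + f.Name())
	if err != nil {
		panic(err)
	}
	// The resolved reference can then be copied into containers/storage with
	// cp.Image, as ImportImage does above.
	fmt.Println(ref.Transport().Name()) // expected: tarball
}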

test/kpod_import.bats (new file, 137 lines)
@@ -0,0 +1,137 @@
#!/usr/bin/env bats

load helpers

IMAGE="redis:alpine"

function teardown() {
	cleanup_test
}

@test "kpod import with source and reference" {
	start_crio
	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	run crioctl image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	ctr_id="$output"
	run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} import container.tar imported-image
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} images
	echo "$output"
	[ "$status" -eq 0 ]
	images="$output"
	run grep "imported-image" <<< "$images"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_ctrs
	cleanup_pods
	stop_crio
	rm -f container.tar
}

@test "kpod import without reference" {
	start_crio
	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	run crioctl image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	ctr_id="$output"
	run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} import container.tar
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} images
	echo "$output"
	[ "$status" -eq 0 ]
	images="$output"
	run grep "<none>" <<< "$images"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_ctrs
	cleanup_pods
	stop_crio
	rm -f container.tar
}

@test "kpod import with message flag" {
	start_crio
	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	run crioctl image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	ctr_id="$output"
	run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} import --message "importing container test message" container.tar imported-image
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} history imported-image
	echo "$output"
	[ "$status" -eq 0 ]
	history="$output"
	run grep "importing container test message" <<< "$history"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_ctrs
	cleanup_pods
	stop_crio
	rm -f container.tar
}

@test "kpod import with change flag" {
	start_crio
	run crioctl pod run --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	run crioctl image pull "$IMAGE"
	echo "$output"
	[ "$status" -eq 0 ]
	run crioctl ctr create --config "$TESTDATA"/container_config.json --pod "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	ctr_id="$output"
	run ${KPOD_BINARY} ${KPOD_OPTIONS} export -o container.tar "$ctr_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} import --change "CMD=/bin/bash" container.tar imported-image
	echo "$output"
	[ "$status" -eq 0 ]
	run ${KPOD_BINARY} ${KPOD_OPTIONS} inspect imported-image
	echo "$output"
	[ "$status" -eq 0 ]
	inspect="$output"
	run grep "/bin/bash" <<< "$inspect"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_ctrs
	cleanup_pods
	stop_crio
	rm -f container.tar
}

@@ -12,7 +12,7 @@ github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6
 github.com/json-iterator/go 1.0.0
 github.com/peterbourgon/diskv v2.0.1
 github.com/sirupsen/logrus v1.0.0
-github.com/containers/image storage-update https://github.com/nalind/image
+github.com/containers/image b34d14c93d3a40f2309cb9afbdf9bb76164c53e3
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/ostreedev/ostree-go master
 github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5

vendor/github.com/containers/image/copy/copy.go (generated, vendored; 11 changed lines)
@@ -320,15 +320,6 @@ func (ic *imageCopier) copyLayers() error {
 	srcInfos := ic.src.LayerInfos()
 	destInfos := []types.BlobInfo{}
 	diffIDs := []digest.Digest{}
-	updatedSrcInfos := ic.src.UpdatedLayerInfos()
-	srcInfosUpdated := false
-	if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
-		if !ic.canModifyManifest {
-			return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
-		}
-		srcInfos = updatedSrcInfos
-		srcInfosUpdated = true
-	}
 	for _, srcLayer := range srcInfos {
 		var (
 			destInfo types.BlobInfo

@@ -357,7 +348,7 @@ func (ic *imageCopier) copyLayers() error {
 	if ic.diffIDsAreNeeded {
 		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
 	}
-	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+	if layerDigestsDiffer(srcInfos, destInfos) {
 		ic.manifestUpdates.LayerInfos = destInfos
 	}
 	return nil

vendor/github.com/containers/image/directory/directory_src.go (generated, vendored; 5 changed lines)
@@ -74,8 +74,3 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
 	}
 	return signatures, nil
 }
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dirImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}

vendor/github.com/containers/image/docker/archive/src.go (generated, vendored; 5 changed lines)
@@ -34,8 +34,3 @@ func (s *archiveImageSource) Reference() types.ImageReference {
 func (s *archiveImageSource) Close() error {
 	return nil
 }
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *archiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}

vendor/github.com/containers/image/docker/daemon/daemon_src.go (generated, vendored; 5 changed lines)
@@ -83,8 +83,3 @@ func (s *daemonImageSource) Reference() types.ImageReference {
 func (s *daemonImageSource) Close() error {
 	return os.Remove(s.tarCopyPath)
 }
-
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *daemonImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}

vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored; 5 changed lines)
@@ -52,11 +52,6 @@ func (s *dockerImageSource) Close() error {
 	return nil
 }

-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (s *dockerImageSource) UpdatedLayerInfos() []types.BlobInfo {
-	return nil
-}
-
 // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
 // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
 func simplifyContentType(contentType string) string {

vendor/github.com/containers/image/docker/tarfile/dest.go (generated, vendored; 6 changed lines)
@@ -168,7 +168,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
 func (d *Destination) PutManifest(m []byte) error {
 	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
 	// so the caller trying a different manifest kind would be pointless.
-	var man manifest.Schema2
+	var man schema2Manifest
 	if err := json.Unmarshal(m, &man); err != nil {
 		return errors.Wrap(err, "Error parsing manifest")
 	}

@@ -177,12 +177,12 @@ func (d *Destination) PutManifest(m []byte) error {
 	}

 	layerPaths := []string{}
-	for _, l := range man.LayersDescriptors {
+	for _, l := range man.Layers {
 		layerPaths = append(layerPaths, l.Digest.String())
 	}

 	items := []ManifestItem{{
-		Config:   man.ConfigDescriptor.Digest.String(),
+		Config:   man.Config.Digest.String(),
 		RepoTags: []string{d.repoTag},
 		Layers:   layerPaths,
 		Parent:   "",

vendor/github.com/containers/image/docker/tarfile/src.go (generated, vendored; 8 changed lines)
@@ -254,22 +254,22 @@ func (s *Source) GetManifest() ([]byte, string, error) {
 	if err := s.ensureCachedDataIsPresent(); err != nil {
 		return nil, "", err
 	}
-	m := manifest.Schema2{
+	m := schema2Manifest{
 		SchemaVersion: 2,
 		MediaType:     manifest.DockerV2Schema2MediaType,
-		ConfigDescriptor: manifest.Schema2Descriptor{
+		Config: distributionDescriptor{
 			MediaType: manifest.DockerV2Schema2ConfigMediaType,
 			Size:      int64(len(s.configBytes)),
 			Digest:    s.configDigest,
 		},
-		LayersDescriptors: []manifest.Schema2Descriptor{},
+		Layers: []distributionDescriptor{},
 	}
 	for _, diffID := range s.orderedDiffIDList {
 		li, ok := s.knownLayers[diffID]
 		if !ok {
 			return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
 		}
-		m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+		m.Layers = append(m.Layers, distributionDescriptor{
 			Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
 			MediaType: manifest.DockerV2Schema2LayerMediaType,
 			Size:      li.size,

vendor/github.com/containers/image/docker/tarfile/types.go (generated, vendored; 26 changed lines)
@@ -1,9 +1,6 @@
 package tarfile

-import (
-	"github.com/containers/image/manifest"
-	"github.com/opencontainers/go-digest"
-)
+import "github.com/opencontainers/go-digest"

 // Various data structures.

@@ -21,13 +18,30 @@ type ManifestItem struct {
 	Config       string
 	RepoTags     []string
 	Layers       []string
 	Parent       imageID `json:",omitempty"`
-	LayerSources map[diffID]manifest.Schema2Descriptor `json:",omitempty"`
+	LayerSources map[diffID]distributionDescriptor `json:",omitempty"`
 }

 type imageID string
 type diffID digest.Digest

+// Based on github.com/docker/distribution/blobs.go
+type distributionDescriptor struct {
+	MediaType string        `json:"mediaType,omitempty"`
+	Size      int64         `json:"size,omitempty"`
+	Digest    digest.Digest `json:"digest,omitempty"`
+	URLs      []string      `json:"urls,omitempty"`
+}
+
+// Based on github.com/docker/distribution/manifest/schema2/manifest.go
+// FIXME: We are repeating this all over the place; make a public copy?
+type schema2Manifest struct {
+	SchemaVersion int                      `json:"schemaVersion"`
+	MediaType     string                   `json:"mediaType,omitempty"`
+	Config        distributionDescriptor   `json:"config"`
+	Layers        []distributionDescriptor `json:"layers"`
+}
+
 // Based on github.com/docker/docker/image/image.go
 // MOST CONTENT OMITTED AS UNNECESSARY
 type image struct {
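
For orientation, a self-contained sketch (not part of the diff) of the JSON layout these schema2 types describe, using locally re-declared stand-in structs since distributionDescriptor and schema2Manifest are unexported in the vendored package; the sizes and digests are taken from the example output earlier in this commit:

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins mirroring the vendored types; Digest is a plain string here for brevity.
type descriptorJSON struct {
	MediaType string `json:"mediaType,omitempty"`
	Size      int64  `json:"size,omitempty"`
	Digest    string `json:"digest,omitempty"`
}

type schema2JSON struct {
	SchemaVersion int              `json:"schemaVersion"`
	MediaType     string           `json:"mediaType,omitempty"`
	Config        descriptorJSON   `json:"config"`
	Layers        []descriptorJSON `json:"layers"`
}

func main() {
	// The manifest shape that tarfile's GetManifest assembles from a loaded tarball.
	m := schema2JSON{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config: descriptorJSON{
			MediaType: "application/vnd.docker.container.image.v1+json",
			Size:      448,
			Digest:    "sha256:c16a6d30f3782288ec4e7521c754acc29d37155629cb39149756f486dae2d4cd",
		},
		Layers: []descriptorJSON{{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
			Digest:    "sha256:b41deda5a2feb1f03a5c1bb38c598cbc12c9ccd675f438edc6acd815f7585b86",
		}},
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}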

vendor/github.com/containers/image/image/docker_list.go (generated, vendored; 2 changed lines)
@@ -21,7 +21,7 @@ type platformSpec struct {

 // A manifestDescriptor references a platform-specific manifest.
 type manifestDescriptor struct {
-	manifest.Schema2Descriptor
+	descriptor
 	Platform platformSpec `json:"platform"`
 }

vendor/github.com/containers/image/image/docker_schema1.go (generated, vendored; 207 changed lines)
@@ -2,7 +2,9 @@ package image

 import (
 	"encoding/json"
+	"regexp"
 	"strings"
+	"time"

 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"

@@ -12,25 +14,87 @@ import (
 	"github.com/pkg/errors"
 )

-type manifestSchema1 struct {
-	m *manifest.Schema1
+var (
+	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
 }

-func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
-	m, err := manifest.Schema1FromManifest(manifestBlob)
-	if err != nil {
+type historySchema1 struct {
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field.
+type v1Compatibility struct {
+	ID              string    `json:"id"`
+	Parent          string    `json:"parent,omitempty"`
+	Comment         string    `json:"comment,omitempty"`
+	Created         time.Time `json:"created"`
+	ContainerConfig struct {
+		Cmd []string
+	} `json:"container_config,omitempty"`
+	Author    string `json:"author,omitempty"`
+	ThrowAway bool   `json:"throwaway,omitempty"`
+}
+
+type manifestSchema1 struct {
+	Name          string            `json:"name"`
+	Tag           string            `json:"tag"`
+	Architecture  string            `json:"architecture"`
+	FSLayers      []fsLayersSchema1 `json:"fsLayers"`
+	History       []historySchema1  `json:"history"`
+	SchemaVersion int               `json:"schemaVersion"`
+}
+
+func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
+	mschema1 := &manifestSchema1{}
+	if err := json.Unmarshal(manifest, mschema1); err != nil {
 		return nil, err
 	}
-	return &manifestSchema1{m: m}, nil
+	if mschema1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
+	}
+	if len(mschema1.FSLayers) != len(mschema1.History) {
+		return nil, errors.New("length of history not equal to number of layers")
+	}
+	if len(mschema1.FSLayers) == 0 {
+		return nil, errors.New("no FSLayers in manifest")
+	}
+
+	if err := fixManifestLayers(mschema1); err != nil {
+		return nil, err
+	}
+	return mschema1, nil
 }

 // manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
-	return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	return &manifestSchema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
 }

 func (m *manifestSchema1) serialize() ([]byte, error) {
-	return m.m.Serialize()
+	// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+	unsigned, err := json.Marshal(*m)
+	if err != nil {
+		return nil, err
+	}
+	return manifest.AddDummyV2S1Signature(unsigned)
 }

 func (m *manifestSchema1) manifestMIMEType() string {

@@ -40,7 +104,7 @@ func (m *manifestSchema1) manifestMIMEType() string {
 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
 // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
 func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
-	return m.m.ConfigInfo()
+	return types.BlobInfo{}
 }

 // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.

@@ -64,7 +128,11 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	layers := make([]types.BlobInfo, len(m.FSLayers))
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+	}
+	return layers
 }

 // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.

@@ -85,16 +153,16 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named)
 	} else {
 		tag = ""
 	}
-	return m.m.Name != name || m.m.Tag != tag
+	return m.Name != name || m.Tag != tag
 }

 func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
 	v1 := &v1Image{}
-	if err := json.Unmarshal([]byte(m.m.History[0].V1Compatibility), v1); err != nil {
+	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
 		return nil, err
 	}
 	i := &types.ImageInspectInfo{
-		Tag:           m.m.Tag,
+		Tag:           m.Tag,
 		DockerVersion: v1.DockerVersion,
 		Created:       v1.Created,
 		Architecture:  v1.Architecture,

@@ -116,18 +184,25 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
 func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
-	copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+	copy := *m
 	if options.LayerInfos != nil {
-		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
-			return nil, err
+		// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+		if len(copy.FSLayers) != len(options.LayerInfos) {
+			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
+		}
+		for i, info := range options.LayerInfos {
+			// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+			// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+			// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+			copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
 		}
 	}
 	if options.EmbeddedDockerReference != nil {
-		copy.m.Name = reference.Path(options.EmbeddedDockerReference)
+		copy.Name = reference.Path(options.EmbeddedDockerReference)
 		if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
-			copy.m.Tag = tagged.Tag()
+			copy.Tag = tagged.Tag()
 		} else {
-			copy.m.Tag = ""
+			copy.Tag = ""
 		}
 	}

@@ -145,20 +220,78 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
 	return memoryImageFromManifest(&copy), nil
 }

+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
+// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
+// both from manifest.History and manifest.FSLayers).
+// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func fixManifestLayers(manifest *manifestSchema1) error {
+	type imageV1 struct {
+		ID     string
+		Parent string
+	}
+	// Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
+	imgs := make([]*imageV1, len(manifest.FSLayers))
+	for i := range manifest.FSLayers {
+		img := &imageV1{}
+
+		if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
+			return err
+		}
+
+		imgs[i] = img
+		if err := validateV1ID(img.ID); err != nil {
+			return err
+		}
+	}
+	if imgs[len(imgs)-1].Parent != "" {
+		return errors.New("Invalid parent ID in the base layer of the image")
+	}
+	// check general duplicates to error instead of a deadlock
+	idmap := make(map[string]struct{})
+	var lastID string
+	for _, img := range imgs {
+		// skip IDs that appear after each other, we handle those later
+		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+			return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+		}
+		lastID = img.ID
+		idmap[lastID] = struct{}{}
+	}
+	// backwards loop so that we keep the remaining indexes after removing items
+	for i := len(imgs) - 2; i >= 0; i-- {
+		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+			manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
+			manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
+		} else if imgs[i].Parent != imgs[i+1].ID {
+			return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+		}
+	}
+	return nil
+}
+
+func validateV1ID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return errors.Errorf("image ID %q is invalid", id)
+	}
+	return nil
+}
+
 // Based on github.com/docker/docker/distribution/pull_v2.go
 func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
-	if len(m.m.History) == 0 {
+	if len(m.History) == 0 {
 		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
 		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
 	}
-	if len(m.m.History) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
+	if len(m.History) != len(m.FSLayers) {
+		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
 	}
-	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
+		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
 	}
-	if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+	if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
+		return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
 	}

 	rootFS := rootFS{

@@ -166,13 +299,13 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
 		DiffIDs:   []digest.Digest{},
 		BaseLayer: "",
 	}
-	var layers []manifest.Schema2Descriptor
-	history := make([]imageHistory, len(m.m.History))
-	for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
-		v2Index := (len(m.m.History) - 1) - v1Index
+	var layers []descriptor
+	history := make([]imageHistory, len(m.History))
+	for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
+		v2Index := (len(m.History) - 1) - v1Index

-		var v1compat manifest.Schema1V1Compatibility
-		if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
+		var v1compat v1Compatibility
+		if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
 			return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
 		}
 		history[v2Index] = imageHistory{

@@ -192,19 +325,19 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
 			if layerDiffIDs != nil {
 				d = layerDiffIDs[v2Index]
 			}
-			layers = append(layers, manifest.Schema2Descriptor{
+			layers = append(layers, descriptor{
 				MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
 				Size:      size,
-				Digest:    m.m.FSLayers[v1Index].BlobSum,
+				Digest:    m.FSLayers[v1Index].BlobSum,
 			})
 			rootFS.DiffIDs = append(rootFS.DiffIDs, d)
 		}
 	}
-	configJSON, err := configJSONFromV1Config([]byte(m.m.History[0].V1Compatibility), rootFS, history)
+	configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
 	if err != nil {
 		return nil, err
 	}
-	configDescriptor := manifest.Schema2Descriptor{
+	configDescriptor := descriptor{
 		MediaType: "application/vnd.docker.container.image.v1+json",
 		Size:      int64(len(configJSON)),
 		Digest:    digest.FromBytes(configJSON),

vendor/github.com/containers/image/image/docker_schema2.go (generated, vendored; 124 changed lines)
@@ -29,44 +29,54 @@ var gzippedEmptyLayer = []byte{
 // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
 const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

-type manifestSchema2 struct {
-	src        types.ImageSource // May be nil if configBlob is not nil
-	configBlob []byte            // If set, corresponds to contents of ConfigDescriptor.
-	m          *manifest.Schema2
+type descriptor struct {
+	MediaType string        `json:"mediaType"`
+	Size      int64         `json:"size"`
+	Digest    digest.Digest `json:"digest"`
+	URLs      []string      `json:"urls,omitempty"`
 }

-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
-	m, err := manifest.Schema2FromManifest(manifestBlob)
-	if err != nil {
+type manifestSchema2 struct {
+	src               types.ImageSource // May be nil if configBlob is not nil
+	configBlob        []byte            // If set, corresponds to contents of ConfigDescriptor.
+	SchemaVersion     int               `json:"schemaVersion"`
+	MediaType         string            `json:"mediaType"`
+	ConfigDescriptor  descriptor        `json:"config"`
+	LayersDescriptors []descriptor      `json:"layers"`
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+	v2s2 := manifestSchema2{src: src}
+	if err := json.Unmarshal(manifest, &v2s2); err != nil {
 		return nil, err
 	}
-	return &manifestSchema2{
-		src: src,
-		m:   m,
-	}, nil
+	return &v2s2, nil
 }

 // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
+func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
 	return &manifestSchema2{
 		src:        src,
 		configBlob: configBlob,
-		m:          manifest.Schema2FromComponents(config, layers),
+		SchemaVersion:     2,
+		MediaType:         manifest.DockerV2Schema2MediaType,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
 	}
 }

 func (m *manifestSchema2) serialize() ([]byte, error) {
-	return m.m.Serialize()
+	return json.Marshal(*m)
 }

 func (m *manifestSchema2) manifestMIMEType() string {
-	return m.m.MediaType
+	return m.MediaType
 }

 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
 // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
 func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
-	return m.m.ConfigInfo()
+	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
 }

 // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about

@@ -95,9 +105,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
 		}
 		stream, _, err := m.src.GetBlob(types.BlobInfo{
-			Digest: m.m.ConfigDescriptor.Digest,
-			Size:   m.m.ConfigDescriptor.Size,
-			URLs:   m.m.ConfigDescriptor.URLs,
+			Digest: m.ConfigDescriptor.Digest,
+			Size:   m.ConfigDescriptor.Size,
+			URLs:   m.ConfigDescriptor.URLs,
 		})
 		if err != nil {
 			return nil, err

@@ -108,8 +118,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 			return nil, err
 		}
 		computedDigest := digest.FromBytes(blob)
-		if computedDigest != m.m.ConfigDescriptor.Digest {
-			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+		if computedDigest != m.ConfigDescriptor.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
 		}
 		m.configBlob = blob
 	}

@@ -120,7 +130,15 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	blobs := []types.BlobInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, types.BlobInfo{
+			Digest: layer.Digest,
+			Size:   layer.Size,
+			URLs:   layer.URLs,
+		})
+	}
+	return blobs
 }

 // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.

@@ -161,14 +179,17 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
 func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
-	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+	copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
|
||||||
src: m.src,
|
|
||||||
configBlob: m.configBlob,
|
|
||||||
m: manifest.Schema2Clone(m.m),
|
|
||||||
}
|
|
||||||
if options.LayerInfos != nil {
|
if options.LayerInfos != nil {
|
||||||
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
|
if len(copy.LayersDescriptors) != len(options.LayerInfos) {
|
||||||
return nil, err
|
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
|
||||||
|
}
|
||||||
|
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
|
||||||
|
for i, info := range options.LayerInfos {
|
||||||
|
copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
|
||||||
|
copy.LayersDescriptors[i].Digest = info.Digest
|
||||||
|
copy.LayersDescriptors[i].Size = info.Size
|
||||||
|
copy.LayersDescriptors[i].URLs = info.URLs
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
|
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
|
||||||
|
@ -186,15 +207,6 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
|
||||||
return memoryImageFromManifest(©), nil
|
return memoryImageFromManifest(©), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
|
|
||||||
return imgspecv1.Descriptor{
|
|
||||||
MediaType: d.MediaType,
|
|
||||||
Size: d.Size,
|
|
||||||
Digest: d.Digest,
|
|
||||||
URLs: d.URLs,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
|
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
|
||||||
configOCI, err := m.OCIConfig()
|
configOCI, err := m.OCIConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -205,16 +217,18 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
config := imgspecv1.Descriptor{
|
config := descriptorOCI1{
|
||||||
MediaType: imgspecv1.MediaTypeImageConfig,
|
descriptor: descriptor{
|
||||||
Size: int64(len(configOCIBytes)),
|
MediaType: imgspecv1.MediaTypeImageConfig,
|
||||||
Digest: digest.FromBytes(configOCIBytes),
|
Size: int64(len(configOCIBytes)),
|
||||||
|
Digest: digest.FromBytes(configOCIBytes),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
|
layers := make([]descriptorOCI1, len(m.LayersDescriptors))
|
||||||
for idx := range layers {
|
for idx := range layers {
|
||||||
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
|
layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
|
||||||
if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
|
if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
|
||||||
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
|
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
|
||||||
} else {
|
} else {
|
||||||
// we assume layers are gzip'ed because docker v2s2 only deals with
|
// we assume layers are gzip'ed because docker v2s2 only deals with
|
||||||
|
@ -239,8 +253,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
|
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
|
||||||
fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
|
fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
|
||||||
history := make([]manifest.Schema1History, len(imageConfig.History))
|
history := make([]historySchema1, len(imageConfig.History))
|
||||||
nonemptyLayerIndex := 0
|
nonemptyLayerIndex := 0
|
||||||
var parentV1ID string // Set in the loop
|
var parentV1ID string // Set in the loop
|
||||||
v1ID := ""
|
v1ID := ""
|
||||||
|
@ -268,10 +282,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
|
||||||
}
|
}
|
||||||
blobDigest = gzippedEmptyLayerDigest
|
blobDigest = gzippedEmptyLayerDigest
|
||||||
} else {
|
} else {
|
||||||
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
|
if nonemptyLayerIndex >= len(m.LayersDescriptors) {
|
||||||
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
|
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
|
||||||
}
|
}
|
||||||
blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
|
blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
|
||||||
nonemptyLayerIndex++
|
nonemptyLayerIndex++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -282,7 +296,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
|
||||||
}
|
}
|
||||||
v1ID = v
|
v1ID = v
|
||||||
|
|
||||||
fakeImage := manifest.Schema1V1Compatibility{
|
fakeImage := v1Compatibility{
|
||||||
ID: v1ID,
|
ID: v1ID,
|
||||||
Parent: parentV1ID,
|
Parent: parentV1ID,
|
||||||
Comment: historyEntry.Comment,
|
Comment: historyEntry.Comment,
|
||||||
|
@ -296,8 +310,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
|
||||||
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
|
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
|
||||||
}
|
}
|
||||||
|
|
||||||
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
|
fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
|
||||||
history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
|
history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
|
||||||
// Note that parentV1ID of the top layer is preserved when exiting this loop
|
// Note that parentV1ID of the top layer is preserved when exiting this loop
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
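For readers following this vendor change: below is a minimal, standalone sketch (not taken from this repository) of how the inline schema 2 types reintroduced above decode a manifest blob. Digests are plain strings here to avoid the go-digest dependency, and the unexported src/configBlob fields are omitted; the JSON shape mirrors the right-hand side of the diff.

// Sketch of decoding a docker/distribution schema 2 manifest into the inline
// descriptor/manifestSchema2 shapes shown in this diff. Illustration only.
package main

import (
    "encoding/json"
    "fmt"
)

type descriptor struct {
    MediaType string   `json:"mediaType"`
    Size      int64    `json:"size"`
    Digest    string   `json:"digest"` // the vendored code uses digest.Digest here
    URLs      []string `json:"urls,omitempty"`
}

type manifestSchema2 struct {
    SchemaVersion     int          `json:"schemaVersion"`
    MediaType         string       `json:"mediaType"`
    ConfigDescriptor  descriptor   `json:"config"`
    LayersDescriptors []descriptor `json:"layers"`
}

func main() {
    blob := []byte(`{
        "schemaVersion": 2,
        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
        "config": {"mediaType": "application/vnd.docker.container.image.v1+json", "size": 7023, "digest": "sha256:deadbeef"},
        "layers": [{"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 32654, "digest": "sha256:cafebabe"}]
    }`)

    var m manifestSchema2
    if err := json.Unmarshal(blob, &m); err != nil {
        panic(err)
    }
    // Mirrors what the rewritten LayerInfos() does: walk LayersDescriptors and report each blob.
    for _, layer := range m.LayersDescriptors {
        fmt.Printf("layer %s (%d bytes)\n", layer.Digest, layer.Size)
    }
}

This is essentially what manifestSchema2FromManifest plus LayerInfos() amount to once json.Unmarshal replaces the manifest.Schema2FromManifest helper.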
21  vendor/github.com/containers/image/image/manifest.go  generated  vendored
@@ -1,7 +1,6 @@
 package image
 
 import (
-	"fmt"
 	"time"
 
 	"github.com/containers/image/docker/reference"
@@ -89,8 +88,11 @@ type genericManifest interface {
 }
 
 func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
-	switch manifest.NormalizedMIMEType(mt) {
-	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+	switch mt {
+	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+	// need to happen within the ImageSource.
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
 		return manifestSchema1FromManifest(manblob)
 	case imgspecv1.MediaTypeImageManifest:
 		return manifestOCI1FromManifest(src, manblob)
@@ -98,8 +100,17 @@ func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string)
 		return manifestSchema2FromManifest(src, manblob)
 	case manifest.DockerV2ListMediaType:
 		return manifestSchema2FromManifestList(src, manblob)
-	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
-		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+	default:
+		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+		//
+		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+		// This makes no real sense, but it happens
+		// because requests for manifests are
+		// redirected to a content distribution
+		// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+		return manifestSchema1FromManifest(manblob)
 	}
 }
 
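A small self-contained sketch of the MIME-type dispatch that the rewritten manifestInstanceFromBlob performs, including the "application/json" and unknown-type fallbacks to schema 1. The media-type literals below stand in for the manifest package constants (DockerV2Schema1MediaType and friends); only the dispatch decision is illustrated, no parsing happens.

// Sketch of the manifest MIME-type dispatch; names and constants are local to
// this example, not the vendored identifiers.
package main

import "fmt"

const (
    dockerV2Schema1       = "application/vnd.docker.distribution.manifest.v1+json"
    dockerV2Schema1Signed = "application/vnd.docker.distribution.manifest.v1+prettyjws"
    dockerV2Schema2       = "application/vnd.docker.distribution.manifest.v2+json"
    ociImageManifest      = "application/vnd.oci.image.manifest.v1+json"
)

// kindForMIME mirrors the switch above: it only decides which parser would run.
func kindForMIME(mt string) string {
    switch mt {
    case dockerV2Schema1, dockerV2Schema1Signed, "application/json":
        return "schema1"
    case dockerV2Schema2:
        return "schema2"
    case ociImageManifest:
        return "oci1"
    default:
        // Unknown or missing media type: fall back to schema 1,
        // matching the default case added in this hunk.
        return "schema1"
    }
}

func main() {
    for _, mt := range []string{dockerV2Schema2, "application/json", "text/plain"} {
        fmt.Printf("%-60s -> %s\n", mt, kindForMIME(mt))
    }
}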
7  vendor/github.com/containers/image/image/memory.go  generated  vendored
@@ -71,10 +71,3 @@ func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) {
 func (i *memoryImage) IsMultiImage() bool {
 	return false
 }
-
-// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (i *memoryImage) UpdatedLayerInfos() []types.BlobInfo {
-	return i.LayerInfos()
-}
90  vendor/github.com/containers/image/image/oci.go  generated  vendored
@@ -12,34 +12,41 @@ import (
 	"github.com/pkg/errors"
 )
 
-type manifestOCI1 struct {
-	src        types.ImageSource // May be nil if configBlob is not nil
-	configBlob []byte            // If set, corresponds to contents of m.Config.
-	m          *manifest.OCI1
-}
-
-func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
-	m, err := manifest.OCI1FromManifest(manifestBlob)
-	if err != nil {
+type descriptorOCI1 struct {
+	descriptor
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+type manifestOCI1 struct {
+	src               types.ImageSource // May be nil if configBlob is not nil
+	configBlob        []byte            // If set, corresponds to contents of ConfigDescriptor.
+	SchemaVersion     int               `json:"schemaVersion"`
+	ConfigDescriptor  descriptorOCI1    `json:"config"`
+	LayersDescriptors []descriptorOCI1  `json:"layers"`
+	Annotations       map[string]string `json:"annotations,omitempty"`
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+	oci := manifestOCI1{src: src}
+	if err := json.Unmarshal(manifest, &oci); err != nil {
 		return nil, err
 	}
-	return &manifestOCI1{
-		src: src,
-		m:   m,
-	}, nil
+	return &oci, nil
 }
 
 // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
-func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest {
 	return &manifestOCI1{
 		src:        src,
 		configBlob: configBlob,
-		m:          manifest.OCI1FromComponents(config, layers),
+		SchemaVersion:     2,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
 	}
 }
 
 func (m *manifestOCI1) serialize() ([]byte, error) {
-	return m.m.Serialize()
+	return json.Marshal(*m)
 }
 
 func (m *manifestOCI1) manifestMIMEType() string {
@@ -49,7 +56,7 @@ func (m *manifestOCI1) manifestMIMEType() string {
 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
 // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
 func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
-	return m.m.ConfigInfo()
+	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations}
 }
 
 // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -60,9 +67,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
 			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
 		}
 		stream, _, err := m.src.GetBlob(types.BlobInfo{
-			Digest: m.m.Config.Digest,
-			Size:   m.m.Config.Size,
-			URLs:   m.m.Config.URLs,
+			Digest: m.ConfigDescriptor.Digest,
+			Size:   m.ConfigDescriptor.Size,
+			URLs:   m.ConfigDescriptor.URLs,
 		})
 		if err != nil {
 			return nil, err
@@ -73,8 +80,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
 			return nil, err
 		}
 		computedDigest := digest.FromBytes(blob)
-		if computedDigest != m.m.Config.Digest {
-			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+		if computedDigest != m.ConfigDescriptor.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
 		}
 		m.configBlob = blob
 	}
@@ -100,7 +107,11 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	blobs := []types.BlobInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
+	}
+	return blobs
 }
 
 // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -141,14 +152,18 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
 func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
-	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
-		src:        m.src,
-		configBlob: m.configBlob,
-		m:          manifest.OCI1Clone(m.m),
-	}
+	copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
 	if options.LayerInfos != nil {
-		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
-			return nil, err
+		if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+		}
+		copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos))
+		for i, info := range options.LayerInfos {
+			copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
+			copy.LayersDescriptors[i].Digest = info.Digest
+			copy.LayersDescriptors[i].Size = info.Size
+			copy.LayersDescriptors[i].Annotations = info.Annotations
+			copy.LayersDescriptors[i].URLs = info.URLs
 		}
 	}
 	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
@@ -164,26 +179,17 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
 	return memoryImageFromManifest(&copy), nil
 }
 
-func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
-	return manifest.Schema2Descriptor{
-		MediaType: d.MediaType,
-		Size:      d.Size,
-		Digest:    d.Digest,
-		URLs:      d.URLs,
-	}
-}
-
 func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
 	// Create a copy of the descriptor.
-	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+	config := m.ConfigDescriptor.descriptor
 
 	// The only difference between OCI and DockerSchema2 is the mediatypes. The
 	// media type of the manifest is handled by manifestSchema2FromComponents.
 	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
 
-	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+	layers := make([]descriptor, len(m.LayersDescriptors))
 	for idx := range layers {
-		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+		layers[idx] = m.LayersDescriptors[idx].descriptor
 		layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
 	}
 
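A brief sketch of why descriptorOCI1 embeds descriptor in the hunk above: encoding/json promotes the embedded struct's tagged fields, so an OCI descriptor marshals flat with "annotations" alongside "mediaType", "size" and "digest", and the schema 2 conversion can reuse the embedded plain descriptor directly. Types and values below are illustrative, not the vendored ones.

// Sketch of struct embedding and JSON flattening as used by descriptorOCI1.
package main

import (
    "encoding/json"
    "fmt"
)

type descriptor struct {
    MediaType string   `json:"mediaType"`
    Size      int64    `json:"size"`
    Digest    string   `json:"digest"`
    URLs      []string `json:"urls,omitempty"`
}

type descriptorOCI1 struct {
    descriptor
    Annotations map[string]string `json:"annotations,omitempty"`
}

func main() {
    d := descriptorOCI1{
        descriptor: descriptor{
            MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
            Size:      32654,
            Digest:    "sha256:cafebabe",
        },
        Annotations: map[string]string{"org.example.note": "illustration only"},
    }
    out, err := json.MarshalIndent(d, "", "  ")
    if err != nil {
        panic(err)
    }
    // Prints a single flat JSON object: the embedded fields are promoted.
    fmt.Println(string(out))
    // The schema 2 conversion in this diff goes the other way: it reuses the
    // embedded plain descriptor via d.descriptor and swaps the media type.
    fmt.Println(d.descriptor.MediaType)
}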
4  vendor/github.com/containers/image/image/sourced.go  generated  vendored
@@ -88,7 +88,3 @@ func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) {
 func (i *sourcedImage) IsMultiImage() bool {
 	return i.manifestMIMEType == manifest.DockerV2ListMediaType
 }
-
-func (i *sourcedImage) UpdatedLayerInfos() []types.BlobInfo {
-	return i.UnparsedImage.UpdatedLayerInfos()
-}
7  vendor/github.com/containers/image/image/unparsed.go  generated  vendored
@@ -83,10 +83,3 @@ func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
 	}
 	return i.cachedSignatures, nil
 }
-
-// UpdatedLayerInfos returns an updated set of layer blob information which may not match the manifest.
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (i *UnparsedImage) UpdatedLayerInfos() []types.BlobInfo {
-	return i.src.UpdatedLayerInfos()
-}
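The memory.go, sourced.go and unparsed.go hunks above all drop the same one-line forwarding method. An illustrative sketch of that delegation pattern (hypothetical names, not the vendored types): each wrapper simply forwards UpdatedLayerInfos to whatever it wraps, bottoming out at the image source.

// Sketch of the forwarding chain being removed; everything here is a stand-in.
package main

import "fmt"

type blobInfo struct {
    Digest string
    Size   int64
}

type layerInfoProvider interface {
    UpdatedLayerInfos() []blobInfo
}

// fakeSource stands in for the image source at the bottom of the chain.
type fakeSource struct{ layers []blobInfo }

func (s *fakeSource) UpdatedLayerInfos() []blobInfo { return s.layers }

// wrapper stands in for the UnparsedImage/sourcedImage layers: it only forwards the call.
type wrapper struct{ inner layerInfoProvider }

func (w *wrapper) UpdatedLayerInfos() []blobInfo { return w.inner.UpdatedLayerInfos() }

func main() {
    src := &fakeSource{layers: []blobInfo{{Digest: "sha256:cafebabe", Size: -1}}}
    img := &wrapper{inner: &wrapper{inner: src}}
    fmt.Println(img.UpdatedLayerInfos())
}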
212  vendor/github.com/containers/image/manifest/docker_schema1.go  generated  vendored
@@ -1,212 +0,0 @@
(entire generated file deleted: the Schema1, Schema1FSLayers, Schema1History and Schema1V1Compatibility types; the Schema1FromManifest, Schema1FromComponents and Schema1Clone constructors; the ConfigInfo, LayerInfos, UpdateLayerInfos and Serialize helpers; the fixManifestLayers and validateV1ID validation; and Inspect)
241  vendor/github.com/containers/image/manifest/docker_schema2.go  generated  vendored
@@ -1,241 +0,0 @@
(entire generated file deleted: the Schema2Descriptor and Schema2 manifest types; the Schema2Port, Schema2PortSet, Schema2HealthConfig, Schema2Config, Schema2V1Image, Schema2RootFS, Schema2History and Schema2Image config types; the Schema2FromManifest, Schema2FromComponents and Schema2Clone constructors; and the ConfigInfo, LayerInfos, UpdateLayerInfos, Serialize and Inspect methods)
73  vendor/github.com/containers/image/manifest/manifest.go  generated  vendored
@@ -2,9 +2,7 @@ package manifest
 
 import (
 	"encoding/json"
-	"fmt"
 
-	"github.com/containers/image/types"
 	"github.com/docker/libtrust"
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -40,33 +38,6 @@ var DefaultRequestedManifestMIMETypes = []string{
 	// DockerV2ListMediaType, // FIXME: Restore this ASAP
 }
 
-// Manifest is an interface for parsing, modifying image manifests in isolation.
-// Callers can either use this abstract interface without understanding the details of the formats,
-// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
-// directly.
-//
-// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
-// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
-type Manifest interface {
-	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-	ConfigInfo() types.BlobInfo
-	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-	// The Digest field is guaranteed to be provided; Size may be -1.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfos() []types.BlobInfo
-	// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-	UpdateLayerInfos(layerInfos []types.BlobInfo) error
-
-	// Inspect returns various information for (skopeo inspect) parsed from the manifest,
-	// incorporating information from a configuration blob returned by configGetter, if
-	// the underlying image format is expected to include a configuration blob.
-	Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
-
-	// Serialize returns the manifest in a blob format.
-	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-	Serialize() ([]byte, error)
-}
-
 // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
 // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
 // but we may not have such metadata available (e.g. when the manifest is a local file).
@@ -171,47 +142,3 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
 	}
 	return js.PrettySignature("signatures")
 }
-
-// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
-// centralizing various workarounds.
-func NormalizedMIMEType(input string) string {
-	switch input {
-	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
-	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
-	// need to happen within the ImageSource.
-	case "application/json":
-		return DockerV2Schema1SignedMediaType
-	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
-		imgspecv1.MediaTypeImageManifest,
-		DockerV2Schema2MediaType,
-		DockerV2ListMediaType:
-		return input
-	default:
-		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
-		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
-		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
-		//
-		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
-		// This makes no real sense, but it happens
-		// because requests for manifests are
-		// redirected to a content distribution
-		// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
-		return DockerV2Schema1SignedMediaType
-	}
-}
-
-// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
-func FromBlob(manblob []byte, mt string) (Manifest, error) {
-	switch NormalizedMIMEType(mt) {
-	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
-		return Schema1FromManifest(manblob)
-	case imgspecv1.MediaTypeImageManifest:
-		return OCI1FromManifest(manblob)
-	case DockerV2Schema2MediaType:
-		return Schema2FromManifest(manblob)
-	case DockerV2ListMediaType:
-		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
-	default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
-		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
-	}
-}
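A usage sketch for the helpers visible in this hunk: GuessMIMEType (kept) and the fallback that the deleted NormalizedMIMEType used to centralize. The import path and identifiers are assumed to match the vendored containers/image/manifest package as shown in this diff; treat the exact API as an assumption of this sketch rather than a guarantee.

// Sketch: guess a manifest's MIME type from its bytes, falling back to the
// (signed) docker schema 1 type when it is unrecognized, as the removed
// NormalizedMIMEType helper did.
package main

import (
    "fmt"

    "github.com/containers/image/manifest"
)

func main() {
    blob := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)

    // GuessMIMEType inspects the blob and returns "" if it is unrecognized.
    mt := manifest.GuessMIMEType(blob)
    if mt == "" {
        // The workaround the deleted NormalizedMIMEType encoded:
        // unknown or missing types are treated as signed docker schema 1.
        mt = manifest.DockerV2Schema1SignedMediaType
    }
    fmt.Println(mt)
}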
108  vendor/github.com/containers/image/manifest/oci.go  generated  vendored
@@ -1,108 +0,0 @@
(entire generated file deleted: the OCI1 type embedding imgspecv1.Manifest; the OCI1FromManifest, OCI1FromComponents and OCI1Clone constructors; and the ConfigInfo, LayerInfos, UpdateLayerInfos, Serialize and Inspect methods)
4
vendor/github.com/containers/image/oci/archive/oci_src.go
generated
vendored
4
vendor/github.com/containers/image/oci/archive/oci_src.go
generated
vendored
|
@ -86,7 +86,3 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int
|
||||||
func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) {
|
func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) {
|
||||||
return s.unpackedSrc.GetSignatures(c)
|
return s.unpackedSrc.GetSignatures(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ociArchiveImageSource) UpdatedLayerInfos() []types.BlobInfo {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
67
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
67
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
|
@ -27,12 +27,23 @@ func newImageDestination(ref ociReference) (types.ImageDestination, error) {
|
||||||
if ref.image == "" {
|
if ref.image == "" {
|
||||||
return nil, errors.Errorf("cannot save image with empty image.ref.name")
|
return nil, errors.Errorf("cannot save image with empty image.ref.name")
|
||||||
}
|
}
|
||||||
index := imgspecv1.Index{
|
|
||||||
Versioned: imgspec.Versioned{
|
var index *imgspecv1.Index
|
||||||
SchemaVersion: 2,
|
if indexExists(ref) {
|
||||||
},
|
var err error
|
||||||
|
index, err = ref.getIndex()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
index = &imgspecv1.Index{
|
||||||
|
Versioned: imgspec.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return &ociImageDestination{ref: ref, index: index}, nil
|
|
||||||
|
return &ociImageDestination{ref: ref, index: *index}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
||||||
|
@ -191,23 +202,20 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
|
||||||
Architecture: runtime.GOARCH,
|
Architecture: runtime.GOARCH,
|
||||||
OS: runtime.GOOS,
|
OS: runtime.GOOS,
|
||||||
}
|
}
|
||||||
d.index.Manifests = append(d.index.Manifests, desc)
|
d.addManifest(&desc)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ensureDirectoryExists(path string) error {
|
func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
|
||||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
for i, manifest := range d.index.Manifests {
|
||||||
if err := os.MkdirAll(path, 0755); err != nil {
|
if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] {
|
||||||
return err
|
// TODO Should there first be a cleanup based on the descriptor we are going to replace?
|
||||||
|
d.index.Manifests[i] = *desc
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
d.index.Manifests = append(d.index.Manifests, *desc)
|
||||||
}
|
|
||||||
|
|
||||||
// ensureParentDirectoryExists ensures the parent of the supplied path exists.
|
|
||||||
func ensureParentDirectoryExists(path string) error {
|
|
||||||
return ensureDirectoryExists(filepath.Dir(path))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
|
func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
|
@ -231,3 +239,30 @@ func (d *ociImageDestination) Commit() error {
|
||||||
}
|
}
|
||||||
return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
|
return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ensureDirectoryExists(path string) error {
|
||||||
|
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
||||||
|
if err := os.MkdirAll(path, 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureParentDirectoryExists ensures the parent of the supplied path exists.
|
||||||
|
func ensureParentDirectoryExists(path string) error {
|
||||||
|
return ensureDirectoryExists(filepath.Dir(path))
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexExists checks whether the index location specified in the OCI reference exists.
|
||||||
|
// The implementation is opinionated, since in case of unexpected errors false is returned
|
||||||
|
func indexExists(ref ociReference) bool {
|
||||||
|
_, err := os.Stat(ref.indexPath())
|
||||||
|
if err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
5
vendor/github.com/containers/image/oci/layout/oci_src.go
generated
vendored
5
vendor/github.com/containers/image/oci/layout/oci_src.go
generated
vendored
|
@ -133,11 +133,6 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e
|
||||||
return nil, 0, errWrap
|
return nil, 0, errWrap
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
|
|
||||||
func (s *ociImageSource) UpdatedLayerInfos() []types.BlobInfo {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getBlobSize(resp *http.Response) int64 {
|
func getBlobSize(resp *http.Response) int64 {
|
||||||
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
19
vendor/github.com/containers/image/oci/layout/oci_transport.go
generated
vendored
19
vendor/github.com/containers/image/oci/layout/oci_transport.go
generated
vendored
|
@ -189,14 +189,25 @@ func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error)
|
||||||
return image.FromSource(src)
|
return image.FromSource(src)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
|
// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together
|
||||||
|
// with an error.
|
||||||
|
func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
|
||||||
indexJSON, err := os.Open(ref.indexPath())
|
indexJSON, err := os.Open(ref.indexPath())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return imgspecv1.Descriptor{}, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer indexJSON.Close()
|
defer indexJSON.Close()
|
||||||
index := imgspecv1.Index{}
|
|
||||||
if err := json.NewDecoder(indexJSON).Decode(&index); err != nil {
|
index := &imgspecv1.Index{}
|
||||||
|
if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return index, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
|
||||||
|
index, err := ref.getIndex()
|
||||||
|
if err != nil {
|
||||||
return imgspecv1.Descriptor{}, err
|
return imgspecv1.Descriptor{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
4
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
4
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
|
@ -242,10 +242,6 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err
|
||||||
return sigs, nil
|
return sigs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *openshiftImageSource) UpdatedLayerInfos() []types.BlobInfo {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
|
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
|
||||||
func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
|
func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
|
||||||
if s.docker != nil {
|
if s.docker != nil {
|
||||||
|
|
1026
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
1026
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
File diff suppressed because it is too large
Load diff
33
vendor/github.com/containers/image/storage/storage_reference.go
generated
vendored
33
vendor/github.com/containers/image/storage/storage_reference.go
generated
vendored
|
@ -1,3 +1,5 @@
|
||||||
|
// +build !containers_image_storage_stub
|
||||||
|
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -6,7 +8,6 @@ import (
|
||||||
"github.com/containers/image/docker/reference"
|
"github.com/containers/image/docker/reference"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/containers/storage"
|
"github.com/containers/storage"
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
@ -19,11 +20,9 @@ type storageReference struct {
|
||||||
reference string
|
reference string
|
||||||
id string
|
id string
|
||||||
name reference.Named
|
name reference.Named
|
||||||
tag string
|
|
||||||
digest digest.Digest
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
|
func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
|
||||||
// We take a copy of the transport, which contains a pointer to the
|
// We take a copy of the transport, which contains a pointer to the
|
||||||
// store that it used for resolving this reference, so that the
|
// store that it used for resolving this reference, so that the
|
||||||
// transport that we'll return from Transport() won't be affected by
|
// transport that we'll return from Transport() won't be affected by
|
||||||
|
@ -33,8 +32,6 @@ func newReference(transport storageTransport, reference, id string, name referen
|
||||||
reference: reference,
|
reference: reference,
|
||||||
id: id,
|
id: id,
|
||||||
name: name,
|
name: name,
|
||||||
tag: tag,
|
|
||||||
digest: digest,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -81,21 +78,8 @@ func (s storageReference) Transport() types.ImageTransport {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return a name with a tag or digest, if we have either, else return it bare.
|
// Return a name with a tag, if we have a name to base them on.
|
||||||
func (s storageReference) DockerReference() reference.Named {
|
func (s storageReference) DockerReference() reference.Named {
|
||||||
if s.name == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if s.tag != "" {
|
|
||||||
if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
|
|
||||||
return namedTagged
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if s.digest != "" {
|
|
||||||
if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
|
|
||||||
return canonical
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s.name
|
return s.name
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -109,7 +93,7 @@ func (s storageReference) StringWithinTransport() string {
|
||||||
optionsList = ":" + strings.Join(options, ",")
|
optionsList = ":" + strings.Join(options, ",")
|
||||||
}
|
}
|
||||||
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
|
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
|
||||||
if s.reference == "" {
|
if s.name == nil {
|
||||||
return storeSpec + "@" + s.id
|
return storeSpec + "@" + s.id
|
||||||
}
|
}
|
||||||
if s.id == "" {
|
if s.id == "" {
|
||||||
|
@ -138,8 +122,11 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
|
||||||
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
|
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
|
||||||
namespaces := []string{}
|
namespaces := []string{}
|
||||||
if s.name != nil {
|
if s.name != nil {
|
||||||
name := reference.TrimNamed(s.name)
|
if s.id != "" {
|
||||||
components := strings.Split(name.String(), "/")
|
// The reference without the ID is also a valid namespace.
|
||||||
|
namespaces = append(namespaces, storeSpec+s.reference)
|
||||||
|
}
|
||||||
|
components := strings.Split(s.name.Name(), "/")
|
||||||
for len(components) > 0 {
|
for len(components) > 0 {
|
||||||
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
|
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
|
||||||
components = components[:len(components)-1]
|
components = components[:len(components)-1]
|
||||||
|
|
179
vendor/github.com/containers/image/storage/storage_transport.go
generated
vendored
179
vendor/github.com/containers/image/storage/storage_transport.go
generated
vendored
|
@ -1,3 +1,5 @@
|
||||||
|
// +build !containers_image_storage_stub
|
||||||
|
|
||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -11,14 +13,11 @@ import (
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/containers/storage"
|
"github.com/containers/storage"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
digest "github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
|
ddigest "github.com/opencontainers/go-digest"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
minimumTruncatedIDLength = 3
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
transports.Register(Transport)
|
transports.Register(Transport)
|
||||||
}
|
}
|
||||||
|
@ -104,124 +103,60 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
|
||||||
// relative to the given store, and returns it in a reference object.
|
// relative to the given store, and returns it in a reference object.
|
||||||
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
|
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
|
||||||
var name reference.Named
|
var name reference.Named
|
||||||
|
var sum digest.Digest
|
||||||
|
var err error
|
||||||
if ref == "" {
|
if ref == "" {
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference")
|
return nil, ErrInvalidReference
|
||||||
}
|
}
|
||||||
if ref[0] == '[' {
|
if ref[0] == '[' {
|
||||||
// Ignore the store specifier.
|
// Ignore the store specifier.
|
||||||
closeIndex := strings.IndexRune(ref, ']')
|
closeIndex := strings.IndexRune(ref, ']')
|
||||||
if closeIndex < 1 {
|
if closeIndex < 1 {
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
|
return nil, ErrInvalidReference
|
||||||
}
|
}
|
||||||
ref = ref[closeIndex+1:]
|
ref = ref[closeIndex+1:]
|
||||||
}
|
}
|
||||||
|
refInfo := strings.SplitN(ref, "@", 2)
|
||||||
// The last segment, if there's more than one, is either a digest from a reference, or an image ID.
|
if len(refInfo) == 1 {
|
||||||
split := strings.LastIndex(ref, "@")
|
// A name.
|
||||||
idOrDigest := ""
|
name, err = reference.ParseNormalizedNamed(refInfo[0])
|
||||||
if split != -1 {
|
if err != nil {
|
||||||
// Peel off that last bit so that we can work on the rest.
|
return nil, err
|
||||||
idOrDigest = ref[split+1:]
|
|
||||||
if idOrDigest == "" {
|
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
|
|
||||||
}
|
}
|
||||||
ref = ref[:split]
|
} else if len(refInfo) == 2 {
|
||||||
|
// An ID, possibly preceded by a name.
|
||||||
|
if refInfo[0] != "" {
|
||||||
|
name, err = reference.ParseNormalizedNamed(refInfo[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sum, err = digest.Parse(refInfo[1])
|
||||||
|
if err != nil || sum.Validate() != nil {
|
||||||
|
sum, err = digest.Parse("sha256:" + refInfo[1])
|
||||||
|
if err != nil || sum.Validate() != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else { // Coverage: len(refInfo) is always 1 or 2
|
||||||
|
// Anything else: store specified in a form we don't
|
||||||
|
// recognize.
|
||||||
|
return nil, ErrInvalidReference
|
||||||
}
|
}
|
||||||
|
|
||||||
// The middle segment (now the last segment), if there is one, is a digest.
|
|
||||||
split = strings.LastIndex(ref, "@")
|
|
||||||
sum := digest.Digest("")
|
|
||||||
if split != -1 {
|
|
||||||
sum = digest.Digest(ref[split+1:])
|
|
||||||
if sum == "" {
|
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
|
|
||||||
}
|
|
||||||
ref = ref[:split]
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we have something that unambiguously should be a digest, validate it, and then the third part,
|
|
||||||
// if we have one, as an ID.
|
|
||||||
id := ""
|
|
||||||
if sum != "" {
|
|
||||||
if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
|
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
|
|
||||||
}
|
|
||||||
if err := sum.Validate(); err != nil {
|
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
|
|
||||||
}
|
|
||||||
id = idOrDigest
|
|
||||||
if img, err := store.Image(idOrDigest); err == nil && img != nil && len(id) >= minimumTruncatedIDLength {
|
|
||||||
// The ID is a truncated version of the ID of an image that's present in local storage,
|
|
||||||
// so we might as well use the expanded value.
|
|
||||||
id = img.ID
|
|
||||||
}
|
|
||||||
} else if idOrDigest != "" {
|
|
||||||
// There was no middle portion, so the final portion could be either a digest or an ID.
|
|
||||||
if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
|
|
||||||
// It's an ID.
|
|
||||||
id = idOrDigest
|
|
||||||
} else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
|
|
||||||
// It's a digest.
|
|
||||||
sum = idSum
|
|
||||||
} else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength {
|
|
||||||
// It's a truncated version of the ID of an image that's present in local storage,
|
|
||||||
// and we may need the expanded value.
|
|
||||||
id = img.ID
|
|
||||||
} else {
|
|
||||||
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's
|
|
||||||
// at least of what we guess is a reasonable minimum length, because we don't want a really short value
|
|
||||||
// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
|
|
||||||
if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
|
|
||||||
if img, err := store.Image(idOrDigest); err == nil && img != nil {
|
|
||||||
// It's a truncated version of the ID of an image that's present in local storage;
|
|
||||||
// we need to expand it.
|
|
||||||
id = img.ID
|
|
||||||
ref = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The initial portion is probably a name, possibly with a tag.
|
|
||||||
if ref != "" {
|
|
||||||
var err error
|
|
||||||
if name, err = reference.ParseNormalizedNamed(ref); err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if name == nil && sum == "" && id == "" {
|
|
||||||
return nil, errors.Errorf("error parsing reference")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct a copy of the store spec.
|
|
||||||
optionsList := ""
|
optionsList := ""
|
||||||
options := store.GraphOptions()
|
options := store.GraphOptions()
|
||||||
if len(options) > 0 {
|
if len(options) > 0 {
|
||||||
optionsList = ":" + strings.Join(options, ",")
|
optionsList = ":" + strings.Join(options, ",")
|
||||||
}
|
}
|
||||||
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
|
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
|
||||||
|
id := ""
|
||||||
// Convert the name back into a reference string, if we got a name.
|
if sum.Validate() == nil {
|
||||||
|
id = sum.Hex()
|
||||||
|
}
|
||||||
refname := ""
|
refname := ""
|
||||||
tag := ""
|
|
||||||
if name != nil {
|
if name != nil {
|
||||||
if sum.Validate() == nil {
|
name = reference.TagNameOnly(name)
|
||||||
canonical, err := reference.WithDigest(name, sum)
|
refname = verboseName(name)
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
|
|
||||||
}
|
|
||||||
refname = verboseName(canonical)
|
|
||||||
} else {
|
|
||||||
name = reference.TagNameOnly(name)
|
|
||||||
tagged, ok := name.(reference.Tagged)
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
|
|
||||||
}
|
|
||||||
refname = verboseName(name)
|
|
||||||
tag = tagged.Tag()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if refname == "" {
|
if refname == "" {
|
||||||
logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
|
logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
|
||||||
|
@ -230,7 +165,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
|
||||||
} else {
|
} else {
|
||||||
logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
|
logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
|
||||||
}
|
}
|
||||||
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
|
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *storageTransport) GetStore() (storage.Store, error) {
|
func (s *storageTransport) GetStore() (storage.Store, error) {
|
||||||
|
@ -249,14 +184,11 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
|
||||||
return s.store, nil
|
return s.store, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseReference takes a name and a tag or digest and/or ID
|
// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
|
||||||
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
|
|
||||||
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
|
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
|
||||||
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
|
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
|
||||||
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
|
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
|
||||||
// tries to figure out which it is, and returns it in a reference object.
|
// tries to figure out which it is, and returns it in a reference object.
|
||||||
// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
|
|
||||||
// even be specified as if it were a _name_, value.
|
|
||||||
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
|
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
|
||||||
var store storage.Store
|
var store storage.Store
|
||||||
// Check if there's a store location prefix. If there is, then it
|
// Check if there's a store location prefix. If there is, then it
|
||||||
|
@ -405,7 +337,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = digest.Parse("sha256:" + scopeInfo[1])
|
_, err = ddigest.Parse("sha256:" + scopeInfo[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -415,28 +347,11 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func verboseName(r reference.Reference) string {
|
func verboseName(name reference.Named) string {
|
||||||
if r == nil {
|
name = reference.TagNameOnly(name)
|
||||||
return ""
|
|
||||||
}
|
|
||||||
named, isNamed := r.(reference.Named)
|
|
||||||
digested, isDigested := r.(reference.Digested)
|
|
||||||
tagged, isTagged := r.(reference.Tagged)
|
|
||||||
name := ""
|
|
||||||
tag := ""
|
tag := ""
|
||||||
sum := ""
|
if tagged, ok := name.(reference.NamedTagged); ok {
|
||||||
if isNamed {
|
tag = ":" + tagged.Tag()
|
||||||
name = (reference.TrimNamed(named)).String()
|
|
||||||
}
|
}
|
||||||
if isTagged {
|
return name.Name() + tag
|
||||||
if tagged.Tag() != "" {
|
|
||||||
tag = ":" + tagged.Tag()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isDigested {
|
|
||||||
if digested.Digest().Validate() == nil {
|
|
||||||
sum = "@" + digested.Digest().String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return name + tag + sum
|
|
||||||
}
|
}
|
||||||
|
|
48
vendor/github.com/containers/image/tarball/doc.go
generated
vendored
Normal file
48
vendor/github.com/containers/image/tarball/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,48 @@
|
||||||
|
// Package tarball provides a way to generate images using one or more layer
|
||||||
|
// tarballs and an optional template configuration.
|
||||||
|
//
|
||||||
|
// An example:
|
||||||
|
// package main
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "fmt"
|
||||||
|
//
|
||||||
|
// cp "github.com/containers/image/copy"
|
||||||
|
// "github.com/containers/image/tarball"
|
||||||
|
// "github.com/containers/image/transports/alltransports"
|
||||||
|
//
|
||||||
|
// imgspecv1 "github.com/containers/image/transports/alltransports"
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// func imageFromTarball() {
|
||||||
|
// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
|
||||||
|
// // - or -
|
||||||
|
// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// updater, ok := src.(tarball.ConfigUpdater)
|
||||||
|
// if !ok {
|
||||||
|
// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
|
||||||
|
// }
|
||||||
|
// config := imgspecv1.Image{
|
||||||
|
// Config: imgspecv1.ImageConfig{
|
||||||
|
// Cmd: []string{"/bin/bash"},
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
// annotations := make(map[string]string)
|
||||||
|
// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
|
||||||
|
// err = updater.ConfigUpdate(config, annotations)
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// err = cp.Image(nil, dest, src, nil)
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
package tarball
|
88
vendor/github.com/containers/image/tarball/tarball_reference.go
generated
vendored
Normal file
88
vendor/github.com/containers/image/tarball/tarball_reference.go
generated
vendored
Normal file
|
@ -0,0 +1,88 @@
|
||||||
|
package tarball
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containers/image/docker/reference"
|
||||||
|
"github.com/containers/image/image"
|
||||||
|
"github.com/containers/image/types"
|
||||||
|
|
||||||
|
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigUpdater is an interface that ImageReferences for "tarball" images also
|
||||||
|
// implement. It can be used to set values for a configuration, and to set
|
||||||
|
// image annotations which will be present in the images returned by the
|
||||||
|
// reference's NewImage() or NewImageSource() methods.
|
||||||
|
type ConfigUpdater interface {
|
||||||
|
ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type tarballReference struct {
|
||||||
|
transport types.ImageTransport
|
||||||
|
config imgspecv1.Image
|
||||||
|
annotations map[string]string
|
||||||
|
filenames []string
|
||||||
|
stdin []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigUpdate updates the image's default configuration and adds annotations
|
||||||
|
// which will be visible in source images created using this reference.
|
||||||
|
func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error {
|
||||||
|
r.config = config
|
||||||
|
if r.annotations == nil {
|
||||||
|
r.annotations = make(map[string]string)
|
||||||
|
}
|
||||||
|
for k, v := range annotations {
|
||||||
|
r.annotations[k] = v
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) Transport() types.ImageTransport {
|
||||||
|
return r.transport
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) StringWithinTransport() string {
|
||||||
|
return strings.Join(r.filenames, ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) DockerReference() reference.Named {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) PolicyConfigurationIdentity() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) PolicyConfigurationNamespaces() []string {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
||||||
|
src, err := r.NewImageSource(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
img, err := image.FromSource(src)
|
||||||
|
if err != nil {
|
||||||
|
src.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return img, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) DeleteImage(ctx *types.SystemContext) error {
|
||||||
|
for _, filename := range r.filenames {
|
||||||
|
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("error removing %q: %v", filename, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||||
|
return nil, fmt.Errorf("destination not implemented yet")
|
||||||
|
}
|
250
vendor/github.com/containers/image/tarball/tarball_src.go
generated
vendored
Normal file
250
vendor/github.com/containers/image/tarball/tarball_src.go
generated
vendored
Normal file
|
@ -0,0 +1,250 @@
|
||||||
|
package tarball
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containers/image/types"
|
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
imgspecs "github.com/opencontainers/image-spec/specs-go"
|
||||||
|
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type tarballImageSource struct {
|
||||||
|
reference tarballReference
|
||||||
|
filenames []string
|
||||||
|
diffIDs []digest.Digest
|
||||||
|
diffSizes []int64
|
||||||
|
blobIDs []digest.Digest
|
||||||
|
blobSizes []int64
|
||||||
|
blobTypes []string
|
||||||
|
config []byte
|
||||||
|
configID digest.Digest
|
||||||
|
configSize int64
|
||||||
|
manifest []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *tarballReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
|
||||||
|
// Gather up the digests, sizes, and date information for all of the files.
|
||||||
|
filenames := []string{}
|
||||||
|
diffIDs := []digest.Digest{}
|
||||||
|
diffSizes := []int64{}
|
||||||
|
blobIDs := []digest.Digest{}
|
||||||
|
blobSizes := []int64{}
|
||||||
|
blobTimes := []time.Time{}
|
||||||
|
blobTypes := []string{}
|
||||||
|
for _, filename := range r.filenames {
|
||||||
|
var file *os.File
|
||||||
|
var err error
|
||||||
|
var blobSize int64
|
||||||
|
var blobTime time.Time
|
||||||
|
var reader io.Reader
|
||||||
|
if filename == "-" {
|
||||||
|
blobSize = int64(len(r.stdin))
|
||||||
|
blobTime = time.Now()
|
||||||
|
reader = bytes.NewReader(r.stdin)
|
||||||
|
} else {
|
||||||
|
file, err = os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error opening %q for reading: %v", filename, err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
reader = file
|
||||||
|
fileinfo, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error reading size of %q: %v", filename, err)
|
||||||
|
}
|
||||||
|
blobSize = fileinfo.Size()
|
||||||
|
blobTime = fileinfo.ModTime()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to assuming the layer is compressed.
|
||||||
|
layerType := imgspecv1.MediaTypeImageLayerGzip
|
||||||
|
|
||||||
|
// Set up to digest the file as it is.
|
||||||
|
blobIDdigester := digest.Canonical.Digester()
|
||||||
|
reader = io.TeeReader(reader, blobIDdigester.Hash())
|
||||||
|
|
||||||
|
// Set up to digest the file after we maybe decompress it.
|
||||||
|
diffIDdigester := digest.Canonical.Digester()
|
||||||
|
uncompressed, err := gzip.NewReader(reader)
|
||||||
|
if err == nil {
|
||||||
|
// It is compressed, so the diffID is the digest of the uncompressed version
|
||||||
|
reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
|
||||||
|
} else {
|
||||||
|
// It is not compressed, so the diffID and the blobID are going to be the same
|
||||||
|
diffIDdigester = blobIDdigester
|
||||||
|
layerType = imgspecv1.MediaTypeImageLayer
|
||||||
|
uncompressed = nil
|
||||||
|
}
|
||||||
|
n, err := io.Copy(ioutil.Discard, reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error reading %q: %v", filename, err)
|
||||||
|
}
|
||||||
|
if uncompressed != nil {
|
||||||
|
uncompressed.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grab our uncompressed and possibly-compressed digests and sizes.
|
||||||
|
filenames = append(filenames, filename)
|
||||||
|
diffIDs = append(diffIDs, diffIDdigester.Digest())
|
||||||
|
diffSizes = append(diffSizes, n)
|
||||||
|
blobIDs = append(blobIDs, blobIDdigester.Digest())
|
||||||
|
blobSizes = append(blobSizes, blobSize)
|
||||||
|
blobTimes = append(blobTimes, blobTime)
|
||||||
|
blobTypes = append(blobTypes, layerType)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the rootfs and history for the configuration blob.
|
||||||
|
rootfs := imgspecv1.RootFS{
|
||||||
|
Type: "layers",
|
||||||
|
DiffIDs: diffIDs,
|
||||||
|
}
|
||||||
|
created := time.Time{}
|
||||||
|
history := []imgspecv1.History{}
|
||||||
|
// Pick up the layer comment from the configuration's history list, if one is set.
|
||||||
|
comment := "imported from tarball"
|
||||||
|
if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
|
||||||
|
comment = r.config.History[0].Comment
|
||||||
|
}
|
||||||
|
for i := range diffIDs {
|
||||||
|
createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator)
|
||||||
|
history = append(history, imgspecv1.History{
|
||||||
|
Created: &blobTimes[i],
|
||||||
|
CreatedBy: createdBy,
|
||||||
|
Comment: comment,
|
||||||
|
})
|
||||||
|
// Use the mtime of the most recently modified file as the image's creation time.
|
||||||
|
if created.Before(blobTimes[i]) {
|
||||||
|
created = blobTimes[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pick up other defaults from the config in the reference.
|
||||||
|
config := r.config
|
||||||
|
if config.Created == nil {
|
||||||
|
config.Created = &created
|
||||||
|
}
|
||||||
|
if config.Architecture == "" {
|
||||||
|
config.Architecture = runtime.GOARCH
|
||||||
|
}
|
||||||
|
if config.OS == "" {
|
||||||
|
config.OS = runtime.GOOS
|
||||||
|
}
|
||||||
|
config.RootFS = rootfs
|
||||||
|
config.History = history
|
||||||
|
|
||||||
|
// Encode and digest the image configuration blob.
|
||||||
|
configBytes, err := json.Marshal(&config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
|
||||||
|
}
|
||||||
|
configID := digest.Canonical.FromBytes(configBytes)
|
||||||
|
configSize := int64(len(configBytes))
|
||||||
|
|
||||||
|
// Populate a manifest with the configuration blob and the file as the single layer.
|
||||||
|
layerDescriptors := []imgspecv1.Descriptor{}
|
||||||
|
for i := range blobIDs {
|
||||||
|
layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
|
||||||
|
Digest: blobIDs[i],
|
||||||
|
Size: blobSizes[i],
|
||||||
|
MediaType: blobTypes[i],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
annotations := make(map[string]string)
|
||||||
|
for k, v := range r.annotations {
|
||||||
|
annotations[k] = v
|
||||||
|
}
|
||||||
|
manifest := imgspecv1.Manifest{
|
||||||
|
Versioned: imgspecs.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
|
},
|
||||||
|
Config: imgspecv1.Descriptor{
|
||||||
|
Digest: configID,
|
||||||
|
Size: configSize,
|
||||||
|
MediaType: imgspecv1.MediaTypeImageConfig,
|
||||||
|
},
|
||||||
|
Layers: layerDescriptors,
|
||||||
|
Annotations: annotations,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode the manifest.
|
||||||
|
manifestBytes, err := json.Marshal(&manifest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the image.
|
||||||
|
src := &tarballImageSource{
|
||||||
|
reference: *r,
|
||||||
|
filenames: filenames,
|
||||||
|
diffIDs: diffIDs,
|
||||||
|
diffSizes: diffSizes,
|
||||||
|
blobIDs: blobIDs,
|
||||||
|
blobSizes: blobSizes,
|
||||||
|
blobTypes: blobTypes,
|
||||||
|
config: configBytes,
|
||||||
|
configID: configID,
|
||||||
|
configSize: configSize,
|
||||||
|
manifest: manifestBytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
return src, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (is *tarballImageSource) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (is *tarballImageSource) GetBlob(blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||||
|
// We should only be asked about things in the manifest. Maybe the configuration blob.
|
||||||
|
if blobinfo.Digest == is.configID {
|
||||||
|
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
|
||||||
|
}
|
||||||
|
// Maybe one of the layer blobs.
|
||||||
|
for i := range is.blobIDs {
|
||||||
|
if blobinfo.Digest == is.blobIDs[i] {
|
||||||
|
// We want to read that layer: open the file or memory block and hand it back.
|
||||||
|
if is.filenames[i] == "-" {
|
||||||
|
return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
|
||||||
|
}
|
||||||
|
reader, err := os.Open(is.filenames[i])
|
||||||
|
if err != nil {
|
||||||
|
return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
|
||||||
|
}
|
||||||
|
return reader, is.blobSizes[i], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (is *tarballImageSource) GetManifest() ([]byte, string, error) {
|
||||||
|
return is.manifest, imgspecv1.MediaTypeImageManifest, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*tarballImageSource) GetSignatures(context.Context) ([][]byte, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*tarballImageSource) GetTargetManifest(digest.Digest) ([]byte, string, error) {
|
||||||
|
return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (is *tarballImageSource) Reference() types.ImageReference {
|
||||||
|
return &is.reference
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
|
||||||
|
func (*tarballImageSource) UpdatedLayerInfos() []types.BlobInfo {
|
||||||
|
return nil
|
||||||
|
}
|
66
vendor/github.com/containers/image/tarball/tarball_transport.go
generated
vendored
Normal file
66
vendor/github.com/containers/image/tarball/tarball_transport.go
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
||||||
|
package tarball
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/containers/image/transports"
|
||||||
|
"github.com/containers/image/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
transportName = "tarball"
|
||||||
|
separator = ":"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Transport implements the types.ImageTransport interface for "tarball:" images,
|
||||||
|
// which are makeshift images constructed using one or more possibly-compressed tar
|
||||||
|
// archives.
|
||||||
|
Transport = &tarballTransport{}
|
||||||
|
)
|
||||||
|
|
||||||
|
type tarballTransport struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tarballTransport) Name() string {
|
||||||
|
return transportName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) {
|
||||||
|
var stdin []byte
|
||||||
|
var err error
|
||||||
|
filenames := strings.Split(reference, separator)
|
||||||
|
for _, filename := range filenames {
|
||||||
|
if filename == "-" {
|
||||||
|
stdin, err = ioutil.ReadAll(os.Stdin)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error buffering stdin: %v", err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
f, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error opening %q: %v", filename, err)
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
ref := &tarballReference{
|
||||||
|
transport: t,
|
||||||
|
filenames: filenames,
|
||||||
|
stdin: stdin,
|
||||||
|
}
|
||||||
|
return ref, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
|
// See the explanation in daemonReference.PolicyConfigurationIdentity.
|
||||||
|
return errors.New(`tarball: does not support any scopes except the default "" one`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
transports.Register(Transport)
|
||||||
|
}
|
3
vendor/github.com/containers/image/transports/alltransports/alltransports.go
generated
vendored
3
vendor/github.com/containers/image/transports/alltransports/alltransports.go
generated
vendored
|
@ -13,8 +13,9 @@ import (
|
||||||
_ "github.com/containers/image/oci/archive"
|
_ "github.com/containers/image/oci/archive"
|
||||||
_ "github.com/containers/image/oci/layout"
|
_ "github.com/containers/image/oci/layout"
|
||||||
_ "github.com/containers/image/openshift"
|
_ "github.com/containers/image/openshift"
|
||||||
|
_ "github.com/containers/image/tarball"
|
||||||
// The ostree transport is registered by ostree*.go
|
// The ostree transport is registered by ostree*.go
|
||||||
_ "github.com/containers/image/storage"
|
// The storage transport is registered by storage*.go
|
||||||
"github.com/containers/image/transports"
|
"github.com/containers/image/transports"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
8
vendor/github.com/containers/image/transports/alltransports/storage.go
generated
vendored
Normal file
8
vendor/github.com/containers/image/transports/alltransports/storage.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
// +build !containers_image_storage_stub
|
||||||
|
|
||||||
|
package alltransports
|
||||||
|
|
||||||
|
import (
|
||||||
|
// Register the storage transport
|
||||||
|
_ "github.com/containers/image/storage"
|
||||||
|
)
|
9
vendor/github.com/containers/image/transports/alltransports/storage_stub.go
generated
vendored
Normal file
9
vendor/github.com/containers/image/transports/alltransports/storage_stub.go
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
// +build containers_image_storage_stub
|
||||||
|
|
||||||
|
package alltransports
|
||||||
|
|
||||||
|
import "github.com/containers/image/transports"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
transports.Register(transports.NewStubTransport("storage"))
|
||||||
|
}
|
8
vendor/github.com/containers/image/types/types.go
generated
vendored
8
vendor/github.com/containers/image/types/types.go
generated
vendored
|
@ -123,10 +123,6 @@ type ImageSource interface {
|
||||||
GetBlob(BlobInfo) (io.ReadCloser, int64, error)
|
GetBlob(BlobInfo) (io.ReadCloser, int64, error)
|
||||||
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
||||||
GetSignatures(context.Context) ([][]byte, error)
|
GetSignatures(context.Context) ([][]byte, error)
|
||||||
// UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
|
|
||||||
// The Digest field is guaranteed to be provided; Size may be -1.
|
|
||||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
|
||||||
UpdatedLayerInfos() []BlobInfo
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
|
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
|
||||||
|
@ -211,10 +207,6 @@ type UnparsedImage interface {
|
||||||
Manifest() ([]byte, string, error)
|
Manifest() ([]byte, string, error)
|
||||||
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
|
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
|
||||||
Signatures(ctx context.Context) ([][]byte, error)
|
Signatures(ctx context.Context) ([][]byte, error)
|
||||||
// UpdatedLayerInfos returns either nil (meaning there are no updates), or updated values for the layer blobsums that are listed in the image's manifest.
|
|
||||||
// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
|
||||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
|
||||||
UpdatedLayerInfos() []BlobInfo
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Image is the primary API for inspecting properties of images.
|
// Image is the primary API for inspecting properties of images.
|
||||||
|
|
5
vendor/github.com/containers/image/vendor.conf
generated
vendored
5
vendor/github.com/containers/image/vendor.conf
generated
vendored
|
@ -1,5 +1,5 @@
|
||||||
github.com/sirupsen/logrus v1.0.0
|
github.com/sirupsen/logrus v1.0.0
|
||||||
github.com/containers/storage 9e0c323a4b425557f8310ee8d125634acd39d8f5
|
github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165
|
||||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||||
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
|
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
|
||||||
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
|
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
|
||||||
|
@ -36,5 +36,4 @@ github.com/tchap/go-patricia v2.2.6
|
||||||
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
|
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
|
||||||
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
|
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
|
||||||
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
|
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
|
||||||
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
|
github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8
|
||||||
github.com/pquerna/ffjson master
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue