Merge pull request #424 from runcom/fix-schema1-config
read image config from docker v2s1 manifests
This commit is contained in:
commit
2b5dca3950
40 changed files with 1284 additions and 548 deletions
|
@ -1,5 +1,5 @@
|
||||||
{
|
{
|
||||||
"memo": "c69b3d661a6a19376099d94142598a44f109f155bc519d9b7943ac86e8ebcb8a",
|
"memo": "1290be673a75036ce5bea81021073dd7041dc3f421446912b6b7ae0ed511fe93",
|
||||||
"projects": [
|
"projects": [
|
||||||
{
|
{
|
||||||
"name": "github.com/BurntSushi/toml",
|
"name": "github.com/BurntSushi/toml",
|
||||||
|
@ -50,7 +50,7 @@
|
||||||
{
|
{
|
||||||
"name": "github.com/containers/image",
|
"name": "github.com/containers/image",
|
||||||
"branch": "master",
|
"branch": "master",
|
||||||
"revision": "1d7e25b91705e4d1cddb5396baf112caeb1119f3",
|
"revision": "9fcd2ba2c6983f74026db5f2c0f79b529a098dee",
|
||||||
"packages": [
|
"packages": [
|
||||||
"copy",
|
"copy",
|
||||||
"directory",
|
"directory",
|
||||||
|
@ -159,10 +159,13 @@
|
||||||
"api/types/versions",
|
"api/types/versions",
|
||||||
"api/types/volume",
|
"api/types/volume",
|
||||||
"client",
|
"client",
|
||||||
|
"pkg/longpath",
|
||||||
"pkg/random",
|
"pkg/random",
|
||||||
"pkg/registrar",
|
"pkg/registrar",
|
||||||
"pkg/stringid",
|
"pkg/stringid",
|
||||||
"pkg/stringutils",
|
"pkg/stringutils",
|
||||||
|
"pkg/symlink",
|
||||||
|
"pkg/system",
|
||||||
"pkg/tlsconfig",
|
"pkg/tlsconfig",
|
||||||
"pkg/truncindex",
|
"pkg/truncindex",
|
||||||
"utils/templates"
|
"utils/templates"
|
||||||
|
|
|
@ -210,19 +210,11 @@ func (r *runtimeService) createContainerOrPodSandbox(systemContext *types.System
|
||||||
return ContainerInfo{}, err
|
return ContainerInfo{}, err
|
||||||
}
|
}
|
||||||
defer image.Close()
|
defer image.Close()
|
||||||
var imageConfig *v1.Image
|
|
||||||
configBlob, err := image.ConfigBlob()
|
imageConfig, err := image.OCIConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ContainerInfo{}, err
|
return ContainerInfo{}, err
|
||||||
}
|
}
|
||||||
if len(configBlob) > 0 {
|
|
||||||
config := v1.Image{}
|
|
||||||
err = json.Unmarshal(configBlob, &config)
|
|
||||||
if err != nil {
|
|
||||||
return ContainerInfo{}, err
|
|
||||||
}
|
|
||||||
imageConfig = &config
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the image name and ID.
|
// Update the image name and ID.
|
||||||
if imageName == "" && len(img.Names) > 0 {
|
if imageName == "" && len(img.Names) > 0 {
|
||||||
|
|
|
@ -71,23 +71,7 @@ func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig
|
||||||
kubeArgs := containerKubeConfig.Args
|
kubeArgs := containerKubeConfig.Args
|
||||||
|
|
||||||
if imageOCIConfig == nil {
|
if imageOCIConfig == nil {
|
||||||
// HACK We should error out here, not being able to get an Image config is fatal.
|
return nil, fmt.Errorf("empty image config for %s", containerKubeConfig.Image.Image)
|
||||||
// When https://github.com/kubernetes-incubator/cri-o/issues/395 is fixed
|
|
||||||
// we'll remove that one and return an error here.
|
|
||||||
if containerKubeConfig.Metadata != nil {
|
|
||||||
logrus.Errorf("empty image config for %s", containerKubeConfig.Metadata.Name)
|
|
||||||
|
|
||||||
// HACK until https://github.com/kubernetes-incubator/cri-o/issues/395 is fixed.
|
|
||||||
// If the container is kubeadm's dummy, imageOCIConfig is nil, and both
|
|
||||||
// kubeCommands and kubeArgs are empty. So we set processArgs to /pause as the
|
|
||||||
// dummy container is just a pause one.
|
|
||||||
// (See https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/master/templates.go)
|
|
||||||
if containerKubeConfig.Metadata.Name == "dummy" {
|
|
||||||
return []string{podInfraCommand}, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logrus.Errorf("empty image config for %s", containerKubeConfig.Image.Image)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We got an OCI Image configuration.
|
// We got an OCI Image configuration.
|
||||||
|
@ -98,22 +82,15 @@ func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig
|
||||||
// The kubelet command slice is prioritized.
|
// The kubelet command slice is prioritized.
|
||||||
processEntryPoint = kubeCommands
|
processEntryPoint = kubeCommands
|
||||||
} else {
|
} else {
|
||||||
// Here the kubelet command slice is empty.
|
// Here the kubelet command slice is empty but
|
||||||
if imageOCIConfig != nil {
|
// we know that our OCI Image configuration is not empty.
|
||||||
// If the OCI image config has an ENTRYPOINT we
|
// If the OCI image config has an ENTRYPOINT we use it as
|
||||||
// use it as our process command.
|
// our process command.
|
||||||
// Otherwise we use the CMD slice if it's not
|
// Otherwise we use the CMD slice if it's not empty.
|
||||||
// empty.
|
if imageOCIConfig.Config.Entrypoint != nil {
|
||||||
if imageOCIConfig.Config.Entrypoint != nil {
|
processEntryPoint = imageOCIConfig.Config.Entrypoint
|
||||||
processEntryPoint = imageOCIConfig.Config.Entrypoint
|
} else if imageOCIConfig.Config.Cmd != nil {
|
||||||
} else if imageOCIConfig.Config.Cmd != nil {
|
processEntryPoint = imageOCIConfig.Config.Cmd
|
||||||
processEntryPoint = imageOCIConfig.Config.Cmd
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// We neither have a kubelet command not an image OCI config.
|
|
||||||
// Missing an image OCI config will no longer be supported after
|
|
||||||
// https://github.com/kubernetes-incubator/cri-o/issues/395 is fixed.
|
|
||||||
processEntryPoint = []string{"/bin/sh", "-c"}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -135,16 +112,9 @@ func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig
|
||||||
// we use the CMD slice as the process arguments.
|
// we use the CMD slice as the process arguments.
|
||||||
// Otherwise, we already picked CMD as our process
|
// Otherwise, we already picked CMD as our process
|
||||||
// command and we must not add the CMD slice twice.
|
// command and we must not add the CMD slice twice.
|
||||||
if imageOCIConfig != nil {
|
if imageOCIConfig.Config.Entrypoint != nil {
|
||||||
if imageOCIConfig.Config.Entrypoint != nil {
|
processCmd = imageOCIConfig.Config.Cmd
|
||||||
processCmd = imageOCIConfig.Config.Cmd
|
|
||||||
} else {
|
|
||||||
processCmd = []string{}
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
// Missing an image OCI config will no longer
|
|
||||||
// be supported after https://github.com/kubernetes-incubator/cri-o/issues/395
|
|
||||||
// is fixed.
|
|
||||||
processCmd = []string{}
|
processCmd = []string{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
5
vendor/github.com/containers/image/.travis.yml
generated
vendored
5
vendor/github.com/containers/image/.travis.yml
generated
vendored
|
@ -5,7 +5,10 @@
|
||||||
email: false
|
email: false
|
||||||
go:
|
go:
|
||||||
- 1.7
|
- 1.7
|
||||||
script: make tools .gitvalidation validate test test-skopeo
|
env:
|
||||||
|
- BUILDTAGS='btrfs_noversion libdm_no_deferred_remove'
|
||||||
|
- BUILDTAGS='btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
|
||||||
|
script: make tools .gitvalidation validate test test-skopeo BUILDTAGS="$BUILDTAGS"
|
||||||
dist: trusty
|
dist: trusty
|
||||||
os:
|
os:
|
||||||
- linux
|
- linux
|
||||||
|
|
2
vendor/github.com/containers/image/Makefile
generated
vendored
2
vendor/github.com/containers/image/Makefile
generated
vendored
|
@ -44,7 +44,7 @@ test-skopeo:
|
||||||
rm -rf $${vendor_path} && cp -r . $${vendor_path} && rm -rf $${vendor_path}/vendor && \
|
rm -rf $${vendor_path} && cp -r . $${vendor_path} && rm -rf $${vendor_path}/vendor && \
|
||||||
cd $${skopeo_path} && \
|
cd $${skopeo_path} && \
|
||||||
make BUILDTAGS="$(BUILDTAGS)" binary-local test-all-local && \
|
make BUILDTAGS="$(BUILDTAGS)" binary-local test-all-local && \
|
||||||
$(SUDO) make check && \
|
$(SUDO) make BUILDTAGS="$(BUILDTAGS)" check && \
|
||||||
rm -rf $${skopeo_path}
|
rm -rf $${skopeo_path}
|
||||||
|
|
||||||
validate: lint
|
validate: lint
|
||||||
|
|
10
vendor/github.com/containers/image/README.md
generated
vendored
10
vendor/github.com/containers/image/README.md
generated
vendored
|
@ -32,6 +32,16 @@ libraries you should use with this package in your own projects.
|
||||||
What this project tests against dependencies-wise is located
|
What this project tests against dependencies-wise is located
|
||||||
[here](https://github.com/containers/image/blob/master/vendor.conf).
|
[here](https://github.com/containers/image/blob/master/vendor.conf).
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
For ordinary use, `go build ./...` is sufficient.
|
||||||
|
|
||||||
|
When developing this library, please use `make` to take advantage of the tests and validation.
|
||||||
|
|
||||||
|
Optionally, you can use the `containers_image_openpgp` build tag (using `go build -tags …`, or `make … BUILDTAGS=…`).
|
||||||
|
This will use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
|
||||||
|
the primary downside is that creating new signatures with the Golang-only implementation is not supported.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
ASL 2.0
|
ASL 2.0
|
||||||
|
|
7
vendor/github.com/containers/image/copy/copy.go
generated
vendored
7
vendor/github.com/containers/image/copy/copy.go
generated
vendored
|
@ -235,6 +235,11 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "Error initializing GPG")
|
return errors.Wrap(err, "Error initializing GPG")
|
||||||
}
|
}
|
||||||
|
defer mech.Close()
|
||||||
|
if err := mech.SupportsSigning(); err != nil {
|
||||||
|
return errors.Wrap(err, "Signing not supported")
|
||||||
|
}
|
||||||
|
|
||||||
dockerReference := dest.Reference().DockerReference()
|
dockerReference := dest.Reference().DockerReference()
|
||||||
if dockerReference == nil {
|
if dockerReference == nil {
|
||||||
return errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
|
return errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
|
||||||
|
@ -349,7 +354,7 @@ type diffIDResult struct {
|
||||||
func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
|
func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
|
||||||
// Check if we already have a blob with this digest
|
// Check if we already have a blob with this digest
|
||||||
haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
|
haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
|
||||||
if err != nil && err != types.ErrBlobNotFound {
|
if err != nil {
|
||||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
|
return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
|
||||||
}
|
}
|
||||||
// If we already have a cached diffID for this blob, we don't need to compute it
|
// If we already have a cached diffID for this blob, we don't need to compute it
|
||||||
|
|
6
vendor/github.com/containers/image/directory/directory_dest.go
generated
vendored
6
vendor/github.com/containers/image/directory/directory_dest.go
generated
vendored
|
@ -95,6 +95,10 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
|
||||||
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
|
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
||||||
if info.Digest == "" {
|
if info.Digest == "" {
|
||||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||||
|
@ -102,7 +106,7 @@ func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error)
|
||||||
blobPath := d.ref.layerPath(info.Digest)
|
blobPath := d.ref.layerPath(info.Digest)
|
||||||
finfo, err := os.Stat(blobPath)
|
finfo, err := os.Stat(blobPath)
|
||||||
if err != nil && os.IsNotExist(err) {
|
if err != nil && os.IsNotExist(err) {
|
||||||
return false, -1, types.ErrBlobNotFound
|
return false, -1, nil
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, -1, err
|
return false, -1, err
|
||||||
|
|
12
vendor/github.com/containers/image/docker/daemon/daemon_dest.go
generated
vendored
12
vendor/github.com/containers/image/docker/daemon/daemon_dest.go
generated
vendored
|
@ -151,7 +151,11 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
|
||||||
return types.BlobInfo{}, errors.Errorf(`Can not stream a blob with unknown digest to "docker-daemon:"`)
|
return types.BlobInfo{}, errors.Errorf(`Can not stream a blob with unknown digest to "docker-daemon:"`)
|
||||||
}
|
}
|
||||||
|
|
||||||
if ok, size, err := d.HasBlob(inputInfo); err == nil && ok {
|
ok, size, err := d.HasBlob(inputInfo)
|
||||||
|
if err != nil {
|
||||||
|
return types.BlobInfo{}, err
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
|
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -186,6 +190,10 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
|
||||||
return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
|
return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
||||||
if info.Digest == "" {
|
if info.Digest == "" {
|
||||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||||
|
@ -193,7 +201,7 @@ func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
|
||||||
if blob, ok := d.blobs[info.Digest]; ok {
|
if blob, ok := d.blobs[info.Digest]; ok {
|
||||||
return true, blob.Size, nil
|
return true, blob.Size, nil
|
||||||
}
|
}
|
||||||
return false, -1, types.ErrBlobNotFound
|
return false, -1, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
|
func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
|
||||||
|
|
108
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
108
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
|
@ -18,8 +18,10 @@ import (
|
||||||
"github.com/containers/image/docker/reference"
|
"github.com/containers/image/docker/reference"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/containers/storage/pkg/homedir"
|
"github.com/containers/storage/pkg/homedir"
|
||||||
|
"github.com/docker/distribution/registry/client"
|
||||||
"github.com/docker/go-connections/sockets"
|
"github.com/docker/go-connections/sockets"
|
||||||
"github.com/docker/go-connections/tlsconfig"
|
"github.com/docker/go-connections/tlsconfig"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -32,20 +34,38 @@ const (
|
||||||
dockerCfgFileName = "config.json"
|
dockerCfgFileName = "config.json"
|
||||||
dockerCfgObsolete = ".dockercfg"
|
dockerCfgObsolete = ".dockercfg"
|
||||||
|
|
||||||
baseURL = "%s://%s/v2/"
|
resolvedPingV2URL = "%s://%s/v2/"
|
||||||
baseURLV1 = "%s://%s/v1/_ping"
|
resolvedPingV1URL = "%s://%s/v1/_ping"
|
||||||
tagsURL = "%s/tags/list"
|
tagsPath = "/v2/%s/tags/list"
|
||||||
manifestURL = "%s/manifests/%s"
|
manifestPath = "/v2/%s/manifests/%s"
|
||||||
blobsURL = "%s/blobs/%s"
|
blobsPath = "/v2/%s/blobs/%s"
|
||||||
blobUploadURL = "%s/blobs/uploads/"
|
blobUploadPath = "/v2/%s/blobs/uploads/"
|
||||||
|
extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"
|
||||||
|
|
||||||
minimumTokenLifetimeSeconds = 60
|
minimumTokenLifetimeSeconds = 60
|
||||||
|
|
||||||
|
extensionSignatureSchemaVersion = 2 // extensionSignature.Version
|
||||||
|
extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrV1NotSupported is returned when we're trying to talk to a
|
// ErrV1NotSupported is returned when we're trying to talk to a
|
||||||
// docker V1 registry.
|
// docker V1 registry.
|
||||||
var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
|
var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
|
||||||
|
|
||||||
|
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
|
||||||
|
// signature represents a Docker image signature.
|
||||||
|
type extensionSignature struct {
|
||||||
|
Version int `json:"schemaVersion"` // Version specifies the schema version
|
||||||
|
Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format
|
||||||
|
Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1"
|
||||||
|
Content []byte `json:"content"` // Content contains the signature
|
||||||
|
}
|
||||||
|
|
||||||
|
// signatureList represents list of Docker image signatures.
|
||||||
|
type extensionSignatureList struct {
|
||||||
|
Signatures []extensionSignature `json:"signatures"`
|
||||||
|
}
|
||||||
|
|
||||||
type bearerToken struct {
|
type bearerToken struct {
|
||||||
Token string `json:"token"`
|
Token string `json:"token"`
|
||||||
ExpiresIn int `json:"expires_in"`
|
ExpiresIn int `json:"expires_in"`
|
||||||
|
@ -54,15 +74,20 @@ type bearerToken struct {
|
||||||
|
|
||||||
// dockerClient is configuration for dealing with a single Docker registry.
|
// dockerClient is configuration for dealing with a single Docker registry.
|
||||||
type dockerClient struct {
|
type dockerClient struct {
|
||||||
ctx *types.SystemContext
|
// The following members are set by newDockerClient and do not change afterwards.
|
||||||
registry string
|
ctx *types.SystemContext
|
||||||
username string
|
registry string
|
||||||
password string
|
username string
|
||||||
scheme string // Cache of a value returned by a successful ping() if not empty
|
password string
|
||||||
client *http.Client
|
client *http.Client
|
||||||
signatureBase signatureStorageBase
|
signatureBase signatureStorageBase
|
||||||
challenges []challenge
|
scope authScope
|
||||||
scope authScope
|
// The following members are detected registry properties:
|
||||||
|
// They are set after a successful detectProperties(), and never change afterwards.
|
||||||
|
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
|
||||||
|
challenges []challenge
|
||||||
|
supportsSignatures bool
|
||||||
|
// The following members are private state for setupRequestAuth, both are valid if token != nil.
|
||||||
token *bearerToken
|
token *bearerToken
|
||||||
tokenExpiration time.Time
|
tokenExpiration time.Time
|
||||||
}
|
}
|
||||||
|
@ -209,15 +234,13 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||||
// url is NOT an absolute URL, but a path relative to the /v2/ top-level API path. The host name and schema is taken from the client or autodetected.
|
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
|
||||||
func (c *dockerClient) makeRequest(method, url string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
func (c *dockerClient) makeRequest(method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
||||||
if c.scheme == "" {
|
if err := c.detectProperties(); err != nil {
|
||||||
if err := c.ping(); err != nil {
|
return nil, err
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
url = fmt.Sprintf(baseURL, c.scheme, c.registry) + url
|
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
||||||
return c.makeRequestToResolvedURL(method, url, headers, stream, -1, true)
|
return c.makeRequestToResolvedURL(method, url, headers, stream, -1, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -398,21 +421,28 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
|
||||||
return "", "", nil
|
return "", "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *dockerClient) ping() error {
|
// detectProperties detects various properties of the registry.
|
||||||
|
// See the dockerClient documentation for members which are affected by this.
|
||||||
|
func (c *dockerClient) detectProperties() error {
|
||||||
|
if c.scheme != "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
ping := func(scheme string) error {
|
ping := func(scheme string) error {
|
||||||
url := fmt.Sprintf(baseURL, scheme, c.registry)
|
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
|
||||||
resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
|
resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
|
||||||
logrus.Debugf("Ping %s err %#v", url, err)
|
logrus.Debugf("Ping %s err %#v", url, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v2/", resp.StatusCode)
|
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||||
return errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
|
return errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
|
||||||
}
|
}
|
||||||
c.challenges = parseAuthHeader(resp.Header)
|
c.challenges = parseAuthHeader(resp.Header)
|
||||||
c.scheme = scheme
|
c.scheme = scheme
|
||||||
|
c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
err := ping("https")
|
err := ping("https")
|
||||||
|
@ -426,14 +456,14 @@ func (c *dockerClient) ping() error {
|
||||||
}
|
}
|
||||||
// best effort to understand if we're talking to a V1 registry
|
// best effort to understand if we're talking to a V1 registry
|
||||||
pingV1 := func(scheme string) bool {
|
pingV1 := func(scheme string) bool {
|
||||||
url := fmt.Sprintf(baseURLV1, scheme, c.registry)
|
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
|
||||||
resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
|
resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
|
||||||
logrus.Debugf("Ping %s err %#v", url, err)
|
logrus.Debugf("Ping %s err %#v", url, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v1/_ping", resp.StatusCode)
|
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -450,6 +480,30 @@ func (c *dockerClient) ping() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
|
||||||
|
// using the original data structures.
|
||||||
|
func (c *dockerClient) getExtensionsSignatures(ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
|
||||||
|
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
|
||||||
|
res, err := c.makeRequest("GET", path, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
return nil, client.HandleErrorResponse(res)
|
||||||
|
}
|
||||||
|
body, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var parsedBody extensionSignatureList
|
||||||
|
if err := json.Unmarshal(body, &parsedBody); err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "Error decoding signature list")
|
||||||
|
}
|
||||||
|
return &parsedBody, nil
|
||||||
|
}
|
||||||
|
|
||||||
func getDefaultConfigDir(confPath string) string {
|
func getDefaultConfigDir(confPath string) string {
|
||||||
return filepath.Join(homedir.Get(), confPath)
|
return filepath.Join(homedir.Get(), confPath)
|
||||||
}
|
}
|
||||||
|
|
4
vendor/github.com/containers/image/docker/docker_image.go
generated
vendored
4
vendor/github.com/containers/image/docker/docker_image.go
generated
vendored
|
@ -40,8 +40,8 @@ func (i *Image) SourceRefFullName() string {
|
||||||
|
|
||||||
// GetRepositoryTags list all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
|
// GetRepositoryTags list all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
|
||||||
func (i *Image) GetRepositoryTags() ([]string, error) {
|
func (i *Image) GetRepositoryTags() ([]string, error) {
|
||||||
url := fmt.Sprintf(tagsURL, reference.Path(i.src.ref.ref))
|
path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref))
|
||||||
res, err := i.src.c.makeRequest("GET", url, nil, nil)
|
res, err := i.src.c.makeRequest("GET", path, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
151
vendor/github.com/containers/image/docker/docker_image_dest.go
generated
vendored
151
vendor/github.com/containers/image/docker/docker_image_dest.go
generated
vendored
|
@ -2,6 +2,8 @@ package docker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -42,7 +44,7 @@ type dockerImageDestination struct {
|
||||||
|
|
||||||
// newImageDestination creates a new ImageDestination for the specified image reference.
|
// newImageDestination creates a new ImageDestination for the specified image reference.
|
||||||
func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
|
func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
|
||||||
c, err := newDockerClient(ctx, ref, true, "push")
|
c, err := newDockerClient(ctx, ref, true, "pull,push")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -70,7 +72,17 @@ func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
|
||||||
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
|
||||||
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
|
||||||
func (d *dockerImageDestination) SupportsSignatures() error {
|
func (d *dockerImageDestination) SupportsSignatures() error {
|
||||||
return errors.Errorf("Pushing signatures to a Docker Registry is not supported")
|
if err := d.c.detectProperties(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case d.c.signatureBase != nil:
|
||||||
|
return nil
|
||||||
|
case d.c.supportsSignatures:
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
|
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
|
||||||
|
@ -101,26 +113,25 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
|
||||||
func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
|
func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
|
||||||
if inputInfo.Digest.String() != "" {
|
if inputInfo.Digest.String() != "" {
|
||||||
haveBlob, size, err := d.HasBlob(inputInfo)
|
haveBlob, size, err := d.HasBlob(inputInfo)
|
||||||
if err != nil && err != types.ErrBlobNotFound {
|
if err != nil {
|
||||||
return types.BlobInfo{}, err
|
return types.BlobInfo{}, err
|
||||||
}
|
}
|
||||||
// Now err == nil || err == types.ErrBlobNotFound
|
if haveBlob {
|
||||||
if err == nil && haveBlob {
|
|
||||||
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
|
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME? Chunked upload, progress reporting, etc.
|
// FIXME? Chunked upload, progress reporting, etc.
|
||||||
uploadURL := fmt.Sprintf(blobUploadURL, reference.Path(d.ref.ref))
|
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
|
||||||
logrus.Debugf("Uploading %s", uploadURL)
|
logrus.Debugf("Uploading %s", uploadPath)
|
||||||
res, err := d.c.makeRequest("POST", uploadURL, nil, nil)
|
res, err := d.c.makeRequest("POST", uploadPath, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.BlobInfo{}, err
|
return types.BlobInfo{}, err
|
||||||
}
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
if res.StatusCode != http.StatusAccepted {
|
if res.StatusCode != http.StatusAccepted {
|
||||||
logrus.Debugf("Error initiating layer upload, response %#v", *res)
|
logrus.Debugf("Error initiating layer upload, response %#v", *res)
|
||||||
return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
|
return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadPath, res.StatusCode)
|
||||||
}
|
}
|
||||||
uploadLocation, err := res.Location()
|
uploadLocation, err := res.Location()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -132,7 +143,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
|
||||||
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
|
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
|
||||||
res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
|
res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Debugf("Error uploading layer chunked, response %#v", *res)
|
logrus.Debugf("Error uploading layer chunked, response %#v", res)
|
||||||
return types.BlobInfo{}, err
|
return types.BlobInfo{}, err
|
||||||
}
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
|
@ -163,14 +174,18 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
|
||||||
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
|
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
||||||
if info.Digest == "" {
|
if info.Digest == "" {
|
||||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||||
}
|
}
|
||||||
checkURL := fmt.Sprintf(blobsURL, reference.Path(d.ref.ref), info.Digest.String())
|
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
|
||||||
|
|
||||||
logrus.Debugf("Checking %s", checkURL)
|
logrus.Debugf("Checking %s", checkPath)
|
||||||
res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
|
res, err := d.c.makeRequest("HEAD", checkPath, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, -1, err
|
return false, -1, err
|
||||||
}
|
}
|
||||||
|
@ -184,7 +199,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
|
||||||
return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
|
return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
|
||||||
case http.StatusNotFound:
|
case http.StatusNotFound:
|
||||||
logrus.Debugf("... not present")
|
logrus.Debugf("... not present")
|
||||||
return false, -1, types.ErrBlobNotFound
|
return false, -1, nil
|
||||||
default:
|
default:
|
||||||
return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
|
return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
|
||||||
}
|
}
|
||||||
|
@ -205,14 +220,14 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
url := fmt.Sprintf(manifestURL, reference.Path(d.ref.ref), refTail)
|
path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
|
||||||
|
|
||||||
headers := map[string][]string{}
|
headers := map[string][]string{}
|
||||||
mimeType := manifest.GuessMIMEType(m)
|
mimeType := manifest.GuessMIMEType(m)
|
||||||
if mimeType != "" {
|
if mimeType != "" {
|
||||||
headers["Content-Type"] = []string{mimeType}
|
headers["Content-Type"] = []string{mimeType}
|
||||||
}
|
}
|
||||||
res, err := d.c.makeRequest("PUT", url, headers, bytes.NewReader(m))
|
res, err := d.c.makeRequest("PUT", path, headers, bytes.NewReader(m))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -223,12 +238,32 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
|
||||||
logrus.Debugf("Error body %s", string(body))
|
logrus.Debugf("Error body %s", string(body))
|
||||||
}
|
}
|
||||||
logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res)
|
logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res)
|
||||||
return errors.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode)
|
return errors.Errorf("Error uploading manifest to %s, status %d", path, res.StatusCode)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
|
func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
|
// Do not fail if we don’t really need to support signatures.
|
||||||
|
if len(signatures) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := d.c.detectProperties(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case d.c.signatureBase != nil:
|
||||||
|
return d.putSignaturesToLookaside(signatures)
|
||||||
|
case d.c.supportsSignatures:
|
||||||
|
return d.putSignaturesToAPIExtension(signatures)
|
||||||
|
default:
|
||||||
|
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
|
||||||
|
// which is not nil.
|
||||||
|
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
|
||||||
// FIXME? This overwrites files one at a time, definitely not atomic.
|
// FIXME? This overwrites files one at a time, definitely not atomic.
|
||||||
// A failure when updating signatures with a reordered copy could lose some of them.
|
// A failure when updating signatures with a reordered copy could lose some of them.
|
||||||
|
|
||||||
|
@ -236,15 +271,13 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
if len(signatures) == 0 {
|
if len(signatures) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if d.c.signatureBase == nil {
|
|
||||||
return errors.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.manifestDigest.String() == "" {
|
if d.manifestDigest.String() == "" {
|
||||||
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
|
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
|
||||||
return errors.Errorf("Unknown manifest digest, can't add signatures")
|
return errors.Errorf("Unknown manifest digest, can't add signatures")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
for i, signature := range signatures {
|
for i, signature := range signatures {
|
||||||
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
|
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
|
||||||
if url == nil {
|
if url == nil {
|
||||||
|
@ -278,6 +311,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// putOneSignature stores one signature to url.
|
// putOneSignature stores one signature to url.
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
|
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
|
||||||
switch url.Scheme {
|
switch url.Scheme {
|
||||||
case "file":
|
case "file":
|
||||||
|
@ -301,6 +335,7 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
|
||||||
|
|
||||||
// deleteOneSignature deletes a signature from url, if it exists.
|
// deleteOneSignature deletes a signature from url, if it exists.
|
||||||
// If it successfully determines that the signature does not exist, returns (true, nil)
|
// If it successfully determines that the signature does not exist, returns (true, nil)
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
|
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
|
||||||
switch url.Scheme {
|
switch url.Scheme {
|
||||||
case "file":
|
case "file":
|
||||||
|
@ -318,6 +353,82 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
|
||||||
|
func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte) error {
|
||||||
|
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
|
||||||
|
if len(signatures) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.manifestDigest.String() == "" {
|
||||||
|
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
|
||||||
|
return errors.Errorf("Unknown manifest digest, can't add signatures")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Because image signatures are a shared resource in Atomic Registry, the default upload
|
||||||
|
// always adds signatures. Eventually we should also allow removing signatures,
|
||||||
|
// but the X-Registry-Supports-Signatures API extension does not support that yet.
|
||||||
|
|
||||||
|
existingSignatures, err := d.c.getExtensionsSignatures(d.ref, d.manifestDigest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
existingSigNames := map[string]struct{}{}
|
||||||
|
for _, sig := range existingSignatures.Signatures {
|
||||||
|
existingSigNames[sig.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
sigExists:
|
||||||
|
for _, newSig := range signatures {
|
||||||
|
for _, existingSig := range existingSignatures.Signatures {
|
||||||
|
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
|
||||||
|
continue sigExists
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
|
||||||
|
var signatureName string
|
||||||
|
for {
|
||||||
|
randBytes := make([]byte, 16)
|
||||||
|
n, err := rand.Read(randBytes)
|
||||||
|
if err != nil || n != 16 {
|
||||||
|
return errors.Wrapf(err, "Error generating random signature len %d", n)
|
||||||
|
}
|
||||||
|
signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
|
||||||
|
if _, ok := existingSigNames[signatureName]; !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sig := extensionSignature{
|
||||||
|
Version: extensionSignatureSchemaVersion,
|
||||||
|
Name: signatureName,
|
||||||
|
Type: extensionSignatureTypeAtomic,
|
||||||
|
Content: newSig,
|
||||||
|
}
|
||||||
|
body, err := json.Marshal(sig)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
|
||||||
|
res, err := d.c.makeRequest("PUT", path, nil, bytes.NewReader(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode != http.StatusCreated {
|
||||||
|
body, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err == nil {
|
||||||
|
logrus.Debugf("Error body %s", string(body))
|
||||||
|
}
|
||||||
|
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
|
||||||
|
return errors.Errorf("Error uploading signature to %s, status %d", path, res.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||||
// WARNING: This does not have any transactional semantics:
|
// WARNING: This does not have any transactional semantics:
|
||||||
// - Uploaded data MAY be visible to others before Commit() is called
|
// - Uploaded data MAY be visible to others before Commit() is called
|
||||||
|
|
60
vendor/github.com/containers/image/docker/docker_image_src.go
generated
vendored
60
vendor/github.com/containers/image/docker/docker_image_src.go
generated
vendored
|
@ -93,10 +93,10 @@ func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, error) {
|
func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, error) {
|
||||||
url := fmt.Sprintf(manifestURL, reference.Path(s.ref.ref), tagOrDigest)
|
path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
|
||||||
headers := make(map[string][]string)
|
headers := make(map[string][]string)
|
||||||
headers["Accept"] = s.requestedManifestMIMETypes
|
headers["Accept"] = s.requestedManifestMIMETypes
|
||||||
res, err := s.c.makeRequest("GET", url, headers, nil)
|
res, err := s.c.makeRequest("GET", path, headers, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
|
@ -179,9 +179,9 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
|
||||||
return s.getExternalBlob(info.URLs)
|
return s.getExternalBlob(info.URLs)
|
||||||
}
|
}
|
||||||
|
|
||||||
url := fmt.Sprintf(blobsURL, reference.Path(s.ref.ref), info.Digest.String())
|
path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
|
||||||
logrus.Debugf("Downloading %s", url)
|
logrus.Debugf("Downloading %s", path)
|
||||||
res, err := s.c.makeRequest("GET", url, nil, nil)
|
res, err := s.c.makeRequest("GET", path, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
@ -193,10 +193,22 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
|
func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
|
||||||
if s.c.signatureBase == nil { // Skip dealing with the manifest digest if not necessary.
|
if err := s.c.detectProperties(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case s.c.signatureBase != nil:
|
||||||
|
return s.getSignaturesFromLookaside()
|
||||||
|
case s.c.supportsSignatures:
|
||||||
|
return s.getSignaturesFromAPIExtension()
|
||||||
|
default:
|
||||||
return [][]byte{}, nil
|
return [][]byte{}, nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
|
||||||
|
// which is not nil.
|
||||||
|
func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
|
||||||
if err := s.ensureManifestIsLoaded(); err != nil {
|
if err := s.ensureManifestIsLoaded(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -205,6 +217,7 @@ func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
signatures := [][]byte{}
|
signatures := [][]byte{}
|
||||||
for i := 0; ; i++ {
|
for i := 0; ; i++ {
|
||||||
url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
|
url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
|
||||||
|
@ -225,6 +238,7 @@ func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
|
||||||
|
|
||||||
// getOneSignature downloads one signature from url.
|
// getOneSignature downloads one signature from url.
|
||||||
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
|
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
|
func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
|
||||||
switch url.Scheme {
|
switch url.Scheme {
|
||||||
case "file":
|
case "file":
|
||||||
|
@ -261,6 +275,30 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
|
||||||
|
func (s *dockerImageSource) getSignaturesFromAPIExtension() ([][]byte, error) {
|
||||||
|
if err := s.ensureManifestIsLoaded(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
manifestDigest, err := manifest.Digest(s.cachedManifest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
parsedBody, err := s.c.getExtensionsSignatures(s.ref, manifestDigest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var sigs [][]byte
|
||||||
|
for _, sig := range parsedBody.Signatures {
|
||||||
|
if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
|
||||||
|
sigs = append(sigs, sig.Content)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sigs, nil
|
||||||
|
}
|
||||||
|
|
||||||
// deleteImage deletes the named image from the registry, if supported.
|
// deleteImage deletes the named image from the registry, if supported.
|
||||||
func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
|
func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
|
||||||
c, err := newDockerClient(ctx, ref, true, "push")
|
c, err := newDockerClient(ctx, ref, true, "push")
|
||||||
|
@ -277,8 +315,8 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
getURL := fmt.Sprintf(manifestURL, reference.Path(ref.ref), refTail)
|
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
|
||||||
get, err := c.makeRequest("GET", getURL, headers, nil)
|
get, err := c.makeRequest("GET", getPath, headers, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -296,11 +334,11 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
digest := get.Header.Get("Docker-Content-Digest")
|
digest := get.Header.Get("Docker-Content-Digest")
|
||||||
deleteURL := fmt.Sprintf(manifestURL, reference.Path(ref.ref), digest)
|
deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
|
||||||
|
|
||||||
// When retrieving the digest from a registry >= 2.3 use the following header:
|
// When retrieving the digest from a registry >= 2.3 use the following header:
|
||||||
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
|
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
|
||||||
delete, err := c.makeRequest("DELETE", deleteURL, headers, nil)
|
delete, err := c.makeRequest("DELETE", deletePath, headers, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -311,7 +349,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if delete.StatusCode != http.StatusAccepted {
|
if delete.StatusCode != http.StatusAccepted {
|
||||||
return errors.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
|
return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.signatureBase != nil {
|
if c.signatureBase != nil {
|
||||||
|
|
14
vendor/github.com/containers/image/docker/lookaside.go
generated
vendored
14
vendor/github.com/containers/image/docker/lookaside.go
generated
vendored
|
@ -9,12 +9,12 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
"github.com/containers/image/docker/reference"
|
||||||
|
"github.com/containers/image/types"
|
||||||
"github.com/ghodss/yaml"
|
"github.com/ghodss/yaml"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/containers/image/types"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
|
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
|
||||||
|
@ -63,9 +63,10 @@ func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReferenc
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
|
return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
|
||||||
}
|
}
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
// FIXME? Restrict to explicitly supported schemes?
|
// FIXME? Restrict to explicitly supported schemes?
|
||||||
repo := ref.ref.Name() // Note that this is without a tag or digest.
|
repo := reference.Path(ref.ref) // Note that this is without a tag or digest.
|
||||||
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
|
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
|
||||||
return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
|
return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
|
||||||
}
|
}
|
||||||
url.Path = url.Path + "/" + repo
|
url.Path = url.Path + "/" + repo
|
||||||
|
@ -190,11 +191,12 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
|
||||||
|
|
||||||
// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable.
|
// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable.
|
||||||
// Returns nil iff base == nil.
|
// Returns nil iff base == nil.
|
||||||
|
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||||
func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
|
func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
|
||||||
if base == nil {
|
if base == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
url := *base
|
url := *base
|
||||||
url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest.String(), index+1)
|
url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
|
||||||
return &url
|
return &url
|
||||||
}
|
}
|
||||||
|
|
21
vendor/github.com/containers/image/docker/lookaside_test.go
generated
vendored
21
vendor/github.com/containers/image/docker/lookaside_test.go
generated
vendored
|
@ -46,7 +46,7 @@ func TestConfiguredSignatureStorageBase(t *testing.T) {
|
||||||
dockerRefFromString(t, "//example.com/my/project"), false)
|
dockerRefFromString(t, "//example.com/my/project"), false)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
require.NotNil(t, base)
|
require.NotNil(t, base)
|
||||||
assert.Equal(t, "https://sigstore.example.com/example.com/my/project", (*url.URL)(base).String())
|
assert.Equal(t, "https://sigstore.example.com/my/project", (*url.URL)(base).String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRegistriesDirPath(t *testing.T) {
|
func TestRegistriesDirPath(t *testing.T) {
|
||||||
|
@ -252,26 +252,27 @@ func TestRegistryNamespaceSignatureTopLevel(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSignatureStorageBaseSignatureStorageURL(t *testing.T) {
|
func TestSignatureStorageBaseSignatureStorageURL(t *testing.T) {
|
||||||
const md = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
const mdInput = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||||
|
const mdMapped = "sha256=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||||
|
|
||||||
assert.True(t, signatureStorageURL(nil, md, 0) == nil)
|
assert.True(t, signatureStorageURL(nil, mdInput, 0) == nil)
|
||||||
for _, c := range []struct {
|
for _, c := range []struct {
|
||||||
base string
|
base string
|
||||||
index int
|
index int
|
||||||
expected string
|
expected string
|
||||||
}{
|
}{
|
||||||
{"file:///tmp", 0, "file:///tmp@" + md + "/signature-1"},
|
{"file:///tmp", 0, "file:///tmp@" + mdMapped + "/signature-1"},
|
||||||
{"file:///tmp", 1, "file:///tmp@" + md + "/signature-2"},
|
{"file:///tmp", 1, "file:///tmp@" + mdMapped + "/signature-2"},
|
||||||
{"https://localhost:5555/root", 0, "https://localhost:5555/root@" + md + "/signature-1"},
|
{"https://localhost:5555/root", 0, "https://localhost:5555/root@" + mdMapped + "/signature-1"},
|
||||||
{"https://localhost:5555/root", 1, "https://localhost:5555/root@" + md + "/signature-2"},
|
{"https://localhost:5555/root", 1, "https://localhost:5555/root@" + mdMapped + "/signature-2"},
|
||||||
{"http://localhost:5555/root", 0, "http://localhost:5555/root@" + md + "/signature-1"},
|
{"http://localhost:5555/root", 0, "http://localhost:5555/root@" + mdMapped + "/signature-1"},
|
||||||
{"http://localhost:5555/root", 1, "http://localhost:5555/root@" + md + "/signature-2"},
|
{"http://localhost:5555/root", 1, "http://localhost:5555/root@" + mdMapped + "/signature-2"},
|
||||||
} {
|
} {
|
||||||
url, err := url.Parse(c.base)
|
url, err := url.Parse(c.base)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
expectedURL, err := url.Parse(c.expected)
|
expectedURL, err := url.Parse(c.expected)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
res := signatureStorageURL(url, md, c.index)
|
res := signatureStorageURL(url, mdInput, c.index)
|
||||||
assert.Equal(t, expectedURL, res, c.expected)
|
assert.Equal(t, expectedURL, res, c.expected)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
136
vendor/github.com/containers/image/docs/signature-protocols.md
generated
vendored
Normal file
136
vendor/github.com/containers/image/docs/signature-protocols.md
generated
vendored
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
# Signature access protocols
|
||||||
|
|
||||||
|
The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
|
||||||
|
Some image transports (local storage formats and remote procotocols) implement these signatures natively
|
||||||
|
or trivially; for others, the protocol extensions described below are necessary.
|
||||||
|
|
||||||
|
## docker/distribution registries—separate storage
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
Any existing docker/distribution registry, whether or not it natively supports signatures,
|
||||||
|
can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
|
||||||
|
`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
|
||||||
|
or also separate URLs for smaller namespaces or individual repositories within the server
|
||||||
|
(which e.g. allows image authors to manage their own signature storage while publishing
|
||||||
|
the images on the public `docker.io` server).
|
||||||
|
|
||||||
|
The signature storage URL defines a root of a path hierarchy.
|
||||||
|
It can be either a `file:///…` URL, pointing to a local directory structure,
|
||||||
|
or a `http`/`https` URL, pointing to a remote server.
|
||||||
|
`file:///` signature storage can be both read and written, `http`/`https` only supports reading.
|
||||||
|
|
||||||
|
The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
|
||||||
|
a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
|
||||||
|
(This of course does not prevent other server implementations,
|
||||||
|
e.g. a HTTP server reading signatures from a database.)
|
||||||
|
|
||||||
|
The usual workflow for producing and distributing images using the separate storage mechanism
|
||||||
|
is to configure the repository in `registries.d` with `sigstore-staging` URL pointing to a private
|
||||||
|
`file:///` staging area, and a `sigstore` URL pointing to a public web server.
|
||||||
|
To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
|
||||||
|
and then copy the created directory structure from the `file:///` staging area
|
||||||
|
to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
|
||||||
|
The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to,
|
||||||
|
set up a `sigstore` URL pointing to the public web server.
|
||||||
|
|
||||||
|
### Path structure
|
||||||
|
|
||||||
|
Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
|
||||||
|
and a container image stored in a docker/distribution registry using the _fully-expanded_ name
|
||||||
|
_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
|
||||||
|
_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
|
||||||
|
signatures are accessed using URLs of the form
|
||||||
|
> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_
|
||||||
|
|
||||||
|
where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
|
||||||
|
(i.e. even if the user referenced the image using a tag,
|
||||||
|
the signature storage is always disambiguated using digest references).
|
||||||
|
Note that in the URLs used for signatures,
|
||||||
|
_digest-algo_ and _digest-value_ are separated using the `=` character,
|
||||||
|
not `:` like when acessing the manifest using the docker/distribution API.
|
||||||
|
|
||||||
|
Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
|
||||||
|
Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
|
||||||
|
and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
|
||||||
|
Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
|
||||||
|
then store the new signature using that _index_ value.
|
||||||
|
|
||||||
|
There is no way to list existing signatures other than iterating through the successive _index_ values,
|
||||||
|
and no way to download all of the signatures at once.
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
|
||||||
|
(or as `busybox:latest` if the `latest` tag points to to a manifest with the same digest),
|
||||||
|
and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
|
||||||
|
the following URLs would be accessed to download all signatures:
|
||||||
|
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
|
||||||
|
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
|
||||||
|
> - …
|
||||||
|
|
||||||
|
For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
|
||||||
|
`sigstore` URL, the signatures would be available at
|
||||||
|
> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`
|
||||||
|
|
||||||
|
and so on.
|
||||||
|
|
||||||
|
## (OpenShift) docker/distribution API extension
|
||||||
|
|
||||||
|
As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
|
||||||
|
an extension of the docker/distribution API which allows simpler access to the signatures,
|
||||||
|
using only the docker/distribution API endpoint.
|
||||||
|
|
||||||
|
This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
|
||||||
|
and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
|
||||||
|
and it is the preferred way to implement signature storage in registries.
|
||||||
|
|
||||||
|
See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
|
||||||
|
|
||||||
|
To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
|
||||||
|
path to read an array of signatures. Use only the signature objects
|
||||||
|
which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
|
||||||
|
ignore the other fields of the signature object.
|
||||||
|
|
||||||
|
To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
|
||||||
|
and `content` set to the signature. Also set `name` to a unique name with the form
|
||||||
|
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
|
||||||
|
and _per-image-name_ is any unique identifier.
|
||||||
|
|
||||||
|
To add more than one signature, add them one at a time. This API does not allow deleting signatures.
|
||||||
|
|
||||||
|
Note that because signatures are stored within the cluster-wide image objects,
|
||||||
|
i.e. different namespaces can not associate different sets of signatures to the same image,
|
||||||
|
updating signatures requires a cluster-wide access to the `imagesignatures` resource
|
||||||
|
(by default available to the `system:image-signer` role).
|
||||||
|
|
||||||
|
## OpenShift-embedded registries
|
||||||
|
|
||||||
|
The OpenShift-embedded registry implements the ordinary docker/distribution API,
|
||||||
|
and it also exposes images through the OpenShift REST API (available through the “API master” servers).
|
||||||
|
|
||||||
|
Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
|
||||||
|
which is easier to set up and should usually be preferred.
|
||||||
|
Continue reading for details on using older versions of OpenShift.
|
||||||
|
|
||||||
|
As of https://github.com/openshift/origin/pull/9181,
|
||||||
|
signatures are exposed through the OpenShift API
|
||||||
|
(i.e. to access the complete image, it is necessary to use both APIs,
|
||||||
|
in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).
|
||||||
|
|
||||||
|
To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
|
||||||
|
resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
|
||||||
|
which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
|
||||||
|
the `ImageSignature` object.
|
||||||
|
|
||||||
|
To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
|
||||||
|
with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
|
||||||
|
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
|
||||||
|
and _per-image-name_ is any unique identifier.
|
||||||
|
|
||||||
|
Note that because signatures are stored within the cluster-wide image objects,
|
||||||
|
i.e. different namespaces can not associate different sets of signatures to the same image,
|
||||||
|
updating signatures requires a cluster-wide access to the `imagesignatures` resource
|
||||||
|
(by default available to the `system:image-signer` role),
|
||||||
|
and deleting signatures is strongly discouraged
|
||||||
|
(it deletes the signature from all namespaces which contain the same image).
|
28
vendor/github.com/containers/image/image/docker_schema1.go
generated
vendored
28
vendor/github.com/containers/image/image/docker_schema1.go
generated
vendored
|
@ -10,6 +10,7 @@ import (
|
||||||
"github.com/containers/image/manifest"
|
"github.com/containers/image/manifest"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
|
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -112,6 +113,17 @@ func (m *manifestSchema1) ConfigBlob() ([]byte, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
|
||||||
|
// layers in the resulting configuration isn't guaranteed to be returned to due how
|
||||||
|
// old image manifests work (docker v2s1 especially).
|
||||||
|
func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
|
||||||
|
v2s2, err := m.convertToManifestSchema2(nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v2s2.OCIConfig()
|
||||||
|
}
|
||||||
|
|
||||||
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
||||||
// The Digest field is guaranteed to be provided; Size may be -1.
|
// The Digest field is guaranteed to be provided; Size may be -1.
|
||||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
||||||
|
@ -243,10 +255,10 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
|
||||||
if len(m.History) != len(m.FSLayers) {
|
if len(m.History) != len(m.FSLayers) {
|
||||||
return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
|
return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
|
||||||
}
|
}
|
||||||
if len(uploadedLayerInfos) != len(m.FSLayers) {
|
if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
|
||||||
return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
|
return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
|
||||||
}
|
}
|
||||||
if len(layerDiffIDs) != len(m.FSLayers) {
|
if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
|
||||||
return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
|
return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -273,12 +285,20 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
|
||||||
}
|
}
|
||||||
|
|
||||||
if !v1compat.ThrowAway {
|
if !v1compat.ThrowAway {
|
||||||
|
var size int64
|
||||||
|
if uploadedLayerInfos != nil {
|
||||||
|
size = uploadedLayerInfos[v2Index].Size
|
||||||
|
}
|
||||||
|
var d digest.Digest
|
||||||
|
if layerDiffIDs != nil {
|
||||||
|
d = layerDiffIDs[v2Index]
|
||||||
|
}
|
||||||
layers = append(layers, descriptor{
|
layers = append(layers, descriptor{
|
||||||
MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||||
Size: uploadedLayerInfos[v2Index].Size,
|
Size: size,
|
||||||
Digest: m.FSLayers[v1Index].BlobSum,
|
Digest: m.FSLayers[v1Index].BlobSum,
|
||||||
})
|
})
|
||||||
rootFS.DiffIDs = append(rootFS.DiffIDs, layerDiffIDs[v2Index])
|
rootFS.DiffIDs = append(rootFS.DiffIDs, d)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
|
configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
|
||||||
|
|
26
vendor/github.com/containers/image/image/docker_schema1_test.go
generated
vendored
Normal file
26
vendor/github.com/containers/image/image/docker_schema1_test.go
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
package image
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func manifestSchema1FromFixture(t *testing.T, fixture string) genericManifest {
|
||||||
|
manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
m, err := manifestSchema1FromManifest(manifest)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestManifestSchema1ToOCIConfig(t *testing.T) {
|
||||||
|
m := manifestSchema1FromFixture(t, "schema1-to-oci-config.json")
|
||||||
|
configOCI, err := m.OCIConfig()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "/pause", configOCI.Config.Entrypoint[0])
|
||||||
|
}
|
27
vendor/github.com/containers/image/image/docker_schema2.go
generated
vendored
27
vendor/github.com/containers/image/image/docker_schema2.go
generated
vendored
|
@ -78,6 +78,24 @@ func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
|
||||||
return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
|
return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
|
||||||
|
// layers in the resulting configuration isn't guaranteed to be returned to due how
|
||||||
|
// old image manifests work (docker v2s1 especially).
|
||||||
|
func (m *manifestSchema2) OCIConfig() (*imgspecv1.Image, error) {
|
||||||
|
configBlob, err := m.ConfigBlob()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
|
||||||
|
// than OCI v1. This unmarshal makes sure we drop docker v2s2
|
||||||
|
// fields that aren't needed in OCI v1.
|
||||||
|
configOCI := &imgspecv1.Image{}
|
||||||
|
if err := json.Unmarshal(configBlob, configOCI); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return configOCI, nil
|
||||||
|
}
|
||||||
|
|
||||||
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
|
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
|
||||||
// The result is cached; it is OK to call this however often you need.
|
// The result is cached; it is OK to call this however often you need.
|
||||||
func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
|
func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
|
||||||
|
@ -177,17 +195,10 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
|
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
|
||||||
configBlob, err := m.ConfigBlob()
|
configOCI, err := m.OCIConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
|
|
||||||
// than OCI v1. This unmarshal, then re-marshal makes sure we drop docker v2s2
|
|
||||||
// fields that aren't needed in OCI v1.
|
|
||||||
configOCI := &imgspecv1.Image{}
|
|
||||||
if err := json.Unmarshal(configBlob, configOCI); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
configOCIBytes, err := json.Marshal(configOCI)
|
configOCIBytes, err := json.Marshal(configOCI)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
29
vendor/github.com/containers/image/image/fixtures/schema1-to-oci-config.json
generated
vendored
Normal file
29
vendor/github.com/containers/image/image/fixtures/schema1-to-oci-config.json
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
{
|
||||||
|
"schemaVersion": 1,
|
||||||
|
"name": "google_containers/pause-amd64",
|
||||||
|
"tag": "3.0",
|
||||||
|
"architecture": "amd64",
|
||||||
|
"fsLayers": [
|
||||||
|
{
|
||||||
|
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"blobSum": "sha256:f112334343777b75be77ec1f835e3bbbe7d7bd46e27b6a2ae35c6b3cfea0987c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"history": [
|
||||||
|
{
|
||||||
|
"v1Compatibility": "{\"id\":\"bb497e16a2d55195649174d1fadac52b00fa2c14124d73009712606909286bc5\",\"parent\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"created\":\"2016-05-04T06:26:41.522308365Z\",\"container\":\"a9873535145fe72b464d3055efbac36aab70d059914e221cbbd7fe3cac53ef6b\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT \\u0026{[\\\"/pause\\\"]}\"],\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\"}"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"v1Compatibility": "{\"id\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"parent\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:41.091672218Z\",\"container\":\"e1b38778b023f25642273ed9e7f4846b4bf38b22a8b55755880b2e6ab6019811\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:b7eb6a5df9d5fbe509cac16ed89f8d6513a4362017184b14c6a5fae151eee5c5 in /pause\"],\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":746888}"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"v1Compatibility": "{\"id\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:40.628395649Z\",\"container\":\"95722352e41d57660259fbede4413d06889a28eb07a7302d2a7b3f9c71ceaa46\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ARG ARCH\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\"}"
|
||||||
|
}
|
||||||
|
],"signatures":[{"header":{"alg":"ES256","jwk":{"crv":"P-256","kid":"ORN4:M47W:3KP3:TZRZ:C3UF:5MFQ:INZV:TCMY:LHNV:EYQU:IRGJ:IJLJ","kty":"EC","x":"yJ0ZQ19NBZUQn8LV60sFEabhlgky9svozfK0VGVou7Y","y":"gOJScOkkLVY1f8aAx-6XXpVM5rJaDYLkCNJ1dvcQGMs"}},"protected":"eyJmb3JtYXRMZW5ndGgiOjQxMzMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNS0wNFQwNjoyODo1MVoifQ","signature":"77_7DVx1IZ3PiKNnO7QnvoF7Sgik4GI4bnlVJdtQW461dSyYzd-nSdBmky8Jew3InEW8Cuv_t5w4GmOSwXvL7g"}]
|
||||||
|
|
||||||
|
}
|
4
vendor/github.com/containers/image/image/manifest.go
generated
vendored
4
vendor/github.com/containers/image/image/manifest.go
generated
vendored
|
@ -64,6 +64,10 @@ type genericManifest interface {
|
||||||
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
|
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
|
||||||
// The result is cached; it is OK to call this however often you need.
|
// The result is cached; it is OK to call this however often you need.
|
||||||
ConfigBlob() ([]byte, error)
|
ConfigBlob() ([]byte, error)
|
||||||
|
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
|
||||||
|
// layers in the resulting configuration isn't guaranteed to be returned to due how
|
||||||
|
// old image manifests work (docker v2s1 especially).
|
||||||
|
OCIConfig() (*imgspecv1.Image, error)
|
||||||
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
||||||
// The Digest field is guaranteed to be provided; Size may be -1.
|
// The Digest field is guaranteed to be provided; Size may be -1.
|
||||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
||||||
|
|
15
vendor/github.com/containers/image/image/oci.go
generated
vendored
15
vendor/github.com/containers/image/image/oci.go
generated
vendored
|
@ -81,6 +81,21 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
|
||||||
return m.configBlob, nil
|
return m.configBlob, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
|
||||||
|
// layers in the resulting configuration isn't guaranteed to be returned to due how
|
||||||
|
// old image manifests work (docker v2s1 especially).
|
||||||
|
func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
|
||||||
|
cb, err := m.ConfigBlob()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
configOCI := &imgspecv1.Image{}
|
||||||
|
if err := json.Unmarshal(cb, configOCI); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return configOCI, nil
|
||||||
|
}
|
||||||
|
|
||||||
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
||||||
// The Digest field is guaranteed to be provided; Size may be -1.
|
// The Digest field is guaranteed to be provided; Size may be -1.
|
||||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
||||||
|
|
6
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
6
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
|
@ -112,6 +112,10 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
|
||||||
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
|
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
||||||
if info.Digest == "" {
|
if info.Digest == "" {
|
||||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||||
|
@ -122,7 +126,7 @@ func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error)
|
||||||
}
|
}
|
||||||
finfo, err := os.Stat(blobPath)
|
finfo, err := os.Stat(blobPath)
|
||||||
if err != nil && os.IsNotExist(err) {
|
if err != nil && os.IsNotExist(err) {
|
||||||
return false, -1, types.ErrBlobNotFound
|
return false, -1, nil
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, -1, err
|
return false, -1, err
|
||||||
|
|
4
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
4
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
|
@ -371,6 +371,10 @@ func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.Bl
|
||||||
return d.docker.PutBlob(stream, inputInfo)
|
return d.docker.PutBlob(stream, inputInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
|
||||||
return d.docker.HasBlob(info)
|
return d.docker.HasBlob(info)
|
||||||
}
|
}
|
||||||
|
|
7
vendor/github.com/containers/image/signature/docker_test.go
generated
vendored
7
vendor/github.com/containers/image/signature/docker_test.go
generated
vendored
|
@ -11,6 +11,12 @@ import (
|
||||||
func TestSignDockerManifest(t *testing.T) {
|
func TestSignDockerManifest(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
|
if err := mech.SupportsSigning(); err != nil {
|
||||||
|
t.Skipf("Signing not supported: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
|
manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@ -41,6 +47,7 @@ func TestSignDockerManifest(t *testing.T) {
|
||||||
func TestVerifyDockerManifestSignature(t *testing.T) {
|
func TestVerifyDockerManifestSignature(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
|
manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
signature, err := ioutil.ReadFile("fixtures/image.signature")
|
signature, err := ioutil.ReadFile("fixtures/image.signature")
|
||||||
|
|
80
vendor/github.com/containers/image/signature/json.go
generated
vendored
80
vendor/github.com/containers/image/signature/json.go
generated
vendored
|
@ -14,64 +14,6 @@ func (err jsonFormatError) Error() string {
|
||||||
return string(err)
|
return string(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateExactMapKeys returns an error if the keys of m are not exactly expectedKeys, which must be pairwise distinct
|
|
||||||
func validateExactMapKeys(m map[string]interface{}, expectedKeys ...string) error {
|
|
||||||
if len(m) != len(expectedKeys) {
|
|
||||||
return jsonFormatError("Unexpected keys in a JSON object")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, k := range expectedKeys {
|
|
||||||
if _, ok := m[k]; !ok {
|
|
||||||
return jsonFormatError(fmt.Sprintf("Key %s missing in a JSON object", k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Assuming expectedKeys are pairwise distinct, we know m contains len(expectedKeys) different values in expectedKeys.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// int64Field returns a member fieldName of m, if it is an int64, or an error.
|
|
||||||
func int64Field(m map[string]interface{}, fieldName string) (int64, error) {
|
|
||||||
untyped, ok := m[fieldName]
|
|
||||||
if !ok {
|
|
||||||
return -1, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
|
|
||||||
}
|
|
||||||
f, ok := untyped.(float64)
|
|
||||||
if !ok {
|
|
||||||
return -1, jsonFormatError(fmt.Sprintf("Field %s is not a number", fieldName))
|
|
||||||
}
|
|
||||||
v := int64(f)
|
|
||||||
if float64(v) != f {
|
|
||||||
return -1, jsonFormatError(fmt.Sprintf("Field %s is not an integer", fieldName))
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapField returns a member fieldName of m, if it is a JSON map, or an error.
|
|
||||||
func mapField(m map[string]interface{}, fieldName string) (map[string]interface{}, error) {
|
|
||||||
untyped, ok := m[fieldName]
|
|
||||||
if !ok {
|
|
||||||
return nil, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
|
|
||||||
}
|
|
||||||
v, ok := untyped.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
return nil, jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringField returns a member fieldName of m, if it is a string, or an error.
|
|
||||||
func stringField(m map[string]interface{}, fieldName string) (string, error) {
|
|
||||||
untyped, ok := m[fieldName]
|
|
||||||
if !ok {
|
|
||||||
return "", jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
|
|
||||||
}
|
|
||||||
v, ok := untyped.(string)
|
|
||||||
if !ok {
|
|
||||||
return "", jsonFormatError(fmt.Sprintf("Field %s is not a string", fieldName))
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
|
// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
|
||||||
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
|
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
|
||||||
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
|
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
|
||||||
|
@ -122,3 +64,25 @@ func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
|
||||||
|
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
|
||||||
|
// must be present exactly once, and none other fields are accepted.
|
||||||
|
func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
|
||||||
|
seenKeys := map[string]struct{}{}
|
||||||
|
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
||||||
|
if valuePtr, ok := exactFields[key]; ok {
|
||||||
|
seenKeys[key] = struct{}{}
|
||||||
|
return valuePtr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for key := range exactFields {
|
||||||
|
if _, ok := seenKeys[key]; !ok {
|
||||||
|
return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
129
vendor/github.com/containers/image/signature/json_test.go
generated
vendored
129
vendor/github.com/containers/image/signature/json_test.go
generated
vendored
|
@ -2,8 +2,6 @@ package signature
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
@ -24,91 +22,6 @@ func x(m mSI, fields ...string) mSI {
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidateExactMapKeys(t *testing.T) {
|
|
||||||
// Empty map and keys
|
|
||||||
err := validateExactMapKeys(mSI{})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Success
|
|
||||||
err = validateExactMapKeys(mSI{"a": nil, "b": 1}, "b", "a")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Extra map keys
|
|
||||||
err = validateExactMapKeys(mSI{"a": nil, "b": 1}, "a")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Extra expected keys
|
|
||||||
err = validateExactMapKeys(mSI{"a": 1}, "b", "a")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Unexpected key values
|
|
||||||
err = validateExactMapKeys(mSI{"a": 1}, "b")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInt64Field(t *testing.T) {
|
|
||||||
// Field not found
|
|
||||||
_, err := int64Field(mSI{"a": "x"}, "b")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Field has a wrong type
|
|
||||||
_, err = int64Field(mSI{"a": "string"}, "a")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
for _, value := range []float64{
|
|
||||||
0.5, // Fractional input
|
|
||||||
math.Inf(1), // Infinity
|
|
||||||
math.NaN(), // NaN
|
|
||||||
} {
|
|
||||||
_, err = int64Field(mSI{"a": value}, "a")
|
|
||||||
assert.Error(t, err, fmt.Sprintf("%f", value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Success
|
|
||||||
// The float64 type has 53 bits of effective precision, so ±1FFFFFFFFFFFFF is the
|
|
||||||
// range of integer values which can all be represented exactly (beyond that,
|
|
||||||
// some are representable if they are divisible by a high enough power of 2,
|
|
||||||
// but most are not).
|
|
||||||
for _, value := range []int64{0, 1, -1, 0x1FFFFFFFFFFFFF, -0x1FFFFFFFFFFFFF} {
|
|
||||||
testName := fmt.Sprintf("%d", value)
|
|
||||||
v, err := int64Field(mSI{"a": float64(value), "b": nil}, "a")
|
|
||||||
require.NoError(t, err, testName)
|
|
||||||
assert.Equal(t, value, v, testName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMapField(t *testing.T) {
|
|
||||||
// Field not found
|
|
||||||
_, err := mapField(mSI{"a": mSI{}}, "b")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Field has a wrong type
|
|
||||||
_, err = mapField(mSI{"a": 1}, "a")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Success
|
|
||||||
// FIXME? We can't use mSI as the type of child, that type apparently can't be converted to the raw map type.
|
|
||||||
child := map[string]interface{}{"b": mSI{}}
|
|
||||||
m, err := mapField(mSI{"a": child, "b": nil}, "a")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, child, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStringField(t *testing.T) {
|
|
||||||
// Field not found
|
|
||||||
_, err := stringField(mSI{"a": "x"}, "b")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Field has a wrong type
|
|
||||||
_, err = stringField(mSI{"a": 1}, "a")
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
// Success
|
|
||||||
s, err := stringField(mSI{"a": "x", "b": nil}, "a")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "x", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// implementsUnmarshalJSON is a minimalistic type used to detect that
|
// implementsUnmarshalJSON is a minimalistic type used to detect that
|
||||||
// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved
|
// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved
|
||||||
// pointers.
|
// pointers.
|
||||||
|
@ -180,3 +93,45 @@ func TestParanoidUnmarshalJSONObject(t *testing.T) {
|
||||||
assert.Error(t, err, input)
|
assert.Error(t, err, input)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParanoidUnmarshalJSONObjectExactFields(t *testing.T) {
|
||||||
|
var stringValue string
|
||||||
|
var float64Value float64
|
||||||
|
var rawValue json.RawMessage
|
||||||
|
var unmarshallCalled implementsUnmarshalJSON
|
||||||
|
exactFields := map[string]interface{}{
|
||||||
|
"string": &stringValue,
|
||||||
|
"float64": &float64Value,
|
||||||
|
"raw": &rawValue,
|
||||||
|
"unmarshaller": &unmarshallCalled,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty object
|
||||||
|
err := paranoidUnmarshalJSONObjectExactFields([]byte(`{}`), map[string]interface{}{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Success
|
||||||
|
err = paranoidUnmarshalJSONObjectExactFields([]byte(`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`), exactFields)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "a", stringValue)
|
||||||
|
assert.Equal(t, 3.5, float64Value)
|
||||||
|
assert.Equal(t, json.RawMessage(`{"a":"b"}`), rawValue)
|
||||||
|
assert.Equal(t, implementsUnmarshalJSON(true), unmarshallCalled)
|
||||||
|
|
||||||
|
// Various kinds of invalid input
|
||||||
|
for _, input := range []string{
|
||||||
|
``, // Empty input
|
||||||
|
`&`, // Entirely invalid JSON
|
||||||
|
`1`, // Not an object
|
||||||
|
`{&}`, // Invalid key JSON
|
||||||
|
`{1:1}`, // Key not a string
|
||||||
|
`{"string": "a", "string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Duplicate key
|
||||||
|
`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true, "thisisunknown", 1}`, // Unknown key
|
||||||
|
`{"string": &, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Invalid value JSON
|
||||||
|
`{"string": 1, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Type mismatch
|
||||||
|
`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}{}`, // Extra data after object
|
||||||
|
} {
|
||||||
|
err := paranoidUnmarshalJSONObjectExactFields([]byte(input), exactFields)
|
||||||
|
assert.Error(t, err, input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
120
vendor/github.com/containers/image/signature/mechanism.go
generated
vendored
120
vendor/github.com/containers/image/signature/mechanism.go
generated
vendored
|
@ -9,19 +9,20 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/mtrmac/gpgme"
|
|
||||||
"golang.org/x/crypto/openpgp"
|
"golang.org/x/crypto/openpgp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
|
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
|
||||||
|
// Each mechanism should eventually be closed by calling Close().
|
||||||
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
|
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
|
||||||
// eliminate ambiguities, support CA signatures and perhaps other key properties)
|
// eliminate ambiguities, support CA signatures and perhaps other key properties)
|
||||||
type SigningMechanism interface {
|
type SigningMechanism interface {
|
||||||
// ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
|
// Close removes resources associated with the mechanism, if any.
|
||||||
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
|
Close() error
|
||||||
// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
|
// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
|
||||||
ImportKeysFromBytes(blob []byte) ([]string, error)
|
SupportsSigning() error
|
||||||
// Sign creates a (non-detached) signature of input using keyidentity
|
// Sign creates a (non-detached) signature of input using keyIdentity.
|
||||||
|
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
|
||||||
Sign(input []byte, keyIdentity string) ([]byte, error)
|
Sign(input []byte, keyIdentity string) ([]byte, error)
|
||||||
// Verify parses unverifiedSignature and returns the content and the signer's identity
|
// Verify parses unverifiedSignature and returns the content and the signer's identity
|
||||||
Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
|
Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
|
||||||
|
@ -33,109 +34,34 @@ type SigningMechanism interface {
|
||||||
UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
|
UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// A GPG/OpenPGP signing mechanism.
|
// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
|
||||||
type gpgSigningMechanism struct {
|
type SigningNotSupportedError string
|
||||||
ctx *gpgme.Context
|
|
||||||
|
func (err SigningNotSupportedError) Error() string {
|
||||||
|
return string(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
|
// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
|
||||||
|
// GPG configuration ($GNUPGHOME / ~/.gnupg)
|
||||||
|
// The caller must call .Close() on the returned SigningMechanism.
|
||||||
func NewGPGSigningMechanism() (SigningMechanism, error) {
|
func NewGPGSigningMechanism() (SigningMechanism, error) {
|
||||||
return newGPGSigningMechanismInDirectory("")
|
return newGPGSigningMechanismInDirectory("")
|
||||||
}
|
}
|
||||||
|
|
||||||
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
|
// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
|
||||||
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
|
// recognizes _only_ public keys from the supplied blob, and returns the identities
|
||||||
ctx, err := gpgme.New()
|
// of these keys.
|
||||||
if err != nil {
|
// The caller must call .Close() on the returned SigningMechanism.
|
||||||
return nil, err
|
func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
|
||||||
}
|
return newEphemeralGPGSigningMechanism(blob)
|
||||||
if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if optionalDir != "" {
|
|
||||||
err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ctx.SetArmor(false)
|
|
||||||
ctx.SetTextMode(false)
|
|
||||||
return gpgSigningMechanism{ctx: ctx}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes
|
// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
|
||||||
func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
|
|
||||||
inputData, err := gpgme.NewDataBytes(blob)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res, err := m.ctx.Import(inputData)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
keyIdentities := []string{}
|
|
||||||
for _, i := range res.Imports {
|
|
||||||
if i.Result == nil {
|
|
||||||
keyIdentities = append(keyIdentities, i.Fingerprint)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return keyIdentities, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign implements SigningMechanism.Sign
|
|
||||||
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
|
|
||||||
key, err := m.ctx.GetKey(keyIdentity, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
inputData, err := gpgme.NewDataBytes(input)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var sigBuffer bytes.Buffer
|
|
||||||
sigData, err := gpgme.NewDataWriter(&sigBuffer)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return sigBuffer.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify implements SigningMechanism.Verify
|
|
||||||
func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
|
|
||||||
signedBuffer := bytes.Buffer{}
|
|
||||||
signedData, err := gpgme.NewDataWriter(&signedBuffer)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
if len(sigs) != 1 {
|
|
||||||
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
|
|
||||||
}
|
|
||||||
sig := sigs[0]
|
|
||||||
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
|
|
||||||
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
|
|
||||||
// FIXME: Better error reporting eventually
|
|
||||||
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
|
|
||||||
}
|
|
||||||
return signedBuffer.Bytes(), sig.Fingerprint, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
|
|
||||||
// along with a short identifier of the key used for signing.
|
// along with a short identifier of the key used for signing.
|
||||||
// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
|
// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
|
||||||
// is NOT the same as a "key identity" used in other calls ot this interface, and
|
// is NOT the same as a "key identity" used in other calls ot this interface, and
|
||||||
// the values may have no recognizable relationship if the public key is not available.
|
// the values may have no recognizable relationship if the public key is not available.
|
||||||
func (m gpgSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
|
func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
|
||||||
// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
|
// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
|
||||||
md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
|
md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
175
vendor/github.com/containers/image/signature/mechanism_gpgme.go
generated
vendored
Normal file
175
vendor/github.com/containers/image/signature/mechanism_gpgme.go
generated
vendored
Normal file
|
@ -0,0 +1,175 @@
|
||||||
|
// +build !containers_image_openpgp
|
||||||
|
|
||||||
|
package signature
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/mtrmac/gpgme"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A GPG/OpenPGP signing mechanism, implemented using gpgme.
|
||||||
|
type gpgmeSigningMechanism struct {
|
||||||
|
ctx *gpgme.Context
|
||||||
|
ephemeralDir string // If not "", a directory to be removed on Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
|
||||||
|
// The caller must call .Close() on the returned SigningMechanism.
|
||||||
|
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
|
||||||
|
ctx, err := newGPGMEContext(optionalDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &gpgmeSigningMechanism{
|
||||||
|
ctx: ctx,
|
||||||
|
ephemeralDir: "",
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
|
||||||
|
// recognizes _only_ public keys from the supplied blob, and returns the identities
|
||||||
|
// of these keys.
|
||||||
|
// The caller must call .Close() on the returned SigningMechanism.
|
||||||
|
func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
|
||||||
|
dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
removeDir := true
|
||||||
|
defer func() {
|
||||||
|
if removeDir {
|
||||||
|
os.RemoveAll(dir)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
ctx, err := newGPGMEContext(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
mech := &gpgmeSigningMechanism{
|
||||||
|
ctx: ctx,
|
||||||
|
ephemeralDir: dir,
|
||||||
|
}
|
||||||
|
keyIdentities, err := mech.importKeysFromBytes(blob)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
removeDir = false
|
||||||
|
return mech, keyIdentities, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty.
|
||||||
|
func newGPGMEContext(optionalDir string) (*gpgme.Context, error) {
|
||||||
|
ctx, err := gpgme.New()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if optionalDir != "" {
|
||||||
|
err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx.SetArmor(false)
|
||||||
|
ctx.SetTextMode(false)
|
||||||
|
return ctx, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *gpgmeSigningMechanism) Close() error {
|
||||||
|
if m.ephemeralDir != "" {
|
||||||
|
os.RemoveAll(m.ephemeralDir) // Ignore an error, if any
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
|
||||||
|
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
|
||||||
|
// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism);
|
||||||
|
// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism.
|
||||||
|
func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
|
||||||
|
inputData, err := gpgme.NewDataBytes(blob)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
res, err := m.ctx.Import(inputData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
keyIdentities := []string{}
|
||||||
|
for _, i := range res.Imports {
|
||||||
|
if i.Result == nil {
|
||||||
|
keyIdentities = append(keyIdentities, i.Fingerprint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return keyIdentities, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
|
||||||
|
func (m *gpgmeSigningMechanism) SupportsSigning() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign creates a (non-detached) signature of input using keyIdentity.
|
||||||
|
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
|
||||||
|
func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
|
||||||
|
key, err := m.ctx.GetKey(keyIdentity, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
inputData, err := gpgme.NewDataBytes(input)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var sigBuffer bytes.Buffer
|
||||||
|
sigData, err := gpgme.NewDataWriter(&sigBuffer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return sigBuffer.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify parses unverifiedSignature and returns the content and the signer's identity
|
||||||
|
func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
|
||||||
|
signedBuffer := bytes.Buffer{}
|
||||||
|
signedData, err := gpgme.NewDataWriter(&signedBuffer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
if len(sigs) != 1 {
|
||||||
|
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
|
||||||
|
}
|
||||||
|
sig := sigs[0]
|
||||||
|
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
|
||||||
|
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
|
||||||
|
// FIXME: Better error reporting eventually
|
||||||
|
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
|
||||||
|
}
|
||||||
|
return signedBuffer.Bytes(), sig.Fingerprint, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
|
||||||
|
// along with a short identifier of the key used for signing.
|
||||||
|
// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
|
||||||
|
// is NOT the same as a "key identity" used in other calls ot this interface, and
|
||||||
|
// the values may have no recognizable relationship if the public key is not available.
|
||||||
|
func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
|
||||||
|
return gpgUntrustedSignatureContents(untrustedSignature)
|
||||||
|
}
|
37
vendor/github.com/containers/image/signature/mechanism_gpgme_test.go
generated
vendored
Normal file
37
vendor/github.com/containers/image/signature/mechanism_gpgme_test.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
// +build !containers_image_openpgp
|
||||||
|
|
||||||
|
package signature
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGPGMESigningMechanismClose(t *testing.T) {
|
||||||
|
// Closing an ephemeral mechanism removes the directory.
|
||||||
|
// (The non-ephemeral case is tested in the common TestGPGSigningMechanismClose)
|
||||||
|
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
gpgMech, ok := mech.(*gpgmeSigningMechanism)
|
||||||
|
require.True(t, ok)
|
||||||
|
dir := gpgMech.ephemeralDir
|
||||||
|
assert.NotEmpty(t, dir)
|
||||||
|
_, err = os.Lstat(dir)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = mech.Close()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = os.Lstat(dir)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.True(t, os.IsNotExist(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGPGMESigningMechanismSupportsSigning(t *testing.T) {
|
||||||
|
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
err = mech.SupportsSigning()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
153
vendor/github.com/containers/image/signature/mechanism_openpgp.go
generated
vendored
Normal file
153
vendor/github.com/containers/image/signature/mechanism_openpgp.go
generated
vendored
Normal file
|
@ -0,0 +1,153 @@
|
||||||
|
// +build containers_image_openpgp
|
||||||
|
|
||||||
|
package signature
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containers/storage/pkg/homedir"
|
||||||
|
"golang.org/x/crypto/openpgp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
|
||||||
|
type openpgpSigningMechanism struct {
|
||||||
|
keyring openpgp.EntityList
|
||||||
|
}
|
||||||
|
|
||||||
|
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
|
||||||
|
// The caller must call .Close() on the returned SigningMechanism.
|
||||||
|
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
|
||||||
|
m := &openpgpSigningMechanism{
|
||||||
|
keyring: openpgp.EntityList{},
|
||||||
|
}
|
||||||
|
|
||||||
|
gpgHome := optionalDir
|
||||||
|
if gpgHome == "" {
|
||||||
|
gpgHome = os.Getenv("GNUPGHOME")
|
||||||
|
if gpgHome == "" {
|
||||||
|
gpgHome = path.Join(homedir.Get(), ".gnupg")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
|
||||||
|
if err != nil {
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, err := m.importKeysFromBytes(pubring)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
|
||||||
|
// recognizes _only_ public keys from the supplied blob, and returns the identities
|
||||||
|
// of these keys.
|
||||||
|
// The caller must call .Close() on the returned SigningMechanism.
|
||||||
|
func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
|
||||||
|
m := &openpgpSigningMechanism{
|
||||||
|
keyring: openpgp.EntityList{},
|
||||||
|
}
|
||||||
|
keyIdentities, err := m.importKeysFromBytes(blob)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return m, keyIdentities, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *openpgpSigningMechanism) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
|
||||||
|
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
|
||||||
|
func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
|
||||||
|
keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
|
||||||
|
if err != nil {
|
||||||
|
k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
|
||||||
|
if e2 != nil {
|
||||||
|
return nil, err // The original error -- FIXME: is this better?
|
||||||
|
}
|
||||||
|
keyring = k
|
||||||
|
}
|
||||||
|
|
||||||
|
keyIdentities := []string{}
|
||||||
|
for _, entity := range keyring {
|
||||||
|
if entity.PrimaryKey == nil {
|
||||||
|
// Coverage: This should never happen, openpgp.ReadEntity fails with a
|
||||||
|
// openpgp.errors.StructuralError instead of returning an entity with this
|
||||||
|
// field set to nil.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Uppercase the fingerprint to be compatible with gpgme
|
||||||
|
keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)))
|
||||||
|
m.keyring = append(m.keyring, entity)
|
||||||
|
}
|
||||||
|
return keyIdentities, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
|
||||||
|
func (m *openpgpSigningMechanism) SupportsSigning() error {
|
||||||
|
return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign creates a (non-detached) signature of input using keyIdentity.
|
||||||
|
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
|
||||||
|
func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
|
||||||
|
return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify parses unverifiedSignature and returns the content and the signer's identity
|
||||||
|
func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
|
||||||
|
md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
if !md.IsSigned {
|
||||||
|
return nil, "", errors.New("not signed")
|
||||||
|
}
|
||||||
|
content, err := ioutil.ReadAll(md.UnverifiedBody)
|
||||||
|
if err != nil {
|
||||||
|
// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
|
||||||
|
// (and possibly also signed, but it _must_ be encrypted) and the signing
|
||||||
|
// “modification detection code” detects a mismatch. But in that case,
|
||||||
|
// we would expect the signature verification to fail as well, and that is checked
|
||||||
|
// first. Besides, we are not supplying any decryption keys, so we really
|
||||||
|
// can never reach this “encrypted data MDC mismatch” path.
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
if md.SignatureError != nil {
|
||||||
|
return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
|
||||||
|
}
|
||||||
|
if md.SignedBy == nil {
|
||||||
|
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
|
||||||
|
}
|
||||||
|
if md.Signature.SigLifetimeSecs != nil {
|
||||||
|
expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
|
||||||
|
if time.Now().After(expiry) {
|
||||||
|
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uppercase the fingerprint to be compatible with gpgme
|
||||||
|
return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
|
||||||
|
// along with a short identifier of the key used for signing.
|
||||||
|
// WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
|
||||||
|
// is NOT the same as a "key identity" used in other calls ot this interface, and
|
||||||
|
// the values may have no recognizable relationship if the public key is not available.
|
||||||
|
func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
|
||||||
|
return gpgUntrustedSignatureContents(untrustedSignature)
|
||||||
|
}
|
28
vendor/github.com/containers/image/signature/mechanism_openpgp_test.go
generated
vendored
Normal file
28
vendor/github.com/containers/image/signature/mechanism_openpgp_test.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
// +build containers_image_openpgp
|
||||||
|
|
||||||
|
package signature
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestOpenpgpSigningMechanismSupportsSigning(t *testing.T) {
|
||||||
|
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
err = mech.SupportsSigning()
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.IsType(t, SigningNotSupportedError(""), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOpenpgpSigningMechanismSign(t *testing.T) {
|
||||||
|
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
_, err = mech.Sign([]byte{}, TestKeyFingerprint)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.IsType(t, SigningNotSupportedError(""), err)
|
||||||
|
}
|
128
vendor/github.com/containers/image/signature/mechanism_test.go
generated
vendored
128
vendor/github.com/containers/image/signature/mechanism_test.go
generated
vendored
|
@ -1,9 +1,12 @@
|
||||||
package signature
|
package signature
|
||||||
|
|
||||||
|
// These tests are expected to pass unmodified for _both_ mechanism_gpgme.go and mechanism_openpgp.go.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
@ -14,27 +17,88 @@ const (
|
||||||
testGPGHomeDirectory = "./fixtures"
|
testGPGHomeDirectory = "./fixtures"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestSigningNotSupportedError(t *testing.T) {
|
||||||
|
// A stupid test just to keep code coverage
|
||||||
|
s := "test"
|
||||||
|
err := SigningNotSupportedError(s)
|
||||||
|
assert.Equal(t, s, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
func TestNewGPGSigningMechanism(t *testing.T) {
|
func TestNewGPGSigningMechanism(t *testing.T) {
|
||||||
// A dumb test just for code coverage. We test more with newGPGSigningMechanismInDirectory().
|
// A dumb test just for code coverage. We test more with newGPGSigningMechanismInDirectory().
|
||||||
_, err := NewGPGSigningMechanism()
|
mech, err := NewGPGSigningMechanism()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
mech.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPGSigningMechanismInDirectory(t *testing.T) {
|
func TestNewGPGSigningMechanismInDirectory(t *testing.T) {
|
||||||
// A dumb test just for code coverage.
|
// A dumb test just for code coverage.
|
||||||
_, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
mech.Close()
|
||||||
// The various GPG failure cases are not obviously easy to reach.
|
// The various GPG failure cases are not obviously easy to reach.
|
||||||
|
|
||||||
|
// Test that using the default directory (presumably in user’s home)
|
||||||
|
// cannot use TestKeyFingerprint.
|
||||||
|
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
||||||
|
require.NoError(t, err)
|
||||||
|
mech, err = newGPGSigningMechanismInDirectory("")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
_, _, err = mech.Verify(signature)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Similarly, using a newly created empty directory makes TestKeyFingerprint
|
||||||
|
// unavailable
|
||||||
|
emptyDir, err := ioutil.TempDir("", "signing-empty-directory")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer os.RemoveAll(emptyDir)
|
||||||
|
mech, err = newGPGSigningMechanismInDirectory(emptyDir)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
_, _, err = mech.Verify(signature)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// If pubring.gpg is unreadable in the directory, either initializing
|
||||||
|
// the mechanism fails (with openpgp), or it succeeds (sadly, gpgme) and
|
||||||
|
// later verification fails.
|
||||||
|
unreadableDir, err := ioutil.TempDir("", "signing-unreadable-directory")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer os.RemoveAll(unreadableDir)
|
||||||
|
f, err := os.OpenFile(filepath.Join(unreadableDir, "pubring.gpg"), os.O_RDONLY|os.O_CREATE, 0000)
|
||||||
|
require.NoError(t, err)
|
||||||
|
f.Close()
|
||||||
|
mech, err = newGPGSigningMechanismInDirectory(unreadableDir)
|
||||||
|
if err == nil {
|
||||||
|
defer mech.Close()
|
||||||
|
_, _, err = mech.Verify(signature)
|
||||||
|
}
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Setting the directory parameter to testGPGHomeDirectory makes the key available.
|
||||||
|
mech, err = newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
_, _, err = mech.Verify(signature)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// If we use the default directory mechanism, GNUPGHOME is respected.
|
||||||
|
origGNUPGHOME := os.Getenv("GNUPGHOME")
|
||||||
|
defer os.Setenv("GNUPGHOME", origGNUPGHOME)
|
||||||
|
os.Setenv("GNUPGHOME", testGPGHomeDirectory)
|
||||||
|
mech, err = newGPGSigningMechanismInDirectory("")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
_, _, err = mech.Verify(signature)
|
||||||
|
assert.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGPGSigningMechanismImportKeysFromBytes(t *testing.T) {
|
func TestNewEphemeralGPGSigningMechanism(t *testing.T) {
|
||||||
testDir, err := ioutil.TempDir("", "gpg-import-keys")
|
// Empty input: This is accepted anyway by GPG, just returns no keys.
|
||||||
|
mech, keyIdentities, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer os.RemoveAll(testDir)
|
defer mech.Close()
|
||||||
|
assert.Empty(t, keyIdentities)
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testDir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Try validating a signature when the key is unknown.
|
// Try validating a signature when the key is unknown.
|
||||||
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -44,31 +108,57 @@ func TestGPGSigningMechanismImportKeysFromBytes(t *testing.T) {
|
||||||
// Successful import
|
// Successful import
|
||||||
keyBlob, err := ioutil.ReadFile("./fixtures/public-key.gpg")
|
keyBlob, err := ioutil.ReadFile("./fixtures/public-key.gpg")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
keyIdentities, err := mech.ImportKeysFromBytes(keyBlob)
|
mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(keyBlob)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities)
|
assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities)
|
||||||
|
|
||||||
// After import, the signature should validate.
|
// After import, the signature should validate.
|
||||||
content, signingFingerprint, err = mech.Verify(signature)
|
content, signingFingerprint, err = mech.Verify(signature)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, []byte("This is not JSON\n"), content)
|
assert.Equal(t, []byte("This is not JSON\n"), content)
|
||||||
assert.Equal(t, TestKeyFingerprint, signingFingerprint)
|
assert.Equal(t, TestKeyFingerprint, signingFingerprint)
|
||||||
|
|
||||||
// Two keys: just concatenate the valid input twice.
|
// Two keys: Read the binary-format pubring.gpg, and concatenate it twice.
|
||||||
keyIdentities, err = mech.ImportKeysFromBytes(bytes.Join([][]byte{keyBlob, keyBlob}, nil))
|
// (Using two copies of public-key.gpg, in the ASCII-armored format, works with
|
||||||
|
// gpgmeSigningMechanism but not openpgpSigningMechanism.)
|
||||||
|
keyBlob, err = ioutil.ReadFile("./fixtures/pubring.gpg")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(bytes.Join([][]byte{keyBlob, keyBlob}, nil))
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprint}, keyIdentities)
|
assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprint}, keyIdentities)
|
||||||
|
|
||||||
// Invalid input: This is accepted anyway by GPG, just returns no keys.
|
// Invalid input: This is, sadly, accepted anyway by GPG, just returns no keys.
|
||||||
keyIdentities, err = mech.ImportKeysFromBytes([]byte("This is invalid"))
|
// For openpgpSigningMechanism we can detect this and fail.
|
||||||
require.NoError(t, err)
|
mech, keyIdentities, err = NewEphemeralGPGSigningMechanism([]byte("This is invalid"))
|
||||||
assert.Equal(t, []string{}, keyIdentities)
|
assert.True(t, err != nil || len(keyIdentities) == 0)
|
||||||
|
if err == nil {
|
||||||
|
mech.Close()
|
||||||
|
}
|
||||||
|
assert.Empty(t, keyIdentities)
|
||||||
// The various GPG/GPGME failures cases are not obviously easy to reach.
|
// The various GPG/GPGME failures cases are not obviously easy to reach.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGPGSigningMechanismClose(t *testing.T) {
|
||||||
|
// Closing a non-ephemeral mechanism does not remove anything in the directory.
|
||||||
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = mech.Close()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = os.Lstat(testGPGHomeDirectory)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
_, err = os.Lstat(filepath.Join(testGPGHomeDirectory, "pubring.gpg"))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestGPGSigningMechanismSign(t *testing.T) {
|
func TestGPGSigningMechanismSign(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
|
if err := mech.SupportsSigning(); err != nil {
|
||||||
|
t.Skipf("Signing not supported: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
// Successful signing
|
// Successful signing
|
||||||
content := []byte("content")
|
content := []byte("content")
|
||||||
|
@ -95,6 +185,7 @@ func assertSigningError(t *testing.T, content []byte, fingerprint string, err er
|
||||||
func TestGPGSigningMechanismVerify(t *testing.T) {
|
func TestGPGSigningMechanismVerify(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
// Successful verification
|
// Successful verification
|
||||||
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
||||||
|
@ -149,8 +240,9 @@ func TestGPGSigningMechanismVerify(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGPGSigningMechanismUntrustedSignatureContents(t *testing.T) {
|
func TestGPGSigningMechanismUntrustedSignatureContents(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
// A valid signature
|
// A valid signature
|
||||||
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
|
||||||
|
|
84
vendor/github.com/containers/image/signature/policy_config.go
generated
vendored
84
vendor/github.com/containers/image/signature/policy_config.go
generated
vendored
|
@ -255,13 +255,8 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
|
||||||
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
|
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
|
||||||
*pr = prInsecureAcceptAnything{}
|
*pr = prInsecureAcceptAnything{}
|
||||||
var tmp prInsecureAcceptAnything
|
var tmp prInsecureAcceptAnything
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
|
||||||
return &tmp.Type
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -290,13 +285,8 @@ var _ json.Unmarshaler = (*prReject)(nil)
|
||||||
func (pr *prReject) UnmarshalJSON(data []byte) error {
|
func (pr *prReject) UnmarshalJSON(data []byte) error {
|
||||||
*pr = prReject{}
|
*pr = prReject{}
|
||||||
var tmp prReject
|
var tmp prReject
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
|
||||||
return &tmp.Type
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -465,15 +455,9 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
|
||||||
*pr = prSignedBaseLayer{}
|
*pr = prSignedBaseLayer{}
|
||||||
var tmp prSignedBaseLayer
|
var tmp prSignedBaseLayer
|
||||||
var baseLayerIdentity json.RawMessage
|
var baseLayerIdentity json.RawMessage
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
"baseLayerIdentity": &baseLayerIdentity,
|
||||||
return &tmp.Type
|
|
||||||
case "baseLayerIdentity":
|
|
||||||
return &baseLayerIdentity
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -481,9 +465,6 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
|
||||||
if tmp.Type != prTypeSignedBaseLayer {
|
if tmp.Type != prTypeSignedBaseLayer {
|
||||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||||
}
|
}
|
||||||
if baseLayerIdentity == nil {
|
|
||||||
return InvalidPolicyFormatError(fmt.Sprintf("baseLayerIdentity not specified"))
|
|
||||||
}
|
|
||||||
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
|
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -541,13 +522,8 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
|
||||||
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
|
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
|
||||||
*prm = prmMatchExact{}
|
*prm = prmMatchExact{}
|
||||||
var tmp prmMatchExact
|
var tmp prmMatchExact
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
|
||||||
return &tmp.Type
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -576,13 +552,8 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
|
||||||
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
|
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
|
||||||
*prm = prmMatchRepoDigestOrExact{}
|
*prm = prmMatchRepoDigestOrExact{}
|
||||||
var tmp prmMatchRepoDigestOrExact
|
var tmp prmMatchRepoDigestOrExact
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
|
||||||
return &tmp.Type
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -611,13 +582,8 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
|
||||||
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
|
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
|
||||||
*prm = prmMatchRepository{}
|
*prm = prmMatchRepository{}
|
||||||
var tmp prmMatchRepository
|
var tmp prmMatchRepository
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
|
||||||
return &tmp.Type
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -656,15 +622,9 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
|
||||||
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
|
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
|
||||||
*prm = prmExactReference{}
|
*prm = prmExactReference{}
|
||||||
var tmp prmExactReference
|
var tmp prmExactReference
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
"dockerReference": &tmp.DockerReference,
|
||||||
return &tmp.Type
|
|
||||||
case "dockerReference":
|
|
||||||
return &tmp.DockerReference
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -704,15 +664,9 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
|
||||||
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
|
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
|
||||||
*prm = prmExactRepository{}
|
*prm = prmExactRepository{}
|
||||||
var tmp prmExactRepository
|
var tmp prmExactRepository
|
||||||
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
switch key {
|
"type": &tmp.Type,
|
||||||
case "type":
|
"dockerRepository": &tmp.DockerRepository,
|
||||||
return &tmp.Type
|
|
||||||
case "dockerRepository":
|
|
||||||
return &tmp.DockerRepository
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
4
vendor/github.com/containers/image/signature/policy_config_test.go
generated
vendored
4
vendor/github.com/containers/image/signature/policy_config_test.go
generated
vendored
|
@ -297,8 +297,8 @@ func TestPolicyUnmarshalJSON(t *testing.T) {
|
||||||
|
|
||||||
// Various allowed modifications to the policy
|
// Various allowed modifications to the policy
|
||||||
allowedModificationFns := []func(mSI){
|
allowedModificationFns := []func(mSI){
|
||||||
// Delete the map of specific policies
|
// Delete the map of transport-specific scopes
|
||||||
func(v mSI) { delete(v, "specific") },
|
func(v mSI) { delete(v, "transports") },
|
||||||
// Use an empty map of transport-specific scopes
|
// Use an empty map of transport-specific scopes
|
||||||
func(v mSI) { v["transports"] = map[string]PolicyTransportScopes{} },
|
func(v mSI) { v["transports"] = map[string]PolicyTransportScopes{} },
|
||||||
}
|
}
|
||||||
|
|
14
vendor/github.com/containers/image/signature/policy_eval_signedby.go
generated
vendored
14
vendor/github.com/containers/image/signature/policy_eval_signedby.go
generated
vendored
|
@ -5,7 +5,6 @@ package signature
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@ -42,20 +41,11 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: move this to per-context initialization
|
// FIXME: move this to per-context initialization
|
||||||
dir, err := ioutil.TempDir("", "skopeo-signedBy-")
|
mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
|
||||||
if err != nil {
|
|
||||||
return sarRejected, nil, err
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dir)
|
|
||||||
mech, err := newGPGSigningMechanismInDirectory(dir)
|
|
||||||
if err != nil {
|
|
||||||
return sarRejected, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
trustedIdentities, err := mech.ImportKeysFromBytes(data)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return sarRejected, nil, err
|
return sarRejected, nil, err
|
||||||
}
|
}
|
||||||
|
defer mech.Close()
|
||||||
if len(trustedIdentities) == 0 {
|
if len(trustedIdentities) == 0 {
|
||||||
return sarRejected, nil, PolicyRequirementError("No public keys imported")
|
return sarRejected, nil, PolicyRequirementError("No public keys imported")
|
||||||
}
|
}
|
||||||
|
|
96
vendor/github.com/containers/image/signature/signature.go
generated
vendored
96
vendor/github.com/containers/image/signature/signature.go
generated
vendored
|
@ -120,78 +120,69 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
|
||||||
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
|
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
|
||||||
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
|
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
|
||||||
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
||||||
var untyped interface{}
|
var critical, optional json.RawMessage
|
||||||
if err := json.Unmarshal(data, &untyped); err != nil {
|
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||||
return err
|
"critical": &critical,
|
||||||
}
|
"optional": &optional,
|
||||||
o, ok := untyped.(map[string]interface{})
|
}); err != nil {
|
||||||
if !ok {
|
|
||||||
return InvalidSignatureError{msg: "Invalid signature format"}
|
|
||||||
}
|
|
||||||
if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
c, err := mapField(o, "critical")
|
var creatorID string
|
||||||
if err != nil {
|
var timestamp float64
|
||||||
return err
|
var gotCreatorID, gotTimestamp = false, false
|
||||||
}
|
if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
|
||||||
if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
|
switch key {
|
||||||
return err
|
case "creator":
|
||||||
}
|
gotCreatorID = true
|
||||||
|
return &creatorID
|
||||||
optional, err := mapField(o, "optional")
|
case "timestamp":
|
||||||
if err != nil {
|
gotTimestamp = true
|
||||||
return err
|
return ×tamp
|
||||||
}
|
default:
|
||||||
if _, ok := optional["creator"]; ok {
|
var ignore interface{}
|
||||||
creatorID, err := stringField(optional, "creator")
|
return &ignore
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if gotCreatorID {
|
||||||
s.UntrustedCreatorID = &creatorID
|
s.UntrustedCreatorID = &creatorID
|
||||||
}
|
}
|
||||||
if _, ok := optional["timestamp"]; ok {
|
if gotTimestamp {
|
||||||
timestamp, err := int64Field(optional, "timestamp")
|
intTimestamp := int64(timestamp)
|
||||||
if err != nil {
|
if float64(intTimestamp) != timestamp {
|
||||||
return err
|
return InvalidSignatureError{msg: "Field optional.timestamp is not is not an integer"}
|
||||||
}
|
}
|
||||||
s.UntrustedTimestamp = ×tamp
|
s.UntrustedTimestamp = &intTimestamp
|
||||||
}
|
}
|
||||||
|
|
||||||
t, err := stringField(c, "type")
|
var t string
|
||||||
if err != nil {
|
var image, identity json.RawMessage
|
||||||
|
if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
|
||||||
|
"type": &t,
|
||||||
|
"image": &image,
|
||||||
|
"identity": &identity,
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if t != signatureType {
|
if t != signatureType {
|
||||||
return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
|
return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
|
||||||
}
|
}
|
||||||
|
|
||||||
image, err := mapField(c, "image")
|
var digestString string
|
||||||
if err != nil {
|
if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
|
||||||
return err
|
"docker-manifest-digest": &digestString,
|
||||||
}
|
}); err != nil {
|
||||||
if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
digestString, err := stringField(image, "docker-manifest-digest")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
|
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
|
||||||
|
|
||||||
identity, err := mapField(c, "identity")
|
if err := paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
|
||||||
if err != nil {
|
"docker-reference": &s.UntrustedDockerReference,
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reference, err := stringField(identity, "docker-reference")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.UntrustedDockerReference = reference
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -261,10 +252,11 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
|
||||||
// (including things like “✅ Verified by $authority”)
|
// (including things like “✅ Verified by $authority”)
|
||||||
func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
|
func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
|
||||||
// NOTE: This should eventualy do format autodetection.
|
// NOTE: This should eventualy do format autodetection.
|
||||||
mech, err := NewGPGSigningMechanism()
|
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
|
untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
7
vendor/github.com/containers/image/signature/signature_test.go
generated
vendored
7
vendor/github.com/containers/image/signature/signature_test.go
generated
vendored
|
@ -153,6 +153,7 @@ func TestUnmarshalJSON(t *testing.T) {
|
||||||
func(v mSI) { x(v, "optional")["creator"] = 1 },
|
func(v mSI) { x(v, "optional")["creator"] = 1 },
|
||||||
// Invalid "timestamp"
|
// Invalid "timestamp"
|
||||||
func(v mSI) { x(v, "optional")["timestamp"] = "unexpected" },
|
func(v mSI) { x(v, "optional")["timestamp"] = "unexpected" },
|
||||||
|
func(v mSI) { x(v, "optional")["timestamp"] = 0.5 }, // Fractional input
|
||||||
}
|
}
|
||||||
for _, fn := range breakFns {
|
for _, fn := range breakFns {
|
||||||
err = tryUnmarshalModifiedSignature(t, &s, validJSON, fn)
|
err = tryUnmarshalModifiedSignature(t, &s, validJSON, fn)
|
||||||
|
@ -188,6 +189,11 @@ func TestUnmarshalJSON(t *testing.T) {
|
||||||
func TestSign(t *testing.T) {
|
func TestSign(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
|
if err := mech.SupportsSigning(); err != nil {
|
||||||
|
t.Skipf("Signing not supported: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
sig := newUntrustedSignature("digest!@#", "reference#@!")
|
sig := newUntrustedSignature("digest!@#", "reference#@!")
|
||||||
|
|
||||||
|
@ -232,6 +238,7 @@ func TestSign(t *testing.T) {
|
||||||
func TestVerifyAndExtractSignature(t *testing.T) {
|
func TestVerifyAndExtractSignature(t *testing.T) {
|
||||||
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer mech.Close()
|
||||||
|
|
||||||
type triple struct {
|
type triple struct {
|
||||||
keyIdentity string
|
keyIdentity string
|
||||||
|
|
6
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
6
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
|
@ -290,6 +290,10 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI
|
||||||
return s.putBlob(stream, blobinfo, true)
|
return s.putBlob(stream, blobinfo, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
|
func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
|
||||||
if blobinfo.Digest == "" {
|
if blobinfo.Digest == "" {
|
||||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||||
|
@ -299,7 +303,7 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64,
|
||||||
return true, blob.Size, nil
|
return true, blob.Size, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return false, -1, types.ErrBlobNotFound
|
return false, -1, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
|
func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
|
||||||
|
|
16
vendor/github.com/containers/image/types/types.go
generated
vendored
16
vendor/github.com/containers/image/types/types.go
generated
vendored
|
@ -6,7 +6,7 @@ import (
|
||||||
|
|
||||||
"github.com/containers/image/docker/reference"
|
"github.com/containers/image/docker/reference"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ImageTransport is a top-level namespace for ways to to store/load an image.
|
// ImageTransport is a top-level namespace for ways to to store/load an image.
|
||||||
|
@ -160,7 +160,10 @@ type ImageDestination interface {
|
||||||
// to any other readers for download using the supplied digest.
|
// to any other readers for download using the supplied digest.
|
||||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||||
PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
|
PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
|
||||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. A false result will often be accompanied by an ErrBlobNotFound error.
|
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||||
|
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||||
|
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||||
|
// it returns a non-nil error only on an unexpected failure.
|
||||||
HasBlob(info BlobInfo) (bool, int64, error)
|
HasBlob(info BlobInfo) (bool, int64, error)
|
||||||
// ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
|
// ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
|
||||||
ReapplyBlob(info BlobInfo) (BlobInfo, error)
|
ReapplyBlob(info BlobInfo) (BlobInfo, error)
|
||||||
|
@ -202,6 +205,10 @@ type Image interface {
|
||||||
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
|
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
|
||||||
// The result is cached; it is OK to call this however often you need.
|
// The result is cached; it is OK to call this however often you need.
|
||||||
ConfigBlob() ([]byte, error)
|
ConfigBlob() ([]byte, error)
|
||||||
|
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
|
||||||
|
// layers in the resulting configuration isn't guaranteed to be returned to due how
|
||||||
|
// old image manifests work (docker v2s1 especially).
|
||||||
|
OCIConfig() (*v1.Image, error)
|
||||||
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
|
||||||
// The Digest field is guaranteed to be provided; Size may be -1.
|
// The Digest field is guaranteed to be provided; Size may be -1.
|
||||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
||||||
|
@ -300,8 +307,3 @@ type ProgressProperties struct {
|
||||||
Artifact BlobInfo
|
Artifact BlobInfo
|
||||||
Offset uint64
|
Offset uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrBlobNotFound can be returned by an ImageDestination's HasBlob() method
|
|
||||||
ErrBlobNotFound = errors.New("no such blob present")
|
|
||||||
)
|
|
||||||
|
|
Loading…
Reference in a new issue