vendor: remove dep and use vndr

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
This commit is contained in:
Antonio Murdaca 2017-06-06 09:19:04 +02:00
parent 16f44674a4
commit 148e72d81e
No known key found for this signature in database
GPG key ID: B2BEAD150DE936B9
16131 changed files with 73815 additions and 4235138 deletions

View file

@ -1,2 +0,0 @@
vendor
tools.timestamp

View file

@ -1,9 +0,0 @@
approve_by_comment: true
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
reject_regex: ^Rejected
reset_on_push: false
reviewers:
teams:
- image-maintainers
name: default
required: 2

View file

@ -1,20 +0,0 @@
---
language: go
sudo: required
notifications:
email: false
go:
- 1.7
env:
- BUILDTAGS='btrfs_noversion libdm_no_deferred_remove'
- BUILDTAGS='btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
script: make tools .gitvalidation validate test test-skopeo BUILDTAGS="$BUILDTAGS"
dist: trusty
os:
- linux
addons:
apt:
packages:
- btrfs-tools
- libdevmapper-dev
- libgpgme11-dev

View file

@ -1,3 +0,0 @@
Antonio Murdaca <runcom@redhat.com> (@runcom)
Brandon Philips <brandon.philips@coreos.com> (@philips)
Miloslav Trmac <mitr@redhat.com> (@mtrmac)

View file

@ -1,72 +0,0 @@
.PHONY: all tools test validate lint
# Which github repository and branch to use for testing with skopeo
SKOPEO_REPO = projectatomic/skopeo
SKOPEO_BRANCH = master
# Set SUDO=sudo to run container integration tests using sudo.
SUDO =
BUILDTAGS = btrfs_noversion libdm_no_deferred_remove
BUILDFLAGS := -tags "$(BUILDTAGS)"
PACKAGES := $(shell go list ./... | grep -v github.com/containers/image/vendor)
all: tools .gitvalidation test validate
tools: tools.timestamp
# Install the build/lint/validation helper tools; the timestamp file avoids re-fetching.
tools.timestamp: Makefile
@go get -u $(BUILDFLAGS) github.com/golang/lint/golint
@go get $(BUILDFLAGS) github.com/vbatts/git-validation
@go get -u github.com/rancher/trash
@touch tools.timestamp
# Populate ./vendor from vendor.conf using trash.
vendor: tools.timestamp vendor.conf
@trash
@touch vendor
clean:
rm -rf vendor tools.timestamp
test: vendor
@go test $(BUILDFLAGS) -cover $(PACKAGES)
# This is not run as part of (make all), but Travis CI does run this.
# Demonstrating a working version of skopeo (possibly with modified SKOPEO_REPO/SKOPEO_BRANCH, e.g.
# make test-skopeo SKOPEO_REPO=runcom/skopeo-1 SKOPEO_BRANCH=oci-3 SUDO=sudo
# ) is a requirement before merging; note that Travis will only test
# the master branch of the upstream repo.
test-skopeo:
@echo === Testing skopeo build
@export GOPATH=$$(mktemp -d) && \
skopeo_path=$${GOPATH}/src/github.com/projectatomic/skopeo && \
vendor_path=$${skopeo_path}/vendor/github.com/containers/image && \
git clone -b $(SKOPEO_BRANCH) https://github.com/$(SKOPEO_REPO) $${skopeo_path} && \
rm -rf $${vendor_path} && cp -r . $${vendor_path} && rm -rf $${vendor_path}/vendor && \
cd $${skopeo_path} && \
make BUILDTAGS="$(BUILDTAGS)" binary-local test-all-local && \
$(SUDO) make BUILDTAGS="$(BUILDTAGS)" check && \
rm -rf $${skopeo_path}
validate: lint
@go vet $(PACKAGES)
@test -z "$$(gofmt -s -l . | grep -ve '^vendor' | tee /dev/stderr)"
lint:
@out="$$(golint $(PACKAGES))"; \
if [ -n "$$out" ]; then \
echo "$$out"; \
exit 1; \
fi
.PHONY: .gitvalidation
EPOCH_TEST_COMMIT ?= e68e0e1110e64f906f9b482e548f17d73e02e6b1
# When this is running in travis, it will only check the travis commit range
.gitvalidation:
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make clean && make tools'" && false)
ifeq ($(TRAVIS),true)
@git-validation -q -run DCO,short-subject,dangling-whitespace
else
@git-validation -q -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif

View file

@ -1,124 +0,0 @@
package copy
import (
"bytes"
"io"
"os"
"testing"
"time"
"github.com/pkg/errors"
"github.com/containers/image/pkg/compression"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewDigestingReader exercises only the constructor's rejection of
// malformed digests; successful reads are covered by TestDigestingReaderRead.
func TestNewDigestingReader(t *testing.T) {
	src := bytes.NewReader([]byte("abc"))
	invalidDigests := []digest.Digest{
		"abc",             // Not algo:hexvalue
		"crc32:",          // Unknown algorithm, empty value
		"crc32:012345678", // Unknown algorithm
		"sha256:",         // Empty value
		"sha256:0",        // Invalid hex value
		"sha256:01",       // Invalid length of hex value
	}
	for _, bad := range invalidDigests {
		_, err := newDigestingReader(src, bad)
		assert.Error(t, err, bad.String())
	}
}
// TestDigestingReaderRead verifies that digestingReader passes data through
// unmodified and validates it: matching input reads cleanly, while input that
// does not match the expected digest fails the read and sets validationFailed.
func TestDigestingReaderRead(t *testing.T) {
	cases := []struct {
		input  []byte
		digest digest.Digest
	}{
		{[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
		{[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
		{make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
	}
	// Valid input
	for _, c := range cases {
		source := bytes.NewReader(c.input)
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		dest := bytes.Buffer{}
		n, err := io.Copy(&dest, reader)
		assert.NoError(t, err, c.digest.String())
		assert.Equal(t, int64(len(c.input)), n, c.digest.String())
		assert.Equal(t, c.input, dest.Bytes(), c.digest.String())
		assert.False(t, reader.validationFailed, c.digest.String())
	}
	// Modified input: appending one byte must break digest validation.
	for _, c := range cases {
		source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		dest := bytes.Buffer{}
		_, err = io.Copy(&dest, reader)
		assert.Error(t, err, c.digest.String())
		// Fix: include the digest context message like every other assertion in
		// this loop, so a failure identifies the offending test case.
		assert.True(t, reader.validationFailed, c.digest.String())
	}
}
// goDiffIDComputationGoroutineWithTimeout runs diffIDComputationGoroutine on
// layerStream with the given decompressor and returns its result, or nil if
// the computation does not finish within one second.
func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor compression.DecompressorFunc) *diffIDResult {
	ch := make(chan diffIDResult)
	// Fix: forward the decompressor parameter instead of hard-coding nil;
	// previously the parameter was silently ignored. (All existing callers
	// pass nil, so their behavior is unchanged.)
	go diffIDComputationGoroutine(ch, layerStream, decompressor)
	timeout := time.After(time.Second)
	select {
	case res := <-ch:
		return &res
	case <-timeout:
		return nil
	}
}
// TestDiffIDComputationGoroutine verifies that diffIDComputationGoroutine
// reports the digest of an uncompressed fixture, and that a read error on the
// input stream is propagated into the result.
func TestDiffIDComputationGoroutine(t *testing.T) {
stream, err := os.Open("fixtures/Hello.uncompressed")
require.NoError(t, err)
res := goDiffIDComputationGoroutineWithTimeout(stream, nil)
require.NotNil(t, res) // nil would mean the goroutine timed out
assert.NoError(t, res.err)
assert.Equal(t, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", res.digest.String())
// Error reading input
reader, writer := io.Pipe()
writer.CloseWithError(errors.New("Expected error reading input in diffIDComputationGoroutine"))
res = goDiffIDComputationGoroutineWithTimeout(reader, nil)
require.NotNil(t, res)
assert.Error(t, res.err)
}
// TestComputeDiffID verifies computeDiffID against fixture files, with and
// without decompression, and checks the error paths (bad gzip stream, read
// failure on the input).
func TestComputeDiffID(t *testing.T) {
	for _, c := range []struct {
		filename     string
		decompressor compression.DecompressorFunc
		result       digest.Digest
	}{
		{"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
		{"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"},
		{"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
	} {
		stream, err := os.Open(c.filename)
		require.NoError(t, err, c.filename)
		diffID, err := computeDiffID(stream, c.decompressor)
		// Fix: close each fixture promptly; the previous `defer` inside the
		// loop kept all streams open until the whole function returned.
		stream.Close()
		require.NoError(t, err, c.filename)
		assert.Equal(t, c.result, diffID)
	}
	// Error initializing decompression
	_, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor)
	assert.Error(t, err)
	// Error reading input
	reader, writer := io.Pipe()
	defer reader.Close()
	writer.CloseWithError(errors.New("Expected error reading input in computeDiffID"))
	_, err = computeDiffID(reader, nil)
	assert.Error(t, err)
}

View file

@ -1,164 +0,0 @@
package copy
import (
"errors"
"fmt"
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestOrderedSet checks that orderedSet preserves insertion order while
// dropping duplicates.
func TestOrderedSet(t *testing.T) {
	cases := []struct{ input, expected []string }{
		{[]string{}, []string{}},
		{[]string{"a", "b", "c"}, []string{"a", "b", "c"}},
		{[]string{"a", "b", "a", "c"}, []string{"a", "b", "c"}},
	}
	for _, c := range cases {
		// Renamed from `os` to avoid confusion with the standard-library package.
		set := newOrderedSet()
		for _, item := range c.input {
			set.append(item)
		}
		assert.Equal(t, c.expected, set.list, fmt.Sprintf("%#v", c.input))
	}
}
// fakeImageSource is an implementation of types.Image which only returns itself as a MIME type in Manifest,
// except that "" means “reading the manifest should fail”.
// Every other method panics: determineManifestConversion must not reach them.
type fakeImageSource string
func (f fakeImageSource) Reference() types.ImageReference {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) Close() error {
panic("Unexpected call to a mock function")
}
// Manifest returns a nil payload with the string value of f as the MIME type,
// or an error when f is empty.
func (f fakeImageSource) Manifest() ([]byte, string, error) {
if string(f) == "" {
return nil, "", errors.New("Manifest() directed to fail")
}
return nil, string(f), nil
}
func (f fakeImageSource) Signatures() ([][]byte, error) {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) ConfigInfo() types.BlobInfo {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) ConfigBlob() ([]byte, error) {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) OCIConfig() (*v1.Image, error) {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) LayerInfos() []types.BlobInfo {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) Inspect() (*types.ImageInspectInfo, error) {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) IsMultiImage() bool {
panic("Unexpected call to a mock function")
}
func (f fakeImageSource) Size() (int64, error) {
panic("Unexpected call to a mock function")
}
// TestDetermineManifestConversion drives determineManifestConversion through a
// table of (source MIME type, destination-supported types) combinations and
// checks the chosen preferred type, the requested manifest update (if any),
// and the ordered list of fallback candidates.
func TestDetermineManifestConversion(t *testing.T) {
// Destination capability sets, in the order the destination advertises them.
supportS1S2OCI := []string{
v1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
supportS1OCI := []string{
v1.MediaTypeImageManifest,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
supportS1S2 := []string{
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
supportOnlyS1 := []string{
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
cases := []struct {
description string
sourceType string
destTypes []string
// expectedUpdate == "" means "no conversion requested, keep the source type".
expectedUpdate string
expectedOtherCandidates []string
}{
// Destination accepts anything — no conversion necessary
{"s1→anything", manifest.DockerV2Schema1SignedMediaType, nil, "", []string{}},
{"s2→anything", manifest.DockerV2Schema2MediaType, nil, "", []string{}},
// Destination accepts the unmodified original
{"s1→s1s2", manifest.DockerV2Schema1SignedMediaType, supportS1S2, "", []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1MediaType}},
{"s2→s1s2", manifest.DockerV2Schema2MediaType, supportS1S2, "", supportOnlyS1},
{"s1→s1", manifest.DockerV2Schema1SignedMediaType, supportOnlyS1, "", []string{manifest.DockerV2Schema1MediaType}},
// Conversion necessary, a preferred format is acceptable
{"s2→s1", manifest.DockerV2Schema2MediaType, supportOnlyS1, manifest.DockerV2Schema1SignedMediaType, []string{manifest.DockerV2Schema1MediaType}},
// Conversion necessary, a preferred format is not acceptable
{"s2→OCI", manifest.DockerV2Schema2MediaType, []string{v1.MediaTypeImageManifest}, v1.MediaTypeImageManifest, []string{}},
// Conversion necessary, try the preferred formats in order.
{
"special→s2", "this needs conversion", supportS1S2OCI, manifest.DockerV2Schema2MediaType,
[]string{manifest.DockerV2Schema1SignedMediaType, v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType},
},
{
"special→s1", "this needs conversion", supportS1OCI, manifest.DockerV2Schema1SignedMediaType,
[]string{v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType},
},
{
"special→OCI", "this needs conversion", []string{v1.MediaTypeImageManifest, "other options", "with lower priority"}, v1.MediaTypeImageManifest,
[]string{"other options", "with lower priority"},
},
}
// canModifyManifest == true: conversion is allowed when needed.
for _, c := range cases {
src := fakeImageSource(c.sourceType)
mu := types.ManifestUpdateOptions{}
preferredMIMEType, otherCandidates, err := determineManifestConversion(&mu, src, c.destTypes, true)
require.NoError(t, err, c.description)
assert.Equal(t, c.expectedUpdate, mu.ManifestMIMEType, c.description)
if c.expectedUpdate == "" {
assert.Equal(t, c.sourceType, preferredMIMEType, c.description)
} else {
assert.Equal(t, c.expectedUpdate, preferredMIMEType, c.description)
}
assert.Equal(t, c.expectedOtherCandidates, otherCandidates, c.description)
}
// Whatever the input is, with !canModifyManifest we return "keep the original as is"
for _, c := range cases {
src := fakeImageSource(c.sourceType)
mu := types.ManifestUpdateOptions{}
preferredMIMEType, otherCandidates, err := determineManifestConversion(&mu, src, c.destTypes, false)
require.NoError(t, err, c.description)
assert.Equal(t, "", mu.ManifestMIMEType, c.description)
assert.Equal(t, c.sourceType, preferredMIMEType, c.description)
assert.Equal(t, []string{}, otherCandidates, c.description)
}
// Error reading the manifest — smoke test only.
mu := types.ManifestUpdateOptions{}
_, _, err := determineManifestConversion(&mu, fakeImageSource(""), supportS1S2, true)
assert.Error(t, err)
}

View file

@ -1,72 +0,0 @@
package copy
import (
"io/ioutil"
"os"
"testing"
"github.com/containers/image/directory"
"github.com/containers/image/docker"
"github.com/containers/image/manifest"
"github.com/containers/image/signature"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testGPGHomeDirectory = "../signature/fixtures"
// TestKeyFingerprint is the fingerprint of the private key in testGPGHomeDirectory.
// Keep this in sync with signature/fixtures_info_test.go
testKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
)
// TestCreateSignature covers createSignature end to end: a destination without
// a Docker reference fails, an unknown signing key fails, and a valid key
// produces a signature that verifies against the expected reference/digest.
// Skipped when the GPG mechanism does not support signing.
func TestCreateSignature(t *testing.T) {
manifestBlob := []byte("Something")
manifestDigest, err := manifest.Digest(manifestBlob)
require.NoError(t, err)
mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
require.NoError(t, err)
defer mech.Close()
if err := mech.SupportsSigning(); err != nil {
t.Skipf("Signing not supported: %v", err)
}
os.Setenv("GNUPGHOME", testGPGHomeDirectory)
defer os.Unsetenv("GNUPGHOME")
// Signing a directory: reference, which does not have a DockerReference(), fails.
tempDir, err := ioutil.TempDir("", "signature-dir-dest")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
dirRef, err := directory.NewReference(tempDir)
require.NoError(t, err)
dirDest, err := dirRef.NewImageDestination(nil)
require.NoError(t, err)
defer dirDest.Close()
_, err = createSignature(dirDest, manifestBlob, testKeyFingerprint, ioutil.Discard)
assert.Error(t, err)
// Set up a docker: reference
dockerRef, err := docker.ParseReference("//busybox")
require.NoError(t, err)
dockerDest, err := dockerRef.NewImageDestination(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist"})
assert.NoError(t, err)
defer dockerDest.Close()
// Signing with an unknown key fails
_, err = createSignature(dockerDest, manifestBlob, "this key does not exist", ioutil.Discard)
assert.Error(t, err)
// Success
mech, err = signature.NewGPGSigningMechanism()
require.NoError(t, err)
defer mech.Close()
sig, err := createSignature(dockerDest, manifestBlob, testKeyFingerprint, ioutil.Discard)
require.NoError(t, err)
verified, err := signature.VerifyDockerManifestSignature(sig, manifestBlob, "docker.io/library/busybox:latest", mech, testKeyFingerprint)
require.NoError(t, err)
assert.Equal(t, "docker.io/library/busybox:latest", verified.DockerReference)
assert.Equal(t, manifestDigest, verified.DockerManifestDigest)
}

View file

@ -1,162 +0,0 @@
package directory
import (
"bytes"
"io/ioutil"
"os"
"testing"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDestinationReference(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dest, err := ref.NewImageDestination(nil)
require.NoError(t, err)
defer dest.Close()
ref2 := dest.Reference()
assert.Equal(t, tmpDir, ref2.StringWithinTransport())
}
func TestGetPutManifest(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
man := []byte("test-manifest")
dest, err := ref.NewImageDestination(nil)
require.NoError(t, err)
defer dest.Close()
err = dest.PutManifest(man)
assert.NoError(t, err)
err = dest.Commit()
assert.NoError(t, err)
src, err := ref.NewImageSource(nil, nil)
require.NoError(t, err)
defer src.Close()
m, mt, err := src.GetManifest()
assert.NoError(t, err)
assert.Equal(t, man, m)
assert.Equal(t, "", mt)
}
func TestGetPutBlob(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
blob := []byte("test-blob")
dest, err := ref.NewImageDestination(nil)
require.NoError(t, err)
defer dest.Close()
compress := dest.ShouldCompressLayers()
assert.False(t, compress)
info, err := dest.PutBlob(bytes.NewReader(blob), types.BlobInfo{Digest: digest.Digest("sha256:digest-test"), Size: int64(9)})
assert.NoError(t, err)
err = dest.Commit()
assert.NoError(t, err)
assert.Equal(t, int64(9), info.Size)
assert.Equal(t, digest.FromBytes(blob), info.Digest)
src, err := ref.NewImageSource(nil, nil)
require.NoError(t, err)
defer src.Close()
rc, size, err := src.GetBlob(info)
assert.NoError(t, err)
defer rc.Close()
b, err := ioutil.ReadAll(rc)
assert.NoError(t, err)
assert.Equal(t, blob, b)
assert.Equal(t, int64(len(blob)), size)
}
// readerFromFunc allows implementing Reader by any function, e.g. a closure.
type readerFromFunc func([]byte) (int, error)
// Read implements io.Reader by delegating to the underlying function.
func (fn readerFromFunc) Read(p []byte) (int, error) {
return fn(p)
}
// TestPutBlobDigestFailure simulates behavior on digest verification failure:
// the read error must be propagated by PutBlob, and no blob file may be left
// behind at the final layer path (before, during, or after the attempt).
func TestPutBlobDigestFailure(t *testing.T) {
	const digestErrorString = "Simulated digest error"
	const blobDigest = digest.Digest("sha256:test-digest")
	ref, tmpDir := refToTempDir(t)
	defer os.RemoveAll(tmpDir)
	dirRef, ok := ref.(dirReference)
	require.True(t, ok)
	blobPath := dirRef.layerPath(blobDigest)
	firstRead := true
	reader := readerFromFunc(func(p []byte) (int, error) {
		// The final blob path must never exist while the blob is streaming in.
		_, err := os.Lstat(blobPath)
		require.Error(t, err)
		require.True(t, os.IsNotExist(err))
		if firstRead {
			if len(p) > 0 {
				firstRead = false
			}
			for i := 0; i < len(p); i++ {
				p[i] = 0xAA
			}
			return len(p), nil
		}
		// Second read: simulate a failure mid-stream.
		return 0, errors.Errorf(digestErrorString)
	})
	dest, err := ref.NewImageDestination(nil)
	require.NoError(t, err)
	defer dest.Close()
	_, err = dest.PutBlob(reader, types.BlobInfo{Digest: blobDigest, Size: -1})
	assert.Error(t, err)
	// Fix: assert.Contains(t, container, element) — check that the returned
	// error message contains the simulated error, not the reverse (the original
	// argument order only passed because the two strings were identical).
	assert.Contains(t, err.Error(), digestErrorString)
	err = dest.Commit()
	assert.NoError(t, err)
	_, err = os.Lstat(blobPath)
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
}
func TestGetPutSignatures(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dest, err := ref.NewImageDestination(nil)
require.NoError(t, err)
defer dest.Close()
signatures := [][]byte{
[]byte("sig1"),
[]byte("sig2"),
}
err = dest.SupportsSignatures()
assert.NoError(t, err)
err = dest.PutSignatures(signatures)
assert.NoError(t, err)
err = dest.Commit()
assert.NoError(t, err)
src, err := ref.NewImageSource(nil, nil)
require.NoError(t, err)
defer src.Close()
sigs, err := src.GetSignatures()
assert.NoError(t, err)
assert.Equal(t, signatures, sigs)
}
func TestSourceReference(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
src, err := ref.NewImageSource(nil, nil)
require.NoError(t, err)
defer src.Close()
ref2 := src.Reference()
assert.Equal(t, tmpDir, ref2.StringWithinTransport())
}

View file

@ -1,232 +0,0 @@
package directory
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestTransportName checks the name under which the dir transport registers.
func TestTransportName(t *testing.T) {
	const expectedName = "dir"
	assert.Equal(t, expectedName, Transport.Name())
}
func TestTransportParseReference(t *testing.T) {
testNewReference(t, Transport.ParseReference)
}
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
for _, scope := range []string{
"/etc",
"/this/does/not/exist",
} {
err := Transport.ValidatePolicyConfigurationScope(scope)
assert.NoError(t, err, scope)
}
for _, scope := range []string{
"relative/path",
"/double//slashes",
"/has/./dot",
"/has/dot/../dot",
"/trailing/slash/",
"/",
} {
err := Transport.ValidatePolicyConfigurationScope(scope)
assert.Error(t, err, scope)
}
}
func TestNewReference(t *testing.T) {
testNewReference(t, NewReference)
}
// testNewReference is a test shared for Transport.ParseReference and NewReference.
func testNewReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
tmpDir, err := ioutil.TempDir("", "dir-transport-test")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
for _, path := range []string{
"/",
"/etc",
tmpDir,
"relativepath",
tmpDir + "/thisdoesnotexist",
} {
ref, err := fn(path)
require.NoError(t, err, path)
dirRef, ok := ref.(dirReference)
require.True(t, ok)
assert.Equal(t, path, dirRef.path, path)
}
_, err = fn(tmpDir + "/thisparentdoesnotexist/something")
assert.Error(t, err)
}
// refToTempDir creates a temporary directory and returns a reference to it.
// The caller should
// defer os.RemoveAll(tmpDir)
// Failures abort the calling test via require.
func refToTempDir(t *testing.T) (ref types.ImageReference, tmpDir string) {
tmpDir, err := ioutil.TempDir("", "dir-transport-test")
require.NoError(t, err)
ref, err = NewReference(tmpDir)
require.NoError(t, err)
return ref, tmpDir
}
func TestReferenceTransport(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
assert.Equal(t, Transport, ref.Transport())
}
func TestReferenceStringWithinTransport(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
assert.Equal(t, tmpDir, ref.StringWithinTransport())
}
func TestReferenceDockerReference(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
assert.Nil(t, ref.DockerReference())
}
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
// A non-canonical path. Test just one, the various other cases are
// tested in explicitfilepath.ResolvePathToFullyExplicit.
ref, err := NewReference(tmpDir + "/.")
require.NoError(t, err)
assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity())
// "/" as a corner case.
ref, err = NewReference("/")
require.NoError(t, err)
assert.Equal(t, "/", ref.PolicyConfigurationIdentity())
}
// TestReferencePolicyConfigurationNamespaces checks that the namespaces are
// the chain of parent directories of the reference's path, with "/" yielding
// an empty list.
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
// We don't really know enough to make a full equality test here.
ns := ref.PolicyConfigurationNamespaces()
require.NotNil(t, ns)
assert.NotEmpty(t, ns)
assert.Equal(t, filepath.Dir(tmpDir), ns[0])
// Test with a known path which should exist. Test just one non-canonical
// path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
//
// It would be nice to test a deeper hierarchy, but it is not obvious what
// deeper path is always available in the various distros, AND is not likely
// to contain a symbolic link.
for _, path := range []string{"/etc/skel", "/etc/skel/./."} {
_, err := os.Lstat(path)
require.NoError(t, err)
ref, err := NewReference(path)
require.NoError(t, err)
ns := ref.PolicyConfigurationNamespaces()
require.NotNil(t, ns)
assert.Equal(t, []string{"/etc"}, ns)
}
// "/" as a corner case.
ref, err := NewReference("/")
require.NoError(t, err)
assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces())
}
func TestReferenceNewImage(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dest, err := ref.NewImageDestination(nil)
require.NoError(t, err)
defer dest.Close()
mFixture, err := ioutil.ReadFile("../manifest/fixtures/v2s1.manifest.json")
require.NoError(t, err)
err = dest.PutManifest(mFixture)
assert.NoError(t, err)
err = dest.Commit()
assert.NoError(t, err)
img, err := ref.NewImage(nil)
assert.NoError(t, err)
defer img.Close()
}
func TestReferenceNewImageNoValidManifest(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dest, err := ref.NewImageDestination(nil)
require.NoError(t, err)
defer dest.Close()
err = dest.PutManifest([]byte(`{"schemaVersion":1}`))
assert.NoError(t, err)
err = dest.Commit()
assert.NoError(t, err)
_, err = ref.NewImage(nil)
assert.Error(t, err)
}
func TestReferenceNewImageSource(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
src, err := ref.NewImageSource(nil, nil)
assert.NoError(t, err)
defer src.Close()
}
func TestReferenceNewImageDestination(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dest, err := ref.NewImageDestination(nil)
assert.NoError(t, err)
defer dest.Close()
}
func TestReferenceDeleteImage(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
err := ref.DeleteImage(nil)
assert.Error(t, err)
}
func TestReferenceManifestPath(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dirRef, ok := ref.(dirReference)
require.True(t, ok)
assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath())
}
func TestReferenceLayerPath(t *testing.T) {
const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dirRef, ok := ref.(dirReference)
require.True(t, ok)
assert.Equal(t, tmpDir+"/"+hex+".tar", dirRef.layerPath("sha256:"+hex))
}
func TestReferenceSignaturePath(t *testing.T) {
ref, tmpDir := refToTempDir(t)
defer os.RemoveAll(tmpDir)
dirRef, ok := ref.(dirReference)
require.True(t, ok)
assert.Equal(t, tmpDir+"/signature-1", dirRef.signaturePath(0))
assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9))
}

View file

@ -1,173 +0,0 @@
package explicitfilepath
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type pathResolvingTestCase struct {
setup func(*testing.T, string) string
expected string
}
var testCases = []pathResolvingTestCase{
{ // A straightforward subdirectory hierarchy
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755)
require.NoError(t, err)
return "dir1/dir2/dir3"
},
"dir1/dir2/dir3",
},
{ // Missing component
func(t *testing.T, top string) string {
return "thisismissing/dir2"
},
"",
},
{ // Symlink on the path
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
require.NoError(t, err)
err = os.Symlink("dir1", filepath.Join(top, "link1"))
require.NoError(t, err)
return "link1/dir2"
},
"dir1/dir2",
},
{ // Trailing symlink
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
require.NoError(t, err)
err = os.Symlink("dir2", filepath.Join(top, "dir1/link2"))
require.NoError(t, err)
return "dir1/link2"
},
"dir1/dir2",
},
{ // Symlink pointing nowhere, as a non-final component
func(t *testing.T, top string) string {
err := os.Symlink("thisismissing", filepath.Join(top, "link1"))
require.NoError(t, err)
return "link1/dir2"
},
"",
},
{ // Trailing symlink pointing nowhere (but note that a missing non-symlink would be accepted)
func(t *testing.T, top string) string {
err := os.Symlink("thisismissing", filepath.Join(top, "link1"))
require.NoError(t, err)
return "link1"
},
"",
},
{ // Relative components in a path
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755)
require.NoError(t, err)
return "dir1/./dir2/../dir2/dir3"
},
"dir1/dir2/dir3",
},
{ // Trailing relative components
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
require.NoError(t, err)
return "dir1/dir2/.."
},
"dir1",
},
{ // Relative components in symlink
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755)
require.NoError(t, err)
err = os.Symlink("../dir1/dir2", filepath.Join(top, "dir1/link2"))
require.NoError(t, err)
return "dir1/link2"
},
"dir1/dir2",
},
{ // Relative component pointing "into" a symlink
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755)
require.NoError(t, err)
err = os.Symlink("dir3", filepath.Join(top, "dir1/dir2/link3"))
require.NoError(t, err)
return "dir1/dir2/link3/../.."
},
"dir1",
},
{ // Unreadable directory
func(t *testing.T, top string) string {
err := os.MkdirAll(filepath.Join(top, "unreadable/dir2"), 0755)
require.NoError(t, err)
err = os.Chmod(filepath.Join(top, "unreadable"), 000)
require.NoError(t, err)
return "unreadable/dir2"
},
"",
},
}
func testPathsAreSameFile(t *testing.T, path1, path2, description string) {
fi1, err := os.Stat(path1)
require.NoError(t, err)
fi2, err := os.Stat(path2)
require.NoError(t, err)
assert.True(t, os.SameFile(fi1, fi2), description)
}
// runPathResolvingTestCase runs one pathResolvingTestCase against f (either
// ResolvePathToFullyExplicit or resolveExistingPathToFullyExplicit), with
// suffix appended to both the input path and the expected result.
func runPathResolvingTestCase(t *testing.T, f func(string) (string, error), c pathResolvingTestCase, suffix string) {
	topDir, err := ioutil.TempDir("", "pathResolving")
	// Fix: this error was previously unchecked; fail fast instead of producing
	// confusing downstream errors when the temp dir could not be created.
	require.NoError(t, err)
	defer func() {
		// Clean up after the "Unreadable directory" case; os.RemoveAll just fails.
		_ = os.Chmod(filepath.Join(topDir, "unreadable"), 0755) // Ignore errors, especially if this does not exist.
		os.RemoveAll(topDir)
	}()
	input := c.setup(t, topDir) + suffix // Do not call filepath.Join() on input, it calls filepath.Clean() internally!
	description := fmt.Sprintf("%s vs. %s%s", input, c.expected, suffix)
	// Fix: call the function under test f; the original always called
	// ResolvePathToFullyExplicit, so TestResolveExistingPathToFullyExplicit
	// never actually exercised resolveExistingPathToFullyExplicit.
	fullOutput, err := f(topDir + "/" + input)
	if c.expected == "" {
		assert.Error(t, err, description)
	} else {
		require.NoError(t, err, input)
		fullExpected := topDir + "/" + c.expected + suffix
		assert.Equal(t, fullExpected, fullOutput)
		// Either the two paths resolve to the same existing file, or to the same name in the same existing parent.
		if _, err := os.Lstat(fullExpected); err == nil {
			testPathsAreSameFile(t, fullOutput, fullExpected, description)
		} else {
			require.True(t, os.IsNotExist(err))
			_, err := os.Stat(fullOutput)
			require.Error(t, err)
			require.True(t, os.IsNotExist(err))
			parentExpected, fileExpected := filepath.Split(fullExpected)
			parentOutput, fileOutput := filepath.Split(fullOutput)
			assert.Equal(t, fileExpected, fileOutput)
			testPathsAreSameFile(t, parentOutput, parentExpected, description)
		}
	}
}
func TestResolvePathToFullyExplicit(t *testing.T) {
for _, c := range testCases {
runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "")
runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "/trailing")
}
}
func TestResolveExistingPathToFullyExplicit(t *testing.T) {
for _, c := range testCases {
runPathResolvingTestCase(t, resolveExistingPathToFullyExplicit, c, "")
}
}

View file

@ -1,29 +0,0 @@
// Package image provides libraries and commands to interact with container images.
//
// package main
//
// import (
//	"fmt"
//
//	"github.com/containers/image/docker"
// )
//
// func main() {
//	ref, err := docker.ParseReference("//fedora")
//	if err != nil {
//		panic(err)
//	}
//	img, err := ref.NewImage(nil)
//	if err != nil {
//		panic(err)
//	}
//	defer img.Close()
//	b, _, err := img.Manifest()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("%s", string(b))
// }
//
// TODO(runcom)
package image

View file

@ -1,198 +0,0 @@
package archive
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// sha256digestHex is an arbitrary but syntactically valid hex digest value.
	sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// sha256digest is the digest in "@sha256:..." reference-suffix form.
	sha256digest = "@sha256:" + sha256digestHex
	// tarFixture is a minimal tar file used as a docker-archive fixture.
	tarFixture = "fixtures/almostempty.tar"
)
// TestTransportName checks the transport's registered name.
func TestTransportName(t *testing.T) {
	assert.Equal(t, "docker-archive", Transport.Name())
}
// TestTransportParseReference delegates to the shared testParseReference helper.
func TestTransportParseReference(t *testing.T) {
	testParseReference(t, Transport.ParseReference)
}
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
for _, scope := range []string{ // A semi-representative assortment of values; everything is rejected.
"docker.io/library/busybox:notlatest",
"docker.io/library/busybox",
"docker.io/library",
"docker.io",
"",
} {
err := Transport.ValidatePolicyConfigurationScope(scope)
assert.Error(t, err, scope)
}
}
// TestParseReference delegates to the shared testParseReference helper.
func TestParseReference(t *testing.T) {
	testParseReference(t, ParseReference)
}
// testParseReference is a test shared for Transport.ParseReference and ParseReference.
// Each case gives an input string and, for valid inputs, the expected path and
// destination reference components.
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
	for _, tc := range []struct{ input, expectedPath, expectedRef string }{
		{"", "", ""}, // Empty input is explicitly rejected
		{"/path", "/path", ""},
		{"/path:busybox:notlatest", "/path", "docker.io/library/busybox:notlatest"}, // Explicit tag
		{"/path:busybox" + sha256digest, "", ""},                                    // Digest references are forbidden
		{"/path:busybox", "/path", "docker.io/library/busybox:latest"},              // Default tag
		// A github.com/distribution/reference value can have a tag and a digest at the same time!
		{"/path:busybox:latest" + sha256digest, "", ""},                                         // Both tag and digest is rejected
		{"/path:docker.io/library/busybox:latest", "/path", "docker.io/library/busybox:latest"}, // All implied values explicitly specified
		{"/path:UPPERCASEISINVALID", "", ""},                                                    // Invalid input
	} {
		parsed, err := fn(tc.input)
		if tc.expectedPath == "" {
			assert.Error(t, err, tc.input)
			continue
		}
		require.NoError(t, err, tc.input)
		archiveRef, ok := parsed.(archiveReference)
		require.True(t, ok, tc.input)
		assert.Equal(t, tc.expectedPath, archiveRef.path, tc.input)
		if tc.expectedRef == "" {
			assert.Nil(t, archiveRef.destinationRef, tc.input)
			continue
		}
		require.NotNil(t, archiveRef.destinationRef, tc.input)
		assert.Equal(t, tc.expectedRef, archiveRef.destinationRef.String(), tc.input)
	}
}
// refWithTagAndDigest is a reference.NamedTagged and reference.Canonical at the same time.
type refWithTagAndDigest struct{ reference.Canonical }

// Tag implements reference.NamedTagged; the value is arbitrary test data.
func (ref refWithTagAndDigest) Tag() string {
	return "notLatest"
}
// A common list of reference formats to test for the various ImageReference methods.
// Fields: raw input, expected DockerReference().String() ("" if nil), and
// expected StringWithinTransport() output.
var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{
	{"/pathonly", "", "/pathonly"},
	{"/path:busybox:notlatest", "docker.io/library/busybox:notlatest", "/path:docker.io/library/busybox:notlatest"},       // Explicit tag
	{"/path:docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "/path:docker.io/library/busybox:latest"}, // All implied values explicitly specified
	{"/path:example.com/ns/foo:bar", "example.com/ns/foo:bar", "/path:example.com/ns/foo:bar"},                            // All values explicitly specified
}
// TestReferenceTransport checks that parsed references report this transport.
func TestReferenceTransport(t *testing.T) {
	parsed, err := ParseReference("/tmp/archive.tar")
	require.NoError(t, err)
	assert.Equal(t, Transport, parsed.Transport())
}
// TestReferenceStringWithinTransport checks StringWithinTransport output and
// that the output round-trips through ParseReference unchanged.
func TestReferenceStringWithinTransport(t *testing.T) {
	for _, tc := range validReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		within := ref.StringWithinTransport()
		assert.Equal(t, tc.stringWithinTransport, within, tc.input)
		// Do one more round to verify that the output can be parsed, to an equal value.
		reparsed, err := Transport.ParseReference(within)
		require.NoError(t, err, tc.input)
		assert.Equal(t, within, reparsed.StringWithinTransport(), tc.input)
	}
}
// TestReferenceDockerReference checks DockerReference for both path-only
// (nil) and path+reference inputs.
func TestReferenceDockerReference(t *testing.T) {
	for _, tc := range validReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		dockerRef := ref.DockerReference()
		if tc.dockerRef == "" {
			require.Nil(t, dockerRef, tc.input)
			continue
		}
		require.NotNil(t, dockerRef, tc.input)
		assert.Equal(t, tc.dockerRef, dockerRef.String(), tc.input)
	}
}
// TestReferencePolicyConfigurationIdentity checks that docker-archive
// references never have a policy configuration identity.
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	for _, tc := range validReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		assert.Equal(t, "", ref.PolicyConfigurationIdentity(), tc.input)
	}
}
// TestReferencePolicyConfigurationNamespaces checks that docker-archive
// references have no policy configuration namespaces.
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	for _, c := range validReferenceTestCases {
		ref, err := ParseReference(c.input)
		require.NoError(t, err, c.input)
		// The original passed "" as assert.Empty's object, making the assertion
		// vacuously true; the actual namespaces value must be the one checked
		// (compare the docker-daemon variant of this test).
		assert.Empty(t, ref.PolicyConfigurationNamespaces(), c.input)
	}
}
// TestReferenceNewImage checks that an image can be instantiated from the tar
// fixture, with or without a (ignored) destination reference suffix.
func TestReferenceNewImage(t *testing.T) {
	for _, suffix := range []string{"", ":thisisignoredbutaccepted"} {
		ref, err := ParseReference(tarFixture + suffix)
		require.NoError(t, err, suffix)
		img, err := ref.NewImage(nil)
		// require, not assert: on failure img is nil and the deferred Close
		// below would panic before the test could report the real error.
		require.NoError(t, err, suffix)
		defer img.Close()
	}
}
// TestReferenceNewImageSource checks that an image source can be instantiated
// from the tar fixture, with or without a (ignored) destination reference suffix.
func TestReferenceNewImageSource(t *testing.T) {
	for _, suffix := range []string{"", ":thisisignoredbutaccepted"} {
		ref, err := ParseReference(tarFixture + suffix)
		require.NoError(t, err, suffix)
		src, err := ref.NewImageSource(nil, nil)
		// require, not assert: on failure src is nil and the deferred Close
		// below would panic before the test could report the real error.
		require.NoError(t, err, suffix)
		defer src.Close()
	}
}
// TestReferenceNewImageDestination checks that writing requires a destination
// reference: a bare path fails, a path:reference input succeeds.
func TestReferenceNewImageDestination(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "docker-archive-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	ref, err := ParseReference(filepath.Join(tmpDir, "no-reference"))
	require.NoError(t, err)
	dest, err := ref.NewImageDestination(nil)
	assert.Error(t, err)
	// The original concatenated "busybox:latest" without the ":" separator the
	// path:reference format requires (see testParseReference), so the path
	// component silently became ".../with-referencebusybox".
	ref, err = ParseReference(filepath.Join(tmpDir, "with-reference") + ":busybox:latest")
	require.NoError(t, err)
	dest, err = ref.NewImageDestination(nil)
	// require, not assert: dest is nil on failure and the deferred Close would panic.
	require.NoError(t, err)
	defer dest.Close()
}
// TestReferenceDeleteImage checks that DeleteImage is rejected for
// docker-archive references and leaves the archive file untouched.
func TestReferenceDeleteImage(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "docker-archive-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	for i, suffix := range []string{"", ":thisisignoredbutaccepted"} {
		testFile := filepath.Join(tmpDir, fmt.Sprintf("file%d.tar", i))
		require.NoError(t, ioutil.WriteFile(testFile, []byte("nonempty"), 0644), suffix)
		ref, err := ParseReference(testFile + suffix)
		require.NoError(t, err, suffix)
		// Deletion is unsupported...
		assert.Error(t, ref.DeleteImage(nil), suffix)
		// ...and the underlying file must still exist.
		_, err = os.Lstat(testFile)
		assert.NoError(t, err, suffix)
	}
}

View file

@ -1,228 +0,0 @@
package daemon
import (
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// sha256digestHex is an arbitrary but syntactically valid hex digest value.
	sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// sha256digest is the digest in "sha256:..." form (no "@" prefix; callers
	// that need the reference-suffix form add "@" explicitly).
	sha256digest = "sha256:" + sha256digestHex
)
// TestTransportName checks the transport's registered name.
func TestTransportName(t *testing.T) {
	assert.Equal(t, "docker-daemon", Transport.Name())
}
// TestTransportParseReference delegates to the shared testParseReference helper.
func TestTransportParseReference(t *testing.T) {
	testParseReference(t, Transport.ParseReference)
}
// TestTransportValidatePolicyConfigurationScope checks that docker-daemon
// rejects every policy configuration scope.
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
	// A semi-representative assortment of values; everything is rejected.
	scopes := []string{
		sha256digestHex,
		sha256digest,
		"docker.io/library/busybox:latest",
		"docker.io",
		"",
	}
	for _, scope := range scopes {
		assert.Error(t, Transport.ValidatePolicyConfigurationScope(scope), scope)
	}
}
// TestParseReference delegates to the shared testParseReference helper.
func TestParseReference(t *testing.T) {
	testParseReference(t, ParseReference)
}
// testParseReference is a test shared for Transport.ParseReference and ParseReference.
// Each case gives an input string and, for accepted inputs, exactly one of an
// expected image ID or an expected named reference.
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
	for _, c := range []struct{ input, expectedID, expectedRef string }{
		{sha256digest, sha256digest, ""},                        // Valid digest format
		{"sha512:" + sha256digestHex + sha256digestHex, "", ""}, // Non-digest.Canonical digest
		{"sha256:ab", "", ""},                                   // Invalid digest value (too short)
		{sha256digest + "ab", "", ""},                           // Invalid digest value (too long)
		{"sha256:XX23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "", ""}, // Invalid digest value
		{"UPPERCASEISINVALID", "", ""},                              // Invalid reference input
		{"busybox", "", ""},                                         // Missing tag or digest
		{"busybox:latest", "", "docker.io/library/busybox:latest"},  // Explicit tag
		{"busybox@" + sha256digest, "", "docker.io/library/busybox@" + sha256digest}, // Explicit digest
		// A github.com/distribution/reference value can have a tag and a digest at the same time!
		// Most versions of docker/reference do not handle that (ignoring the tag), so we reject such input.
		{"busybox:latest@" + sha256digest, "", ""},                                   // Both tag and digest
		{"docker.io/library/busybox:latest", "", "docker.io/library/busybox:latest"}, // All implied values explicitly specified
	} {
		ref, err := fn(c.input)
		if c.expectedID == "" && c.expectedRef == "" {
			assert.Error(t, err, c.input)
		} else {
			require.NoError(t, err, c.input)
			daemonRef, ok := ref.(daemonReference)
			require.True(t, ok, c.input)
			// If we don't reject the input, the interpretation must be consistent with reference.ParseAnyReference
			dockerRef, err := reference.ParseAnyReference(c.input)
			require.NoError(t, err, c.input)
			if c.expectedRef == "" {
				// ID-only reference: the ID round-trips, no name is stored.
				assert.Equal(t, c.expectedID, daemonRef.id.String(), c.input)
				assert.Nil(t, daemonRef.ref, c.input)
				_, ok := dockerRef.(reference.Digested)
				require.True(t, ok, c.input)
				assert.Equal(t, c.expectedID, dockerRef.String(), c.input)
			} else {
				// Named reference: no ID is stored, the name round-trips.
				assert.Equal(t, "", daemonRef.id.String(), c.input)
				require.NotNil(t, daemonRef.ref, c.input)
				assert.Equal(t, c.expectedRef, daemonRef.ref.String(), c.input)
				_, ok := dockerRef.(reference.Named)
				require.True(t, ok, c.input)
				assert.Equal(t, c.expectedRef, dockerRef.String(), c.input)
			}
		}
	}
}
// A common list of reference formats to test for the various ImageReference methods.
// (For IDs it is much simpler, we simply use them unmodified)
var validNamedReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{
	{"busybox:notlatest", "docker.io/library/busybox:notlatest", "busybox:notlatest"}, // Explicit tag
	// The "@" must be explicit here: in this file sha256digest is a bare
	// "sha256:..." value, so "busybox" + sha256digest would have parsed as
	// repository "busyboxsha256" with a 64-character hex *tag*, not a digest
	// (compare the "busybox@" + sha256digest case in testParseReference).
	{"busybox@" + sha256digest, "docker.io/library/busybox@" + sha256digest, "busybox@" + sha256digest}, // Explicit digest
	{"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "busybox:latest"},         // All implied values explicitly specified
	{"example.com/ns/foo:bar", "example.com/ns/foo:bar", "example.com/ns/foo:bar"},                     // All values explicitly specified
}
// TestNewReference covers ID-only references, named references, and the
// invalid combinations (ID+name, name without tag/digest, tag+digest) that
// NewReference must reject.
func TestNewReference(t *testing.T) {
	// An ID reference.
	id, err := digest.Parse(sha256digest)
	require.NoError(t, err)
	ref, err := NewReference(id, nil)
	require.NoError(t, err)
	daemonRef, ok := ref.(daemonReference)
	require.True(t, ok)
	assert.Equal(t, id, daemonRef.id)
	assert.Nil(t, daemonRef.ref)
	// Named references
	for _, c := range validNamedReferenceTestCases {
		parsed, err := reference.ParseNormalizedNamed(c.input)
		require.NoError(t, err)
		ref, err := NewReference("", parsed)
		require.NoError(t, err, c.input)
		daemonRef, ok := ref.(daemonReference)
		require.True(t, ok, c.input)
		assert.Equal(t, "", daemonRef.id.String())
		require.NotNil(t, daemonRef.ref)
		assert.Equal(t, c.dockerRef, daemonRef.ref.String(), c.input)
	}
	// Both an ID and a named reference provided
	parsed, err := reference.ParseNormalizedNamed("busybox:latest")
	require.NoError(t, err)
	_, err = NewReference(id, parsed)
	assert.Error(t, err)
	// A reference with neither a tag nor digest
	parsed, err = reference.ParseNormalizedNamed("busybox")
	require.NoError(t, err)
	_, err = NewReference("", parsed)
	assert.Error(t, err)
	// A github.com/distribution/reference value can have a tag and a digest at the same time!
	parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@" + sha256digest)
	require.NoError(t, err)
	_, ok = parsed.(reference.Canonical)
	require.True(t, ok)
	_, ok = parsed.(reference.NamedTagged)
	require.True(t, ok)
	_, err = NewReference("", parsed)
	assert.Error(t, err)
}
// TestReferenceTransport checks that both ID and named references report this transport.
func TestReferenceTransport(t *testing.T) {
	idRef, err := ParseReference(sha256digest)
	require.NoError(t, err)
	assert.Equal(t, Transport, idRef.Transport())
	namedRef, err := ParseReference("busybox:latest")
	require.NoError(t, err)
	assert.Equal(t, Transport, namedRef.Transport())
}
// TestReferenceStringWithinTransport checks StringWithinTransport for ID and
// named references, and that the output round-trips through ParseReference.
func TestReferenceStringWithinTransport(t *testing.T) {
	// ID references render as the bare digest.
	idRef, err := ParseReference(sha256digest)
	require.NoError(t, err)
	assert.Equal(t, sha256digest, idRef.StringWithinTransport())
	for _, tc := range validNamedReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		within := ref.StringWithinTransport()
		assert.Equal(t, tc.stringWithinTransport, within, tc.input)
		// Do one more round to verify that the output can be parsed, to an equal value.
		reparsed, err := Transport.ParseReference(within)
		require.NoError(t, err, tc.input)
		assert.Equal(t, within, reparsed.StringWithinTransport(), tc.input)
	}
}
// TestReferenceDockerReference checks DockerReference: nil for ID references,
// the normalized name for named references.
func TestReferenceDockerReference(t *testing.T) {
	idRef, err := ParseReference(sha256digest)
	require.NoError(t, err)
	assert.Nil(t, idRef.DockerReference())
	for _, tc := range validNamedReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		dockerRef := ref.DockerReference()
		require.NotNil(t, dockerRef, tc.input)
		assert.Equal(t, tc.dockerRef, dockerRef.String(), tc.input)
	}
}
// TestReferencePolicyConfigurationIdentity checks that docker-daemon
// references never have a policy configuration identity.
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	idRef, err := ParseReference(sha256digest)
	require.NoError(t, err)
	assert.Equal(t, "", idRef.PolicyConfigurationIdentity())
	for _, tc := range validNamedReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		assert.Equal(t, "", ref.PolicyConfigurationIdentity(), tc.input)
	}
}
// TestReferencePolicyConfigurationNamespaces checks that docker-daemon
// references have no policy configuration namespaces.
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	idRef, err := ParseReference(sha256digest)
	require.NoError(t, err)
	assert.Empty(t, idRef.PolicyConfigurationNamespaces())
	for _, tc := range validNamedReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		assert.Empty(t, ref.PolicyConfigurationNamespaces(), tc.input)
	}
}
// daemonReference.NewImage, daemonReference.NewImageSource, openshiftReference.NewImageDestination
// untested because just creating the objects immediately connects to the daemon.
// TestReferenceDeleteImage checks that DeleteImage is rejected for both ID and
// named docker-daemon references.
func TestReferenceDeleteImage(t *testing.T) {
	idRef, err := ParseReference(sha256digest)
	require.NoError(t, err)
	assert.Error(t, idRef.DeleteImage(nil))
	for _, tc := range validNamedReferenceTestCases {
		ref, err := ParseReference(tc.input)
		require.NoError(t, err, tc.input)
		assert.Error(t, ref.DeleteImage(nil), tc.input)
	}
}

View file

@ -34,6 +34,8 @@ const (
dockerCfgFileName = "config.json"
dockerCfgObsolete = ".dockercfg"
systemPerHostCertDirPath = "/etc/docker/certs.d"
resolvedPingV2URL = "%s://%s/v2/"
resolvedPingV1URL = "%s://%s/v1/_ping"
tagsPath = "/v2/%s/tags/list"
@ -129,12 +131,29 @@ func newTransport() *http.Transport {
return tr
}
func setupCertificates(dir string, tlsc *tls.Config) error {
if dir == "" {
return nil
// dockerCertDir returns a path to a directory to be consumed by setupCertificates() depending on ctx and hostPort.
// Precedence: ctx.DockerCertPath (used verbatim) > ctx.DockerPerHostCertDirPath >
// ctx.RootForImplicitAbsolutePaths-prefixed system dir > the system dir, with the
// latter three getting hostPort appended.
func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
	if ctx != nil && ctx.DockerCertPath != "" {
		return ctx.DockerCertPath
	}
	hostCertDir := systemPerHostCertDirPath
	if ctx != nil {
		switch {
		case ctx.DockerPerHostCertDirPath != "":
			hostCertDir = ctx.DockerPerHostCertDirPath
		case ctx.RootForImplicitAbsolutePaths != "":
			hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
		}
	}
	return filepath.Join(hostCertDir, hostPort)
}
func setupCertificates(dir string, tlsc *tls.Config) error {
logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
fs, err := ioutil.ReadDir(dir)
if err != nil && !os.IsNotExist(err) {
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
@ -146,7 +165,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
return errors.Wrap(err, "unable to get system cert pool")
}
tlsc.RootCAs = systemPool
logrus.Debugf("crt: %s", fullPath)
logrus.Debugf(" crt: %s", fullPath)
data, err := ioutil.ReadFile(fullPath)
if err != nil {
return err
@ -156,7 +175,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
if strings.HasSuffix(f.Name(), ".cert") {
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
logrus.Debugf("cert: %s", fullPath)
logrus.Debugf(" cert: %s", fullPath)
if !hasFile(fs, keyName) {
return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
}
@ -169,7 +188,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
if strings.HasSuffix(f.Name(), ".key") {
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
logrus.Debugf("key: %s", fullPath)
logrus.Debugf(" key: %s", fullPath)
if !hasFile(fs, certName) {
return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
}
@ -199,18 +218,18 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool,
return nil, err
}
tr := newTransport()
if ctx != nil && (ctx.DockerCertPath != "" || ctx.DockerInsecureSkipTLSVerify) {
tlsc := &tls.Config{}
if err := setupCertificates(ctx.DockerCertPath, tlsc); err != nil {
return nil, err
}
tlsc.InsecureSkipVerify = ctx.DockerInsecureSkipTLSVerify
tr.TLSClientConfig = tlsc
tr.TLSClientConfig = serverDefault()
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
// undocumented and may change if docker/docker changes.
certDir := dockerCertDir(ctx, reference.Domain(ref.ref))
if err := setupCertificates(certDir, tr.TLSClientConfig); err != nil {
return nil, err
}
if tr.TLSClientConfig == nil {
tr.TLSClientConfig = serverDefault()
if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
tr.TLSClientConfig.InsecureSkipVerify = true
}
client := &http.Client{Transport: tr}

View file

@ -1,432 +0,0 @@
package docker
import (
"encoding/base64"
"encoding/json"
//"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/homedir"
)
// TestGetAuth exercises getAuth's lookup of credentials in
// $HOME/.docker/config.json for a variety of hostnames, including hostname
// normalization and the SystemContext override path.
func TestGetAuth(t *testing.T) {
	origHomeDir := homedir.Get()
	tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("using temporary home directory: %q", tmpDir)
	// override homedir
	os.Setenv(homedir.Key(), tmpDir)
	defer func() {
		err := os.RemoveAll(tmpDir)
		if err != nil {
			t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err)
		}
		os.Setenv(homedir.Key(), origHomeDir)
	}()
	configDir := filepath.Join(tmpDir, ".docker")
	if err := os.Mkdir(configDir, 0750); err != nil {
		t.Fatal(err)
	}
	configPath := filepath.Join(configDir, "config.json")
	for _, tc := range []struct {
		name             string
		hostname         string
		authConfig       testAuthConfig
		expectedUsername string
		expectedPassword string
		expectedError    error
		ctx              *types.SystemContext
	}{
		{
			name:       "empty hostname",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{"localhost:5000": testAuthConfigData{"bob", "password"}}),
		},
		{
			name:     "no auth config",
			hostname: "index.docker.io",
		},
		{
			name:             "match one",
			hostname:         "example.org",
			authConfig:       makeTestAuthConfig(testAuthConfigDataMap{"example.org": testAuthConfigData{"joe", "mypass"}}),
			expectedUsername: "joe",
			expectedPassword: "mypass",
		},
		{
			name:       "match none",
			hostname:   "registry.example.org",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{"example.org": testAuthConfigData{"joe", "mypass"}}),
		},
		{
			name:     "match docker.io",
			hostname: "docker.io",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"example.org":     testAuthConfigData{"example", "org"},
				"index.docker.io": testAuthConfigData{"index", "docker.io"},
				"docker.io":       testAuthConfigData{"docker", "io"},
			}),
			expectedUsername: "docker",
			expectedPassword: "io",
		},
		{
			name:     "match docker.io normalized",
			hostname: "docker.io",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"example.org":                testAuthConfigData{"bob", "pw"},
				"https://index.docker.io/v1": testAuthConfigData{"alice", "wp"},
			}),
			expectedUsername: "alice",
			expectedPassword: "wp",
		},
		{
			name:     "normalize registry",
			hostname: "https://docker.io/v1",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"docker.io":      testAuthConfigData{"user", "pw"},
				"localhost:5000": testAuthConfigData{"joe", "pass"},
			}),
			expectedUsername: "user",
			expectedPassword: "pw",
		},
		{
			name:     "match localhost",
			hostname: "http://localhost",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"docker.io":   testAuthConfigData{"user", "pw"},
				"localhost":   testAuthConfigData{"joe", "pass"},
				"example.com": testAuthConfigData{"alice", "pwd"},
			}),
			expectedUsername: "joe",
			expectedPassword: "pass",
		},
		{
			name:     "match ip",
			hostname: "10.10.3.56:5000",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"10.10.30.45":     testAuthConfigData{"user", "pw"},
				"localhost":       testAuthConfigData{"joe", "pass"},
				"10.10.3.56":      testAuthConfigData{"alice", "pwd"},
				"10.10.3.56:5000": testAuthConfigData{"me", "mine"},
			}),
			expectedUsername: "me",
			expectedPassword: "mine",
		},
		{
			name:     "match port",
			hostname: "https://localhost:5000",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"https://127.0.0.1:5000":  testAuthConfigData{"user", "pw"},
				"http://localhost":        testAuthConfigData{"joe", "pass"},
				"https://localhost:5001":  testAuthConfigData{"alice", "pwd"},
				"localhost:5000":          testAuthConfigData{"me", "mine"},
			}),
			expectedUsername: "me",
			expectedPassword: "mine",
		},
		{
			name:     "use system context",
			hostname: "example.org",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"example.org": testAuthConfigData{"user", "pw"},
			}),
			expectedUsername: "foo",
			expectedPassword: "bar",
			ctx: &types.SystemContext{
				DockerAuthConfig: &types.DockerAuthConfig{
					Username: "foo",
					Password: "bar",
				},
			},
		},
	} {
		contents, err := json.MarshalIndent(&tc.authConfig, "", " ")
		if err != nil {
			t.Errorf("[%s] failed to marshal authConfig: %v", tc.name, err)
			continue
		}
		if err := ioutil.WriteFile(configPath, contents, 0640); err != nil {
			t.Errorf("[%s] failed to write file %q: %v", tc.name, configPath, err)
			continue
		}
		// tc.ctx is nil unless the case supplies an override; the previous
		// `var ctx; if tc.ctx != nil { ctx = tc.ctx }` dance was equivalent.
		ctx := tc.ctx
		username, password, err := getAuth(ctx, tc.hostname)
		if err == nil && tc.expectedError != nil {
			t.Errorf("[%s] got unexpected non error and username=%q, password=%q", tc.name, username, password)
			continue
		}
		if err != nil && tc.expectedError == nil {
			t.Errorf("[%s] got unexpected error: %#+v", tc.name, err)
			continue
		}
		if !reflect.DeepEqual(err, tc.expectedError) {
			t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError)
			continue
		}
		if username != tc.expectedUsername {
			t.Errorf("[%s] got unexpected user name: %q != %q", tc.name, username, tc.expectedUsername)
		}
		// Fixed copy-paste: this message previously said "user name" too.
		if password != tc.expectedPassword {
			t.Errorf("[%s] got unexpected password: %q != %q", tc.name, password, tc.expectedPassword)
		}
	}
}
// TestGetAuthFromLegacyFile exercises credential lookup from the obsolete
// $HOME/.dockercfg format (a bare auths map, no wrapper object).
func TestGetAuthFromLegacyFile(t *testing.T) {
	origHomeDir := homedir.Get()
	tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("using temporary home directory: %q", tmpDir)
	// override homedir
	os.Setenv(homedir.Key(), tmpDir)
	defer func() {
		err := os.RemoveAll(tmpDir)
		if err != nil {
			t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err)
		}
		os.Setenv(homedir.Key(), origHomeDir)
	}()
	configPath := filepath.Join(tmpDir, ".dockercfg")
	for _, tc := range []struct {
		name             string
		hostname         string
		authConfig       testAuthConfig
		expectedUsername string
		expectedPassword string
		expectedError    error
	}{
		{
			name:     "normalize registry",
			hostname: "https://docker.io/v1",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"docker.io":      testAuthConfigData{"user", "pw"},
				"localhost:5000": testAuthConfigData{"joe", "pass"},
			}),
			expectedUsername: "user",
			expectedPassword: "pw",
		},
		{
			name:     "ignore schema and path",
			hostname: "http://index.docker.io/v1",
			authConfig: makeTestAuthConfig(testAuthConfigDataMap{
				"docker.io/v2":         testAuthConfigData{"user", "pw"},
				"https://localhost/v1": testAuthConfigData{"joe", "pwd"},
			}),
			expectedUsername: "user",
			expectedPassword: "pw",
		},
	} {
		// The legacy format is the bare Auths map, not the whole object.
		contents, err := json.MarshalIndent(&tc.authConfig.Auths, "", " ")
		if err != nil {
			t.Errorf("[%s] failed to marshal authConfig: %v", tc.name, err)
			continue
		}
		if err := ioutil.WriteFile(configPath, contents, 0640); err != nil {
			t.Errorf("[%s] failed to write file %q: %v", tc.name, configPath, err)
			continue
		}
		username, password, err := getAuth(nil, tc.hostname)
		if err == nil && tc.expectedError != nil {
			t.Errorf("[%s] got unexpected non error and username=%q, password=%q", tc.name, username, password)
			continue
		}
		if err != nil && tc.expectedError == nil {
			t.Errorf("[%s] got unexpected error: %#+v", tc.name, err)
			continue
		}
		if !reflect.DeepEqual(err, tc.expectedError) {
			t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError)
			continue
		}
		if username != tc.expectedUsername {
			t.Errorf("[%s] got unexpected user name: %q != %q", tc.name, username, tc.expectedUsername)
		}
		// Fixed copy-paste: this message previously said "user name" too.
		if password != tc.expectedPassword {
			t.Errorf("[%s] got unexpected password: %q != %q", tc.name, password, tc.expectedPassword)
		}
	}
}
// TestGetAuthPreferNewConfig verifies that when both .docker/config.json and
// the legacy .dockercfg exist, the new config.json wins.
func TestGetAuthPreferNewConfig(t *testing.T) {
	origHomeDir := homedir.Get()
	tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("using temporary home directory: %q", tmpDir)
	// override homedir
	os.Setenv(homedir.Key(), tmpDir)
	defer func() {
		err := os.RemoveAll(tmpDir)
		if err != nil {
			t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err)
		}
		os.Setenv(homedir.Key(), origHomeDir)
	}()
	configDir := filepath.Join(tmpDir, ".docker")
	if err := os.Mkdir(configDir, 0750); err != nil {
		t.Fatal(err)
	}
	for _, data := range []struct {
		path string
		ac   interface{}
	}{
		{
			filepath.Join(configDir, "config.json"),
			makeTestAuthConfig(testAuthConfigDataMap{
				"https://index.docker.io/v1/": testAuthConfigData{"alice", "pass"},
			}),
		},
		{
			filepath.Join(tmpDir, ".dockercfg"),
			makeTestAuthConfig(testAuthConfigDataMap{
				"https://index.docker.io/v1/": testAuthConfigData{"bob", "pw"},
			}).Auths,
		},
	} {
		contents, err := json.MarshalIndent(&data.ac, "", " ")
		if err != nil {
			t.Fatalf("failed to marshal authConfig: %v", err)
		}
		if err := ioutil.WriteFile(data.path, contents, 0640); err != nil {
			t.Fatalf("failed to write file %q: %v", data.path, err)
		}
	}
	username, password, err := getAuth(nil, "index.docker.io")
	if err != nil {
		t.Fatalf("got unexpected error: %#+v", err)
	}
	if username != "alice" {
		t.Fatalf("got unexpected user name: %q != %q", username, "alice")
	}
	// Fixed copy-paste: this message previously said "user name" too.
	if password != "pass" {
		t.Fatalf("got unexpected password: %q != %q", password, "pass")
	}
}
// TestGetAuthFailsOnBadInput verifies that getAuth tolerates missing config
// files (returning empty credentials) but reports a JSON syntax error for
// malformed ones, in both the new and the legacy locations.
func TestGetAuthFailsOnBadInput(t *testing.T) {
	origHomeDir := homedir.Get()
	tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("using temporary home directory: %q", tmpDir)
	// override homedir
	os.Setenv(homedir.Key(), tmpDir)
	defer func() {
		err := os.RemoveAll(tmpDir)
		if err != nil {
			t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err)
		}
		os.Setenv(homedir.Key(), origHomeDir)
	}()
	configDir := filepath.Join(tmpDir, ".docker")
	if err := os.Mkdir(configDir, 0750); err != nil {
		t.Fatal(err)
	}
	configPath := filepath.Join(configDir, "config.json")
	// no config file present
	username, password, err := getAuth(nil, "index.docker.io")
	if err != nil {
		t.Fatalf("got unexpected error: %#+v", err)
	}
	if len(username) > 0 || len(password) > 0 {
		t.Fatalf("got unexpected not empty username/password: %q/%q", username, password)
	}
	if err := ioutil.WriteFile(configPath, []byte("Json rocks! Unless it doesn't."), 0640); err != nil {
		t.Fatalf("failed to write file %q: %v", configPath, err)
	}
	username, password, err = getAuth(nil, "index.docker.io")
	if err == nil {
		t.Fatalf("got unexpected non-error: username=%q, password=%q", username, password)
	}
	// Fixed message: the assertion checks for *json.SyntaxError, but the
	// message used to claim os.PathError.
	if _, ok := err.(*json.SyntaxError); !ok {
		t.Fatalf("expected *json.SyntaxError, not: %#+v", err)
	}
	// remove the invalid config file
	os.RemoveAll(configPath)
	// no config file present
	username, password, err = getAuth(nil, "index.docker.io")
	if err != nil {
		t.Fatalf("got unexpected error: %#+v", err)
	}
	if len(username) > 0 || len(password) > 0 {
		t.Fatalf("got unexpected not empty username/password: %q/%q", username, password)
	}
	configPath = filepath.Join(tmpDir, ".dockercfg")
	if err := ioutil.WriteFile(configPath, []byte("I'm certainly not a json string."), 0640); err != nil {
		t.Fatalf("failed to write file %q: %v", configPath, err)
	}
	username, password, err = getAuth(nil, "index.docker.io")
	if err == nil {
		t.Fatalf("got unexpected non-error: username=%q, password=%q", username, password)
	}
	if _, ok := err.(*json.SyntaxError); !ok {
		t.Fatalf("expected *json.SyntaxError, not: %#+v", err)
	}
}
// testAuthConfigData is the plaintext credential pair for one registry entry.
type testAuthConfigData struct {
	username string
	password string
}

// testAuthConfigDataMap maps a registry host to its plaintext credentials.
type testAuthConfigDataMap map[string]testAuthConfigData

// testAuthConfigEntry mirrors one entry of the "auths" map in config.json.
type testAuthConfigEntry struct {
	Auth string `json:"auth,omitempty"`
}

// testAuthConfig mirrors the on-disk layout of ~/.docker/config.json.
type testAuthConfig struct {
	Auths map[string]testAuthConfigEntry `json:"auths"`
}

// encodeAuth creates an auth value from given authConfig data to be stored in auth config file.
// Inspired by github.com/docker/docker/cliconfig/config.go v1.10.3.
func encodeAuth(authConfig *testAuthConfigData) string {
	// EncodeToString replaces the manual EncodedLen/Encode/string dance.
	return base64.StdEncoding.EncodeToString([]byte(authConfig.username + ":" + authConfig.password))
}

// makeTestAuthConfig builds a testAuthConfig with a base64-encoded entry per host.
func makeTestAuthConfig(authConfigData map[string]testAuthConfigData) testAuthConfig {
	ac := testAuthConfig{
		Auths: make(map[string]testAuthConfigEntry, len(authConfigData)),
	}
	for host, data := range authConfigData {
		ac.Auths[host] = testAuthConfigEntry{
			Auth: encodeAuth(&data),
		}
	}
	return ac
}

View file

@ -1,24 +0,0 @@
package docker
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSimplifyContentType(t *testing.T) {
for _, c := range []struct{ input, expected string }{
{"", ""},
{"application/json", "application/json"},
{"application/json;charset=utf-8", "application/json"},
{"application/json; charset=utf-8", "application/json"},
{"application/json ; charset=utf-8", "application/json"},
{"application/json\t;\tcharset=utf-8", "application/json"},
{"application/json ;charset=utf-8", "application/json"},
{`application/json; charset="utf-8"`, "application/json"},
{"completely invalid", ""},
} {
out := simplifyContentType(c.input)
assert.Equal(t, c.expected, out, c.input)
}
}

View file

@ -1,196 +0,0 @@
package docker
import (
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// sha256digestHex is an arbitrary but syntactically valid hex digest value.
	sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// sha256digest is the digest in "@sha256:..." reference-suffix form.
	sha256digest = "@sha256:" + sha256digestHex
)
// TestTransportName checks the transport's registered name.
func TestTransportName(t *testing.T) {
	assert.Equal(t, "docker", Transport.Name())
}
// TestTransportParseReference delegates to the shared testParseReference helper.
func TestTransportParseReference(t *testing.T) {
	testParseReference(t, Transport.ParseReference)
}
// TestTransportValidatePolicyConfigurationScope checks that the docker
// transport accepts scopes at every namespace level.
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
	scopes := []string{
		"docker.io/library/busybox" + sha256digest,
		"docker.io/library/busybox:notlatest",
		"docker.io/library/busybox",
		"docker.io/library",
		"docker.io",
	}
	for _, scope := range scopes {
		assert.NoError(t, Transport.ValidatePolicyConfigurationScope(scope), scope)
	}
}
// TestParseReference delegates to the shared testParseReference helper.
func TestParseReference(t *testing.T) {
	testParseReference(t, ParseReference)
}
// testParseReference is a test shared for Transport.ParseReference and ParseReference.
// Each case gives an input string and the expected normalized reference ("" if
// the input must be rejected).
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
	for _, c := range []struct{ input, expected string }{
		{"busybox", ""}, // Missing // prefix
		{"//busybox:notlatest", "docker.io/library/busybox:notlatest"},           // Explicit tag
		{"//busybox" + sha256digest, "docker.io/library/busybox" + sha256digest}, // Explicit digest
		{"//busybox", "docker.io/library/busybox:latest"},                        // Default tag
		// A github.com/distribution/reference value can have a tag and a digest at the same time!
		// The docker/distribution API does not really support that (we can't ask for an image with a specific
		// tag and digest), so fail. This MAY be accepted in the future.
		{"//busybox:latest" + sha256digest, ""},                                    // Both tag and digest
		{"//docker.io/library/busybox:latest", "docker.io/library/busybox:latest"}, // All implied values explicitly specified
		{"//UPPERCASEISINVALID", ""},                                               // Invalid input
	} {
		ref, err := fn(c.input)
		if c.expected == "" {
			assert.Error(t, err, c.input)
		} else {
			require.NoError(t, err, c.input)
			dockerRef, ok := ref.(dockerReference)
			require.True(t, ok, c.input)
			assert.Equal(t, c.expected, dockerRef.ref.String(), c.input)
		}
	}
}
// A common list of reference formats to test for the various ImageReference methods.
// dockerRef is the fully normalized form; stringWithinTransport is the expected
// StringWithinTransport() output (always with the leading "//").
var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{
	{"busybox:notlatest", "docker.io/library/busybox:notlatest", "//busybox:notlatest"},                // Explicit tag
	{"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "//busybox" + sha256digest}, // Explicit digest
	{"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "//busybox:latest"},       // All implied values explicitly specified
	{"example.com/ns/foo:bar", "example.com/ns/foo:bar", "//example.com/ns/foo:bar"},                   // All values explicitly specified
}
// TestNewReference checks NewReference: it accepts references carrying a tag
// or a digest, and rejects references with neither — or with both.
func TestNewReference(t *testing.T) {
	for _, c := range validReferenceTestCases {
		parsed, err := reference.ParseNormalizedNamed(c.input)
		require.NoError(t, err)
		ref, err := NewReference(parsed)
		require.NoError(t, err, c.input)
		dockerRef, ok := ref.(dockerReference)
		require.True(t, ok, c.input)
		assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input)
	}

	// Neither a tag nor digest
	parsed, err := reference.ParseNormalizedNamed("busybox")
	require.NoError(t, err)
	_, err = NewReference(parsed)
	assert.Error(t, err)

	// A github.com/distribution/reference value can have a tag and a digest at the same time!
	// NewReference must reject that combination.
	parsed, err = reference.ParseNormalizedNamed("busybox:notlatest" + sha256digest)
	require.NoError(t, err)
	_, ok := parsed.(reference.Canonical)
	require.True(t, ok)
	_, ok = parsed.(reference.NamedTagged)
	require.True(t, ok)
	_, err = NewReference(parsed)
	assert.Error(t, err)
}
// TestReferenceTransport verifies that references produced by ParseReference
// report the docker Transport.
func TestReferenceTransport(t *testing.T) {
	parsed, err := ParseReference("//busybox")
	require.NoError(t, err)
	assert.Equal(t, Transport, parsed.Transport())
}
// TestReferenceStringWithinTransport checks StringWithinTransport output and
// that it round-trips: the returned string must parse back to an equal value.
func TestReferenceStringWithinTransport(t *testing.T) {
	for _, c := range validReferenceTestCases {
		ref, err := ParseReference("//" + c.input)
		require.NoError(t, err, c.input)
		stringRef := ref.StringWithinTransport()
		assert.Equal(t, c.stringWithinTransport, stringRef, c.input)
		// Do one more round to verify that the output can be parsed, to an equal value.
		ref2, err := Transport.ParseReference(stringRef)
		require.NoError(t, err, c.input)
		stringRef2 := ref2.StringWithinTransport()
		assert.Equal(t, stringRef, stringRef2, c.input)
	}
}
// TestReferenceDockerReference checks that DockerReference returns the fully
// normalized underlying reference.
func TestReferenceDockerReference(t *testing.T) {
	for _, c := range validReferenceTestCases {
		ref, err := ParseReference("//" + c.input)
		require.NoError(t, err, c.input)
		dockerRef := ref.DockerReference()
		require.NotNil(t, dockerRef, c.input)
		assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
	}
}
// TestReferencePolicyConfigurationIdentity is a smoke test for
// PolicyConfigurationIdentity.
// Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	ref, err := ParseReference("//busybox")
	require.NoError(t, err)
	assert.Equal(t, "docker.io/library/busybox:latest", ref.PolicyConfigurationIdentity())
}
// TestReferencePolicyConfigurationNamespaces is a smoke test for
// PolicyConfigurationNamespaces, expected to go from most to least specific.
// Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	ref, err := ParseReference("//busybox")
	require.NoError(t, err)
	assert.Equal(t, []string{
		"docker.io/library/busybox",
		"docker.io/library",
		"docker.io",
	}, ref.PolicyConfigurationNamespaces())
}
// TestReferenceNewImage smoke-tests creating a types.Image from a docker
// reference.  RegistriesDirPath points at a nonexistent directory so no
// host-level registries.d configuration is read.
func TestReferenceNewImage(t *testing.T) {
	ref, err := ParseReference("//busybox")
	require.NoError(t, err)
	img, err := ref.NewImage(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist"})
	// require, not assert: if NewImage fails, img is nil and the deferred
	// Close below would panic, masking the real error.
	require.NoError(t, err)
	defer img.Close()
}
// TestReferenceNewImageSource smoke-tests creating an ImageSource from a
// docker reference, with host registries.d configuration disabled.
func TestReferenceNewImageSource(t *testing.T) {
	ref, err := ParseReference("//busybox")
	require.NoError(t, err)
	src, err := ref.NewImageSource(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist"}, nil)
	// require, not assert: on failure src is nil and the deferred Close
	// would panic, hiding the underlying error.
	require.NoError(t, err)
	defer src.Close()
}
// TestReferenceNewImageDestination smoke-tests creating an ImageDestination
// from a docker reference, with host registries.d configuration disabled.
func TestReferenceNewImageDestination(t *testing.T) {
	ref, err := ParseReference("//busybox")
	require.NoError(t, err)
	dest, err := ref.NewImageDestination(&types.SystemContext{RegistriesDirPath: "/this/doesnt/exist"})
	// require, not assert: on failure dest is nil and the deferred Close
	// would panic, hiding the underlying error.
	require.NoError(t, err)
	defer dest.Close()
}
// TestReferenceTagOrDigest checks tagOrDigest: the tag for tagged references,
// the "sha256:…" digest for digested ones, and an error for neither.
func TestReferenceTagOrDigest(t *testing.T) {
	for input, expected := range map[string]string{
		"//busybox:notlatest":       "notlatest",
		"//busybox" + sha256digest: "sha256:" + sha256digestHex,
	} {
		ref, err := ParseReference(input)
		require.NoError(t, err, input)
		dockerRef, ok := ref.(dockerReference)
		require.True(t, ok, input)
		tod, err := dockerRef.tagOrDigest()
		require.NoError(t, err, input)
		assert.Equal(t, expected, tod, input)
	}

	// Invalid input: a reference with neither a tag nor a digest
	// (constructed directly, bypassing ParseReference's tag defaulting).
	ref, err := reference.ParseNormalizedNamed("busybox")
	require.NoError(t, err)
	dockerRef := dockerReference{ref: ref}
	_, err = dockerRef.tagOrDigest()
	assert.Error(t, err)
}

View file

@ -1,14 +0,0 @@
# registries.d test fixture: per-registry signature-storage (sigstore)
# namespace entries consumed by the signature-storage configuration tests.
# "localhost/invalid/url/test" deliberately carries an unparsable URL.
docker:
  example.com:
    sigstore: https://sigstore.example.com
  registry.test.example.com:
    sigstore: http://registry.test.example.com/sigstore
  registry.test.example.com:8888:
    sigstore: http://registry.test.example.com:8889/sigstore
    sigstore-staging: https://registry.test.example.com:8889/sigstore/specialAPIserverWhichDoesntExist
  localhost:
    sigstore: file:///home/mitr/mydevelopment1
  localhost:8080:
    sigstore: file:///home/mitr/mydevelopment2
  localhost/invalid/url/test:
    sigstore: ":emptyscheme"

View file

@ -1,12 +0,0 @@
# registries.d test fixture: a default-docker fallback plus repository- and
# tag-scoped docker entries, exercising namespace matching precedence.
default-docker:
  sigstore: file:///mnt/companywide/signatures/for/other/repositories
docker:
  docker.io/contoso:
    sigstore: https://sigstore.contoso.com/fordocker
  docker.io/centos:
    sigstore: https://sigstore.centos.org/
  docker.io/centos/mybetaprooduct:
    sigstore: http://localhost:9999/mybetaWIP/sigstore
    sigstore-staging: file:///srv/mybetaWIP/sigstore
  docker.io/centos/mybetaproduct:latest:
    sigstore: https://sigstore.centos.org/

View file

@ -1,278 +0,0 @@
package docker
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"testing"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// dockerRefFromString parses s into a dockerReference, failing the test
// immediately on any parse or type-assertion error.
func dockerRefFromString(t *testing.T, s string) dockerReference {
	parsed, err := ParseReference(s)
	require.NoError(t, err, s)
	asDocker, ok := parsed.(dockerReference)
	require.True(t, ok, s)
	return asDocker
}
// TestConfiguredSignatureStorageBase checks configuredSignatureStorageBase:
// configuration-read errors, no-match (nil base), invalid sigstore URLs,
// and the successful base-URL + repository-path composition.
func TestConfiguredSignatureStorageBase(t *testing.T) {
	// Error reading configuration directory (/dev/null is not a directory)
	_, err := configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "/dev/null"},
		dockerRefFromString(t, "//busybox"), false)
	assert.Error(t, err)

	// No match found: a nil base with no error
	emptyDir, err := ioutil.TempDir("", "empty-dir")
	require.NoError(t, err)
	defer os.RemoveAll(emptyDir)
	base, err := configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: emptyDir},
		dockerRefFromString(t, "//this/is/not/in/the:configuration"), false)
	assert.NoError(t, err)
	assert.Nil(t, base)

	// Invalid URL (the fixture maps this repo to ":emptyscheme")
	_, err = configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "fixtures/registries.d"},
		dockerRefFromString(t, "//localhost/invalid/url/test"), false)
	assert.Error(t, err)

	// Success: the repository path is appended to the configured sigstore base
	base, err = configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "fixtures/registries.d"},
		dockerRefFromString(t, "//example.com/my/project"), false)
	assert.NoError(t, err)
	require.NotNil(t, base)
	assert.Equal(t, "https://sigstore.example.com/my/project", (*url.URL)(base).String())
}
// TestRegistriesDirPath checks the resolution of the registries.d directory:
// the system default, explicit overrides, implicit-root prefixing, and that
// an explicit path always wins over the root prefix.
func TestRegistriesDirPath(t *testing.T) {
	const nondefaultPath = "/this/is/not/the/default/registries.d"
	const variableReference = "$HOME"
	const rootPrefix = "/root/prefix"

	for _, c := range []struct {
		ctx      *types.SystemContext
		expected string
	}{
		// The common case
		{nil, systemRegistriesDirPath},
		// There is a context, but it does not override the path.
		{&types.SystemContext{}, systemRegistriesDirPath},
		// Path overridden
		{&types.SystemContext{RegistriesDirPath: nondefaultPath}, nondefaultPath},
		// Root overridden
		{
			&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
			filepath.Join(rootPrefix, systemRegistriesDirPath),
		},
		// Root and path overrides present simultaneously,
		// the explicit path wins.
		{
			&types.SystemContext{
				RootForImplicitAbsolutePaths: rootPrefix,
				RegistriesDirPath:            nondefaultPath,
			},
			nondefaultPath,
		},
		// No environment expansion happens in the overridden paths
		{&types.SystemContext{RegistriesDirPath: variableReference}, variableReference},
	} {
		path := registriesDirPath(c.ctx)
		assert.Equal(t, c.expected, path)
	}
}
// TestLoadAndMergeConfig checks loading and merging of registries.d YAML
// fragments: missing/empty/unreadable directories, unreadable files, invalid
// YAML, duplicate-key conflicts across files, and a full fixture merge.
func TestLoadAndMergeConfig(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "merge-config")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	// No registries.d exists
	config, err := loadAndMergeConfig(filepath.Join(tmpDir, "thisdoesnotexist"))
	require.NoError(t, err)
	assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config)

	// Empty registries.d directory
	emptyDir := filepath.Join(tmpDir, "empty")
	err = os.Mkdir(emptyDir, 0755)
	require.NoError(t, err)
	config, err = loadAndMergeConfig(emptyDir)
	require.NoError(t, err)
	assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config)

	// Unreadable registries.d directory
	unreadableDir := filepath.Join(tmpDir, "unreadable")
	err = os.Mkdir(unreadableDir, 0000)
	require.NoError(t, err)
	config, err = loadAndMergeConfig(unreadableDir)
	assert.Error(t, err)

	// An unreadable file in a registries.d directory
	unreadableFileDir := filepath.Join(tmpDir, "unreadableFile")
	err = os.Mkdir(unreadableFileDir, 0755)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(unreadableFileDir, "0.yaml"), []byte("{}"), 0644)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(unreadableFileDir, "1.yaml"), nil, 0000)
	require.NoError(t, err)
	config, err = loadAndMergeConfig(unreadableFileDir)
	assert.Error(t, err)

	// Invalid YAML
	invalidYAMLDir := filepath.Join(tmpDir, "invalidYAML")
	err = os.Mkdir(invalidYAMLDir, 0755)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(invalidYAMLDir, "0.yaml"), []byte("}"), 0644)
	require.NoError(t, err)
	config, err = loadAndMergeConfig(invalidYAMLDir)
	assert.Error(t, err)

	// Duplicate DefaultDocker across two files: the error names both files
	duplicateDefault := filepath.Join(tmpDir, "duplicateDefault")
	err = os.Mkdir(duplicateDefault, 0755)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(duplicateDefault, "0.yaml"),
		[]byte("default-docker:\n sigstore: file:////tmp/something"), 0644)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(duplicateDefault, "1.yaml"),
		[]byte("default-docker:\n sigstore: file:////tmp/different"), 0644)
	require.NoError(t, err)
	config, err = loadAndMergeConfig(duplicateDefault)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "0.yaml")
	assert.Contains(t, err.Error(), "1.yaml")

	// Duplicate docker: namespace entry across two files
	// (the original comment here said "Duplicate DefaultDocker" — copy-paste)
	duplicateNS := filepath.Join(tmpDir, "duplicateNS")
	err = os.Mkdir(duplicateNS, 0755)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(duplicateNS, "0.yaml"),
		[]byte("docker:\n example.com:\n  sigstore: file:////tmp/something"), 0644)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(duplicateNS, "1.yaml"),
		[]byte("docker:\n example.com:\n  sigstore: file:////tmp/different"), 0644)
	require.NoError(t, err)
	config, err = loadAndMergeConfig(duplicateNS)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "0.yaml")
	assert.Contains(t, err.Error(), "1.yaml")

	// A fully worked example, including an empty-dictionary file and a non-.yaml file
	config, err = loadAndMergeConfig("fixtures/registries.d")
	require.NoError(t, err)
	assert.Equal(t, &registryConfiguration{
		DefaultDocker: &registryNamespace{SigStore: "file:///mnt/companywide/signatures/for/other/repositories"},
		Docker: map[string]registryNamespace{
			"example.com":                    {SigStore: "https://sigstore.example.com"},
			"registry.test.example.com":      {SigStore: "http://registry.test.example.com/sigstore"},
			"registry.test.example.com:8888": {SigStore: "http://registry.test.example.com:8889/sigstore", SigStoreStaging: "https://registry.test.example.com:8889/sigstore/specialAPIserverWhichDoesntExist"},
			"localhost":                      {SigStore: "file:///home/mitr/mydevelopment1"},
			"localhost:8080":                 {SigStore: "file:///home/mitr/mydevelopment2"},
			"localhost/invalid/url/test":     {SigStore: ":emptyscheme"},
			"docker.io/contoso":              {SigStore: "https://sigstore.contoso.com/fordocker"},
			"docker.io/centos":               {SigStore: "https://sigstore.centos.org/"},
			"docker.io/centos/mybetaprooduct": {
				SigStore:        "http://localhost:9999/mybetaWIP/sigstore",
				SigStoreStaging: "file:///srv/mybetaWIP/sigstore",
			},
			"docker.io/centos/mybetaproduct:latest": {SigStore: "https://sigstore.centos.org/"},
		},
	}, config)
}
// TestRegistryConfigurationSignaureTopLevel checks registryConfiguration.signatureTopLevel:
// the most specific matching docker: namespace wins, DefaultDocker is the
// fallback, and forWriting selects the staging URL.
// NOTE(review): "Signaure" in the name is a typo for "Signature"; kept as-is
// to leave the exported test name stable.
func TestRegistryConfigurationSignaureTopLevel(t *testing.T) {
	config := registryConfiguration{
		DefaultDocker: &registryNamespace{SigStore: "=default", SigStoreStaging: "=default+w"},
		Docker:        map[string]registryNamespace{},
	}
	// Each namespace maps to itself (and itself+"+w" for staging) so that the
	// assertions below can tell exactly which entry matched.
	for _, ns := range []string{
		"localhost",
		"localhost:5000",
		"example.com",
		"example.com/ns1",
		"example.com/ns1/ns2",
		"example.com/ns1/ns2/repo",
		"example.com/ns1/ns2/repo:notlatest",
	} {
		config.Docker[ns] = registryNamespace{SigStore: ns, SigStoreStaging: ns + "+w"}
	}

	for _, c := range []struct{ input, expected string }{
		{"example.com/ns1/ns2/repo:notlatest", "example.com/ns1/ns2/repo:notlatest"},
		{"example.com/ns1/ns2/repo:unmatched", "example.com/ns1/ns2/repo"},
		{"example.com/ns1/ns2/notrepo:notlatest", "example.com/ns1/ns2"},
		{"example.com/ns1/notns2/repo:notlatest", "example.com/ns1"},
		{"example.com/notns1/ns2/repo:notlatest", "example.com"},
		{"unknown.example.com/busybox", "=default"},
		{"localhost:5000/busybox", "localhost:5000"},
		{"localhost/busybox", "localhost"},
		{"localhost:9999/busybox", "=default"},
	} {
		dr := dockerRefFromString(t, "//"+c.input)
		res := config.signatureTopLevel(dr, false)
		assert.Equal(t, c.expected, res, c.input)
		res = config.signatureTopLevel(dr, true) // test that forWriting is correctly propagated
		assert.Equal(t, c.expected+"+w", res, c.input)
	}

	// No match and no DefaultDocker: returns ""
	config = registryConfiguration{
		Docker: map[string]registryNamespace{
			"unmatched": {SigStore: "a", SigStoreStaging: "b"},
		},
	}
	dr := dockerRefFromString(t, "//thisisnotmatched")
	res := config.signatureTopLevel(dr, false)
	assert.Equal(t, "", res)
	res = config.signatureTopLevel(dr, true)
	assert.Equal(t, "", res)
}
// TestRegistryNamespaceSignatureTopLevel checks registryNamespace.signatureTopLevel:
// forWriting prefers SigStoreStaging, falling back to SigStore; reading never
// uses the staging URL.
func TestRegistryNamespaceSignatureTopLevel(t *testing.T) {
	for _, c := range []struct {
		ns         registryNamespace
		forWriting bool
		expected   string
	}{
		{registryNamespace{SigStoreStaging: "a", SigStore: "b"}, true, "a"},
		{registryNamespace{SigStoreStaging: "a", SigStore: "b"}, false, "b"},
		{registryNamespace{SigStore: "b"}, true, "b"},
		{registryNamespace{SigStore: "b"}, false, "b"},
		{registryNamespace{SigStoreStaging: "a"}, true, "a"},
		{registryNamespace{SigStoreStaging: "a"}, false, ""},
		{registryNamespace{}, true, ""},
		{registryNamespace{}, false, ""},
	} {
		res := c.ns.signatureTopLevel(c.forWriting)
		assert.Equal(t, c.expected, res, fmt.Sprintf("%#v %v", c.ns, c.forWriting))
	}
}
// TestSignatureStorageBaseSignatureStorageURL checks signatureStorageURL:
// a nil base yields nil; otherwise the manifest digest (with ":" mapped to
// "=") and a 1-based "signature-N" component are appended to the base URL.
func TestSignatureStorageBaseSignatureStorageURL(t *testing.T) {
	const mdInput = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	const mdMapped = "sha256=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

	// Signature storage not configured.
	assert.True(t, signatureStorageURL(nil, mdInput, 0) == nil)
	for _, c := range []struct {
		base     string
		index    int
		expected string
	}{
		{"file:///tmp", 0, "file:///tmp@" + mdMapped + "/signature-1"},
		{"file:///tmp", 1, "file:///tmp@" + mdMapped + "/signature-2"},
		{"https://localhost:5555/root", 0, "https://localhost:5555/root@" + mdMapped + "/signature-1"},
		{"https://localhost:5555/root", 1, "https://localhost:5555/root@" + mdMapped + "/signature-2"},
		{"http://localhost:5555/root", 0, "http://localhost:5555/root@" + mdMapped + "/signature-1"},
		{"http://localhost:5555/root", 1, "http://localhost:5555/root@" + mdMapped + "/signature-2"},
	} {
		// Named baseURL (not "url") so the net/url package is not shadowed;
		// the original shadowed it, turning the second Parse call into the
		// (*url.URL).Parse method (relative resolution) by accident.
		baseURL, err := url.Parse(c.base)
		require.NoError(t, err)
		expectedURL, err := url.Parse(c.expected)
		require.NoError(t, err)
		res := signatureStorageURL(baseURL, mdInput, c.index)
		assert.Equal(t, expectedURL, res, c.expected)
	}
}

View file

@ -1,79 +0,0 @@
package policyconfiguration
import (
"fmt"
"strings"
"testing"
"github.com/containers/image/docker/reference"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDockerReference tests DockerReferenceIdentity and DockerReferenceNamespaces simulatenously
// to ensure they are consistent: the identity must be the most specific
// namespace plus the tag/digest suffix, and each namespace must be a prefix
// of the next more specific one.
func TestDockerReference(t *testing.T) {
	sha256Digest := "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// Test both that DockerReferenceIdentity returns the expected value (fullName+suffix),
	// and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are
	// consistent.
	for inputName, expectedNS := range map[string][]string{
		"example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"},
		"example.com/repo":    {"example.com/repo", "example.com"},
		"localhost/ns/repo":   {"localhost/ns/repo", "localhost/ns", "localhost"},
		// Note that "localhost" is special here: notlocalhost/repo is parsed as docker.io/notlocalhost.repo:
		"localhost/repo":         {"localhost/repo", "localhost"},
		"notlocalhost/repo":      {"docker.io/notlocalhost/repo", "docker.io/notlocalhost", "docker.io"},
		"docker.io/ns/repo":      {"docker.io/ns/repo", "docker.io/ns", "docker.io"},
		"docker.io/library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"},
		"docker.io/repo":         {"docker.io/library/repo", "docker.io/library", "docker.io"},
		"ns/repo":                {"docker.io/ns/repo", "docker.io/ns", "docker.io"},
		"library/repo":           {"docker.io/library/repo", "docker.io/library", "docker.io"},
		"repo":                   {"docker.io/library/repo", "docker.io/library", "docker.io"},
	} {
		// Run each name with both a tag suffix and a digest suffix.
		for inputSuffix, mappedSuffix := range map[string]string{
			":tag":        ":tag",
			sha256Digest: sha256Digest,
		} {
			fullInput := inputName + inputSuffix
			ref, err := reference.ParseNormalizedNamed(fullInput)
			require.NoError(t, err, fullInput)

			identity, err := DockerReferenceIdentity(ref)
			require.NoError(t, err, fullInput)
			assert.Equal(t, expectedNS[0]+mappedSuffix, identity, fullInput)

			ns := DockerReferenceNamespaces(ref)
			require.NotNil(t, ns, fullInput)
			require.Len(t, ns, len(expectedNS), fullInput)
			moreSpecific := identity
			// Namespaces go from most to least specific; each must prefix the previous.
			for i := range expectedNS {
				assert.Equal(t, ns[i], expectedNS[i], fmt.Sprintf("%s item %d", fullInput, i))
				assert.True(t, strings.HasPrefix(moreSpecific, ns[i]))
				moreSpecific = ns[i]
			}
		}
	}
}
// TestDockerReferenceIdentity covers the failure cases of DockerReferenceIdentity.
// TestDockerReference above has tested the core of the functionality, this tests only the failure cases.
func TestDockerReferenceIdentity(t *testing.T) {
	// Neither a tag nor digest
	parsed, err := reference.ParseNormalizedNamed("busybox")
	require.NoError(t, err)
	id, err := DockerReferenceIdentity(parsed)
	assert.Equal(t, "", id)
	assert.Error(t, err)

	// A github.com/distribution/reference value can have a tag and a digest at the same time!
	// DockerReferenceIdentity must reject that combination.
	parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
	require.NoError(t, err)
	_, ok := parsed.(reference.Canonical)
	require.True(t, ok)
	_, ok = parsed.(reference.NamedTagged)
	require.True(t, ok)
	id, err = DockerReferenceIdentity(parsed)
	assert.Equal(t, "", id)
	assert.Error(t, err)
}

View file

@ -1,573 +0,0 @@
package reference
import (
"strconv"
"testing"
"github.com/opencontainers/go-digest"
)
// TestValidateReferenceName checks ParseNormalizedNamed against lists of
// repository names that must parse and names that must be rejected.
func TestValidateReferenceName(t *testing.T) {
	validRepoNames := []string{
		"docker/docker",
		"library/debian",
		"debian",
		"docker.io/docker/docker",
		"docker.io/library/debian",
		"docker.io/debian",
		"index.docker.io/docker/docker",
		"index.docker.io/library/debian",
		"index.docker.io/debian",
		"127.0.0.1:5000/docker/docker",
		"127.0.0.1:5000/library/debian",
		"127.0.0.1:5000/debian",
		"thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",

		// This test case was moved from invalid to valid since it is valid input
		// when specified with a hostname, it removes the ambiguity from about
		// whether the value is an identifier or repository name
		"docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
	}
	invalidRepoNames := []string{
		"https://github.com/docker/docker",
		"docker/Docker",
		"-docker",
		"-docker/docker",
		"-docker.io/docker/docker",
		"docker///docker",
		"docker.io/docker/Docker",
		"docker.io/docker///docker",
		// A bare 64-hex-character name is ambiguous with an image ID.
		"1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
	}

	for _, name := range invalidRepoNames {
		_, err := ParseNormalizedNamed(name)
		if err == nil {
			t.Fatalf("Expected invalid repo name for %q", name)
		}
	}

	for _, name := range validRepoNames {
		_, err := ParseNormalizedNamed(name)
		if err != nil {
			t.Fatalf("Error parsing repo name %s, got: %q", name, err)
		}
	}
}
// TestValidateRemoteName checks ParseNormalizedNamed's namespace/repository
// character rules: hyphens, underscores, periods, length limits.
func TestValidateRemoteName(t *testing.T) {
	validRepositoryNames := []string{
		// Sanity check.
		"docker/docker",

		// Allow 64-character non-hexadecimal names (hexadecimal names are forbidden).
		"thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",

		// Allow embedded hyphens.
		"docker-rules/docker",

		// Allow multiple hyphens as well.
		"docker---rules/docker",

		//Username doc and image name docker being tested.
		"doc/docker",

		// single character names are now allowed.
		"d/docker",
		"jess/t",

		// Consecutive underscores.
		"dock__er/docker",
	}
	for _, repositoryName := range validRepositoryNames {
		_, err := ParseNormalizedNamed(repositoryName)
		if err != nil {
			t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err)
		}
	}

	invalidRepositoryNames := []string{
		// Disallow capital letters.
		"docker/Docker",

		// Only allow one slash.
		"docker///docker",

		// Disallow 64-character hexadecimal.
		"1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",

		// Disallow leading and trailing hyphens in namespace.
		"-docker/docker",
		"docker-/docker",
		"-docker-/docker",

		// Don't allow underscores everywhere (as opposed to hyphens).
		"____/____",

		"_docker/_docker",

		// Disallow consecutive periods.
		"dock..er/docker",
		"dock_.er/docker",
		"dock-.er/docker",

		// No repository.
		"docker/",

		//namespace too long
		"this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker",
	}
	for _, repositoryName := range invalidRepositoryNames {
		if _, err := ParseNormalizedNamed(repositoryName); err == nil {
			t.Errorf("Repository name should be invalid: %v", repositoryName)
		}
	}
}
// TestParseRepositoryInfo checks that ParseNormalizedNamed yields consistent
// FamiliarName, String (full name), Domain, and Path values for each input,
// including the optional "ambiguous" spelling (e.g. index.docker.io).
func TestParseRepositoryInfo(t *testing.T) {
	type tcase struct {
		RemoteName, FamiliarName, FullName, AmbiguousName, Domain string
	}

	tcases := []tcase{
		{
			RemoteName:    "fooo/bar",
			FamiliarName:  "fooo/bar",
			FullName:      "docker.io/fooo/bar",
			AmbiguousName: "index.docker.io/fooo/bar",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "library/ubuntu",
			FamiliarName:  "ubuntu",
			FullName:      "docker.io/library/ubuntu",
			AmbiguousName: "library/ubuntu",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "nonlibrary/ubuntu",
			FamiliarName:  "nonlibrary/ubuntu",
			FullName:      "docker.io/nonlibrary/ubuntu",
			AmbiguousName: "",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "other/library",
			FamiliarName:  "other/library",
			FullName:      "docker.io/other/library",
			AmbiguousName: "",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "private/moonbase",
			FamiliarName:  "127.0.0.1:8000/private/moonbase",
			FullName:      "127.0.0.1:8000/private/moonbase",
			AmbiguousName: "",
			Domain:        "127.0.0.1:8000",
		},
		{
			RemoteName:    "privatebase",
			FamiliarName:  "127.0.0.1:8000/privatebase",
			FullName:      "127.0.0.1:8000/privatebase",
			AmbiguousName: "",
			Domain:        "127.0.0.1:8000",
		},
		{
			RemoteName:    "private/moonbase",
			FamiliarName:  "example.com/private/moonbase",
			FullName:      "example.com/private/moonbase",
			AmbiguousName: "",
			Domain:        "example.com",
		},
		{
			RemoteName:    "privatebase",
			FamiliarName:  "example.com/privatebase",
			FullName:      "example.com/privatebase",
			AmbiguousName: "",
			Domain:        "example.com",
		},
		{
			RemoteName:    "private/moonbase",
			FamiliarName:  "example.com:8000/private/moonbase",
			FullName:      "example.com:8000/private/moonbase",
			AmbiguousName: "",
			Domain:        "example.com:8000",
		},
		{
			RemoteName:    "privatebasee",
			FamiliarName:  "example.com:8000/privatebasee",
			FullName:      "example.com:8000/privatebasee",
			AmbiguousName: "",
			Domain:        "example.com:8000",
		},
		{
			RemoteName:    "library/ubuntu-12.04-base",
			FamiliarName:  "ubuntu-12.04-base",
			FullName:      "docker.io/library/ubuntu-12.04-base",
			AmbiguousName: "index.docker.io/library/ubuntu-12.04-base",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "library/foo",
			FamiliarName:  "foo",
			FullName:      "docker.io/library/foo",
			AmbiguousName: "docker.io/foo",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "library/foo/bar",
			FamiliarName:  "library/foo/bar",
			FullName:      "docker.io/library/foo/bar",
			AmbiguousName: "",
			Domain:        "docker.io",
		},
		{
			RemoteName:    "store/foo/bar",
			FamiliarName:  "store/foo/bar",
			FullName:      "docker.io/store/foo/bar",
			AmbiguousName: "",
			Domain:        "docker.io",
		},
	}

	for _, tcase := range tcases {
		// Every spelling of the same reference must normalize identically.
		refStrings := []string{tcase.FamiliarName, tcase.FullName}
		if tcase.AmbiguousName != "" {
			refStrings = append(refStrings, tcase.AmbiguousName)
		}

		var refs []Named
		for _, r := range refStrings {
			named, err := ParseNormalizedNamed(r)
			if err != nil {
				t.Fatal(err)
			}
			refs = append(refs, named)
		}

		for _, r := range refs {
			if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual {
				t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual)
			}
			if expected, actual := tcase.FullName, r.String(); expected != actual {
				t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual)
			}
			if expected, actual := tcase.Domain, Domain(r); expected != actual {
				t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual)
			}
			if expected, actual := tcase.RemoteName, Path(r); expected != actual {
				t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual)
			}
		}
	}
}
// TestParseReferenceWithTagAndDigest checks that a reference carrying both a
// tag and a digest parses, satisfies both NamedTagged and Canonical, and
// round-trips through FamiliarString.
func TestParseReferenceWithTagAndDigest(t *testing.T) {
	shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"
	ref, err := ParseNormalizedNamed(shortRef)
	if err != nil {
		t.Fatal(err)
	}
	if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected {
		t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual)
	}

	if _, isTagged := ref.(NamedTagged); !isTagged {
		t.Fatalf("Reference from %q should support tag", ref)
	}
	if _, isCanonical := ref.(Canonical); !isCanonical {
		t.Fatalf("Reference from %q should support digest", ref)
	}
	if expected, actual := shortRef, FamiliarString(ref); actual != expected {
		t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual)
	}
}
// TestInvalidReferenceComponents checks that invalid names, tags, and digests
// are each rejected by the corresponding constructor.
func TestInvalidReferenceComponents(t *testing.T) {
	if _, err := ParseNormalizedNamed("-foo"); err == nil {
		t.Fatal("Expected WithName to detect invalid name")
	}
	ref, err := ParseNormalizedNamed("busybox")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := WithTag(ref, "-foo"); err == nil {
		t.Fatal("Expected WithName to detect invalid tag")
	}
	if _, err := WithDigest(ref, digest.Digest("foo")); err == nil {
		t.Fatal("Expected WithDigest to detect invalid digest")
	}
}
// equalReference reports whether r1 and r2 are the same concrete reference
// type holding equal values; references of different concrete types are
// never considered equal.
func equalReference(r1, r2 Reference) bool {
	switch v1 := r1.(type) {
	case digestReference:
		v2, ok := r2.(digestReference)
		return ok && v1 == v2
	case repository:
		v2, ok := r2.(repository)
		return ok && v1 == v2
	case taggedReference:
		v2, ok := r2.(taggedReference)
		return ok && v1 == v2
	case canonicalReference:
		v2, ok := r2.(canonicalReference)
		return ok && v1 == v2
	case reference:
		v2, ok := r2.(reference)
		return ok && v1 == v2
	}
	return false
}
// TestParseAnyReference checks ParseAnyReference: named references normalize
// to docker.io forms, full 64-hex strings become digestReferences, and a
// 63-hex string falls through to a normal repository name.
func TestParseAnyReference(t *testing.T) {
	tcases := []struct {
		Reference  string
		Equivalent string
		Expected   Reference
	}{
		{
			Reference:  "redis",
			Equivalent: "docker.io/library/redis",
		},
		{
			Reference:  "redis:latest",
			Equivalent: "docker.io/library/redis:latest",
		},
		{
			Reference:  "docker.io/library/redis:latest",
			Equivalent: "docker.io/library/redis:latest",
		},
		{
			Reference:  "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
			Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
		},
		{
			Reference:  "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
			Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
		},
		{
			Reference:  "dmcgowan/myapp",
			Equivalent: "docker.io/dmcgowan/myapp",
		},
		{
			Reference:  "dmcgowan/myapp:latest",
			Equivalent: "docker.io/dmcgowan/myapp:latest",
		},
		{
			Reference:  "docker.io/mcgowan/myapp:latest",
			Equivalent: "docker.io/mcgowan/myapp:latest",
		},
		{
			Reference:  "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
			Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
		},
		{
			Reference:  "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
			Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
		},
		{
			// A bare 64-hex-character string is treated as a digest, not a name.
			Reference:  "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
			Expected:   digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
			Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
		},
		{
			Reference:  "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
			Expected:   digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"),
			Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c",
		},
		{
			// One hex character short of a digest: parsed as a repository name.
			Reference:  "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
			Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9",
		},
	}

	for _, tcase := range tcases {
		var ref Reference
		var err error
		ref, err = ParseAnyReference(tcase.Reference)
		if err != nil {
			t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err)
		}
		if ref.String() != tcase.Equivalent {
			t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent)
		}

		expected := tcase.Expected
		if expected == nil {
			expected, err = Parse(tcase.Equivalent)
			if err != nil {
				t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err)
			}
		}
		if !equalReference(ref, expected) {
			t.Errorf("Unexpected reference %#v, expected %#v", ref, expected)
		}
	}
}
// TestNormalizedSplitHostname checks SplitHostname on normalized references:
// names without an explicit (dotted/port-carrying) domain fall back to
// docker.io, and single-component docker.io names gain the library/ prefix.
func TestNormalizedSplitHostname(t *testing.T) {
	testcases := []struct {
		input  string
		domain string
		name   string
	}{
		{
			input:  "test.com/foo",
			domain: "test.com",
			name:   "foo",
		},
		{
			// Underscores are invalid in hostnames, so this is a path component.
			input:  "test_com/foo",
			domain: "docker.io",
			name:   "test_com/foo",
		},
		{
			input:  "docker/migrator",
			domain: "docker.io",
			name:   "docker/migrator",
		},
		{
			input:  "test.com:8080/foo",
			domain: "test.com:8080",
			name:   "foo",
		},
		{
			input:  "test-com:8080/foo",
			domain: "test-com:8080",
			name:   "foo",
		},
		{
			input:  "foo",
			domain: "docker.io",
			name:   "library/foo",
		},
		{
			input:  "xn--n3h.com/foo",
			domain: "xn--n3h.com",
			name:   "foo",
		},
		{
			input:  "xn--n3h.com:18080/foo",
			domain: "xn--n3h.com:18080",
			name:   "foo",
		},
		{
			input:  "docker.io/foo",
			domain: "docker.io",
			name:   "library/foo",
		},
		{
			input:  "docker.io/library/foo",
			domain: "docker.io",
			name:   "library/foo",
		},
		{
			input:  "docker.io/library/foo/bar",
			domain: "docker.io",
			name:   "library/foo/bar",
		},
	}
	for _, testcase := range testcases {
		// failf records a failure without aborting, so all cases are reported.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
			t.Fail()
		}

		named, err := ParseNormalizedNamed(testcase.input)
		if err != nil {
			failf("error parsing name: %s", err)
		}
		domain, name := SplitHostname(named)
		if domain != testcase.domain {
			failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
		}
		if name != testcase.name {
			failf("unexpected name: got %q, expected %q", name, testcase.name)
		}
	}
}
// TestMatchError verifies that FamiliarMatch returns an error when handed a
// malformed pattern ("[-x]" is not a well-formed match expression).
func TestMatchError(t *testing.T) {
	ref, parseErr := ParseAnyReference("foo")
	if parseErr != nil {
		t.Fatal(parseErr)
	}
	if _, matchErr := FamiliarMatch("[-x]", ref); matchErr == nil {
		t.Fatalf("expected an error, got nothing")
	}
}
// TestMatch exercises FamiliarMatch with glob-style patterns against parsed
// references, checking only the boolean outcome against the expectation for
// each (reference, pattern) pair.
func TestMatch(t *testing.T) {
	matchCases := []struct {
		reference string // reference string passed through ParseAnyReference
		pattern   string // pattern handed to FamiliarMatch
		expected  bool   // expected match result
	}{
		{
			reference: "foo",
			pattern:   "foo/**/ba[rz]",
			expected:  false,
		},
		{
			reference: "foo/any/bat",
			pattern:   "foo/**/ba[rz]",
			expected:  false,
		},
		{
			reference: "foo/a/bar",
			pattern:   "foo/**/ba[rz]",
			expected:  true,
		},
		{
			reference: "foo/b/baz",
			pattern:   "foo/**/ba[rz]",
			expected:  true,
		},
		{
			reference: "foo/c/baz:tag",
			pattern:   "foo/**/ba[rz]",
			expected:  true,
		},
		{
			reference: "foo/c/baz:tag",
			pattern:   "foo/*/baz:tag",
			expected:  true,
		},
		{
			reference: "foo/c/baz:tag",
			pattern:   "foo/c/baz:tag",
			expected:  true,
		},
		{
			reference: "example.com/foo/c/baz:tag",
			pattern:   "*/foo/c/baz",
			expected:  true,
		},
		{
			reference: "example.com/foo/c/baz:tag",
			pattern:   "example.com/foo/c/baz",
			expected:  true,
		},
	}
	for _, c := range matchCases {
		named, err := ParseAnyReference(c.reference)
		if err != nil {
			t.Fatal(err)
		}
		actual, err := FamiliarMatch(c.pattern, named)
		if err != nil {
			t.Fatal(err)
		}
		if actual != c.expected {
			t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual)
		}
	}
}

View file

@ -1,659 +0,0 @@
package reference
import (
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/json"
"strconv"
"strings"
"testing"
"github.com/opencontainers/go-digest"
)
// TestReferenceParse exercises Parse over a unified table of well-formed and
// malformed reference strings. For valid inputs it checks the round-tripped
// string form and the Named/Tagged/Digested components; for invalid inputs it
// checks the exact error returned.
func TestReferenceParse(t *testing.T) {
	// referenceTestcases is a unified set of testcases for
	// testing the parsing of references
	referenceTestcases := []struct {
		// input is the repository name or name component testcase
		input string
		// err is the error expected from Parse, or nil
		err error
		// repository is the string representation for the reference
		repository string
		// domain is the domain expected in the reference
		domain string
		// tag is the tag for the reference
		tag string
		// digest is the digest for the reference (enforces digest reference)
		digest string
	}{
		{
			input:      "test_com",
			repository: "test_com",
		},
		{
			input:      "test.com:tag",
			repository: "test.com",
			tag:        "tag",
		},
		{
			input:      "test.com:5000",
			repository: "test.com",
			tag:        "5000",
		},
		{
			input:      "test.com/repo:tag",
			domain:     "test.com",
			repository: "test.com/repo",
			tag:        "tag",
		},
		{
			input:      "test:5000/repo",
			domain:     "test:5000",
			repository: "test:5000/repo",
		},
		{
			input:      "test:5000/repo:tag",
			domain:     "test:5000",
			repository: "test:5000/repo",
			tag:        "tag",
		},
		{
			input:      "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			domain:     "test:5000",
			repository: "test:5000/repo",
			digest:     "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
		},
		{
			input:      "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			domain:     "test:5000",
			repository: "test:5000/repo",
			tag:        "tag",
			digest:     "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
		},
		{
			input:      "test:5000/repo",
			domain:     "test:5000",
			repository: "test:5000/repo",
		},
		{
			input: "",
			err:   ErrNameEmpty,
		},
		{
			input: ":justtag",
			err:   ErrReferenceInvalidFormat,
		},
		{
			input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			err:   ErrReferenceInvalidFormat,
		},
		{
			input: "repo@sha256:ffffffffffffffffffffffffffffffffff",
			err:   digest.ErrDigestInvalidLength,
		},
		{
			input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			err:   digest.ErrDigestUnsupported,
		},
		{
			input: "Uppercase:tag",
			err:   ErrNameContainsUppercase,
		},
		// FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes.
		// See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175
		//{
		//	input: "Uppercase/lowercase:tag",
		//	err:   ErrNameContainsUppercase,
		//},
		{
			input: "test:5000/Uppercase/lowercase:tag",
			err:   ErrNameContainsUppercase,
		},
		{
			// Uppercase is allowed in the tag, just not in the name.
			input:      "lowercase:Uppercase",
			repository: "lowercase",
			tag:        "Uppercase",
		},
		{
			input: strings.Repeat("a/", 128) + "a:tag",
			err:   ErrNameTooLong,
		},
		{
			// The length limit applies to the name only, not name plus tag.
			input:      strings.Repeat("a/", 127) + "a:tag-puts-this-over-max",
			domain:     "a",
			repository: strings.Repeat("a/", 127) + "a",
			tag:        "tag-puts-this-over-max",
		},
		{
			input: "aa/asdf$$^/aa",
			err:   ErrReferenceInvalidFormat,
		},
		{
			input:      "sub-dom1.foo.com/bar/baz/quux",
			domain:     "sub-dom1.foo.com",
			repository: "sub-dom1.foo.com/bar/baz/quux",
		},
		{
			input:      "sub-dom1.foo.com/bar/baz/quux:some-long-tag",
			domain:     "sub-dom1.foo.com",
			repository: "sub-dom1.foo.com/bar/baz/quux",
			tag:        "some-long-tag",
		},
		{
			input:      "b.gcr.io/test.example.com/my-app:test.example.com",
			domain:     "b.gcr.io",
			repository: "b.gcr.io/test.example.com/my-app",
			tag:        "test.example.com",
		},
		{
			input:      "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode
			domain:     "xn--n3h.com",
			repository: "xn--n3h.com/myimage",
			tag:        "xn--n3h.com",
		},
		{
			input:      "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode
			domain:     "xn--7o8h.com",
			repository: "xn--7o8h.com/myimage",
			tag:        "xn--7o8h.com",
			digest:     "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
		},
		{
			input:      "foo_bar.com:8080",
			repository: "foo_bar.com",
			tag:        "8080",
		},
		{
			input:      "foo/foo_bar.com:8080",
			domain:     "foo",
			repository: "foo/foo_bar.com",
			tag:        "8080",
		},
	}
	for _, testcase := range referenceTestcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
			t.Fail()
		}

		repo, err := Parse(testcase.input)
		if testcase.err != nil {
			if err == nil {
				failf("missing expected error: %v", testcase.err)
			} else if testcase.err != err {
				failf("mismatched error: got %v, expected %v", err, testcase.err)
			}
			continue
		} else if err != nil {
			failf("unexpected parse error: %v", err)
			continue
		}
		// String() must round-trip the input exactly.
		if repo.String() != testcase.input {
			failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input)
		}

		if named, ok := repo.(Named); ok {
			if named.Name() != testcase.repository {
				failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository)
			}
			domain, _ := SplitHostname(named)
			if domain != testcase.domain {
				failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
			}
		} else if testcase.repository != "" || testcase.domain != "" {
			failf("expected named type, got %T", repo)
		}

		tagged, ok := repo.(Tagged)
		if testcase.tag != "" {
			if ok {
				if tagged.Tag() != testcase.tag {
					failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
				}
			} else {
				failf("expected tagged type, got %T", repo)
			}
		} else if ok {
			failf("unexpected tagged type")
		}

		digested, ok := repo.(Digested)
		if testcase.digest != "" {
			if ok {
				if digested.Digest().String() != testcase.digest {
					failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
				}
			} else {
				failf("expected digested type, got %T", repo)
			}
		} else if ok {
			failf("unexpected digested type")
		}
	}
}
// TestWithNameFailure tests cases where WithName should fail. Cases where it
// should succeed are covered by TestSplitHostname, below.
func TestWithNameFailure(t *testing.T) {
	testcases := []struct {
		input string
		err   error // expected error (only its presence is asserted below)
	}{
		{
			input: "",
			err:   ErrNameEmpty,
		},
		{
			input: ":justtag",
			err:   ErrReferenceInvalidFormat,
		},
		{
			input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			err:   ErrReferenceInvalidFormat,
		},
		{
			input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
			err:   ErrReferenceInvalidFormat,
		},
		{
			input: strings.Repeat("a/", 128) + "a:tag",
			err:   ErrNameTooLong,
		},
		{
			input: "aa/asdf$$^/aa",
			err:   ErrReferenceInvalidFormat,
		},
	}
	for _, testcase := range testcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
			t.Fail()
		}

		// NOTE(review): only the presence of an error is checked here, not
		// that it equals testcase.err.
		_, err := WithName(testcase.input)
		if err == nil {
			failf("no error parsing name. expected: %s", testcase.err)
		}
	}
}
// TestSplitHostname verifies SplitHostname on references built with WithName.
// Unlike the normalized variant, no defaulting happens: an input without a
// recognizable registry component yields an empty domain.
func TestSplitHostname(t *testing.T) {
	testcases := []struct {
		input  string // name passed to WithName
		domain string // expected domain component ("" when none detected)
		name   string // expected remaining name component
	}{
		{
			input:  "test.com/foo",
			domain: "test.com",
			name:   "foo",
		},
		{
			// "test_com" is not a valid hostname, so the whole input is the name.
			input:  "test_com/foo",
			domain: "",
			name:   "test_com/foo",
		},
		{
			input:  "test:8080/foo",
			domain: "test:8080",
			name:   "foo",
		},
		{
			input:  "test.com:8080/foo",
			domain: "test.com:8080",
			name:   "foo",
		},
		{
			input:  "test-com:8080/foo",
			domain: "test-com:8080",
			name:   "foo",
		},
		{
			input:  "xn--n3h.com:18080/foo",
			domain: "xn--n3h.com:18080",
			name:   "foo",
		},
	}
	for _, testcase := range testcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
			t.Fail()
		}

		named, err := WithName(testcase.input)
		if err != nil {
			failf("error parsing name: %s", err)
		}
		domain, name := SplitHostname(named)
		if domain != testcase.domain {
			failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
		}
		if name != testcase.name {
			failf("unexpected name: got %q, expected %q", name, testcase.name)
		}
	}
}
// serializationType is a helper for exercising JSON round-tripping of Field:
// Description is an arbitrary sibling member and Field carries the reference
// under test.
type serializationType struct {
	Description string
	Field       Field
}
// TestSerialization exercises JSON round-tripping of the Field wrapper: a
// reference string is unmarshalled into a Field, its Named/Tagged/Digested
// components are checked, and marshalling the result back must reproduce the
// original JSON byte-for-byte. It also verifies that Field does not itself
// satisfy the Reference interface.
func TestSerialization(t *testing.T) {
	testcases := []struct {
		description string
		input       string // reference string placed in the "Field" JSON member
		name        string // expected Named.Name(); "" if not expected to be Named
		tag         string // expected Tagged.Tag(); "" if not expected to be Tagged
		digest      string // expected Digested.Digest(); "" if not expected to be Digested
		err         error  // expected unmarshalling error, if any
	}{
		{
			description: "empty value",
			err:         ErrNameEmpty,
		},
		{
			description: "just a name",
			input:       "example.com:8000/named",
			name:        "example.com:8000/named",
		},
		{
			description: "name with a tag",
			input:       "example.com:8000/named:tagged",
			name:        "example.com:8000/named",
			tag:         "tagged",
		},
		{
			description: "name with digest",
			input:       "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112",
			name:        "other.com/named",
			digest:      "sha256:1234567890098765432112345667890098765432112345667890098765432112",
		},
	}
	for _, testcase := range testcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
			t.Fail()
		}

		m := map[string]string{
			"Description": testcase.description,
			"Field":       testcase.input,
		}
		b, err := json.Marshal(m)
		if err != nil {
			failf("error marshalling: %v", err)
		}
		// Named "st" rather than "t" so that the *testing.T parameter is not
		// shadowed inside the loop body.
		st := serializationType{}
		if err := json.Unmarshal(b, &st); err != nil {
			if testcase.err == nil {
				failf("error unmarshalling: %v", err)
			}
			if err != testcase.err {
				failf("wrong error, expected %v, got %v", testcase.err, err)
			}
			continue
		} else if testcase.err != nil {
			failf("expected error unmarshalling: %v", testcase.err)
		}
		if st.Description != testcase.description {
			failf("wrong description, expected %q, got %q", testcase.description, st.Description)
		}

		ref := st.Field.Reference()
		if named, ok := ref.(Named); ok {
			if named.Name() != testcase.name {
				failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name)
			}
		} else if testcase.name != "" {
			failf("expected named type, got %T", ref)
		}

		tagged, ok := ref.(Tagged)
		if testcase.tag != "" {
			if ok {
				if tagged.Tag() != testcase.tag {
					failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag)
				}
			} else {
				failf("expected tagged type, got %T", ref)
			}
		} else if ok {
			failf("unexpected tagged type")
		}

		digested, ok := ref.(Digested)
		if testcase.digest != "" {
			if ok {
				if digested.Digest().String() != testcase.digest {
					failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest)
				}
			} else {
				failf("expected digested type, got %T", ref)
			}
		} else if ok {
			failf("unexpected digested type")
		}

		// Round-trip: marshalling the parsed reference must reproduce the
		// exact JSON we started from.
		st = serializationType{
			Description: testcase.description,
			Field:       AsField(ref),
		}
		b2, err := json.Marshal(st)
		if err != nil {
			failf("error marshalling serialization type: %v", err)
		}
		if string(b) != string(b2) {
			failf("unexpected serialized value: expected %q, got %q", string(b), string(b2))
		}

		// Ensure st.Field is not implementing "Reference" directly, getting
		// around the Reference type system
		var fieldInterface interface{} = st.Field
		if _, ok := fieldInterface.(Reference); ok {
			failf("field should not implement Reference interface")
		}
	}
}
// TestWithTag verifies that WithTag attaches a tag to a Named reference and
// produces the expected combined string form; when the name already carries a
// digest, the tag is inserted between the name and the digest.
func TestWithTag(t *testing.T) {
	testcases := []struct {
		name     string        // base repository name
		digest   digest.Digest // optional digest attached before tagging
		tag      string        // tag to attach
		combined string        // expected full string form
	}{
		{
			name:     "test.com/foo",
			tag:      "tag",
			combined: "test.com/foo:tag",
		},
		{
			name:     "foo",
			tag:      "tag2",
			combined: "foo:tag2",
		},
		{
			name:     "test.com:8000/foo",
			tag:      "tag4",
			combined: "test.com:8000/foo:tag4",
		},
		{
			name:     "test.com:8000/foo",
			tag:      "TAG5",
			combined: "test.com:8000/foo:TAG5",
		},
		{
			name:     "test.com:8000/foo",
			digest:   "sha256:1234567890098765432112345667890098765",
			tag:      "TAG5",
			combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765",
		},
	}
	for _, testcase := range testcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
			t.Fail()
		}

		named, err := WithName(testcase.name)
		if err != nil {
			failf("error parsing name: %s", err)
		}
		if testcase.digest != "" {
			canonical, err := WithDigest(named, testcase.digest)
			if err != nil {
				// Include the underlying error in the failure message.
				failf("error adding digest: %v", err)
			}
			named = canonical
		}

		tagged, err := WithTag(named, testcase.tag)
		if err != nil {
			failf("WithTag failed: %s", err)
		}
		if tagged.String() != testcase.combined {
			failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined)
		}
	}
}
// TestWithDigest verifies that WithDigest attaches a digest to a Named
// reference and produces the expected combined string form; when the name is
// already tagged, the tag is preserved ahead of the digest.
func TestWithDigest(t *testing.T) {
	testcases := []struct {
		name     string        // base repository name
		digest   digest.Digest // digest to attach
		tag      string        // optional tag attached before the digest
		combined string        // expected full string form
	}{
		{
			name:     "test.com/foo",
			digest:   "sha256:1234567890098765432112345667890098765",
			combined: "test.com/foo@sha256:1234567890098765432112345667890098765",
		},
		{
			name:     "foo",
			digest:   "sha256:1234567890098765432112345667890098765",
			combined: "foo@sha256:1234567890098765432112345667890098765",
		},
		{
			name:     "test.com:8000/foo",
			digest:   "sha256:1234567890098765432112345667890098765",
			combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765",
		},
		{
			name:     "test.com:8000/foo",
			digest:   "sha256:1234567890098765432112345667890098765",
			tag:      "latest",
			combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765",
		},
	}
	for _, testcase := range testcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.name)+": "+format, v...)
			t.Fail()
		}

		named, err := WithName(testcase.name)
		if err != nil {
			failf("error parsing name: %s", err)
		}
		if testcase.tag != "" {
			tagged, err := WithTag(named, testcase.tag)
			if err != nil {
				// Include the underlying error in the failure message.
				failf("error adding tag: %v", err)
			}
			named = tagged
		}

		digested, err := WithDigest(named, testcase.digest)
		if err != nil {
			failf("WithDigest failed: %s", err)
		}
		if digested.String() != testcase.combined {
			failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
		}
	}
}
// TestParseNamed checks ParseNamed, which accepts only fully canonical
// references (explicit domain, fully qualified name) and rejects shorthand
// forms with ErrNameNotCanonical.
func TestParseNamed(t *testing.T) {
	testcases := []struct {
		input  string
		domain string // expected domain on success
		name   string // expected name on success
		err    error  // expected error, or nil
	}{
		{
			input:  "test.com/foo",
			domain: "test.com",
			name:   "foo",
		},
		{
			input:  "test:8080/foo",
			domain: "test:8080",
			name:   "foo",
		},
		{
			input: "test_com/foo",
			err:   ErrNameNotCanonical,
		},
		{
			input: "test.com",
			err:   ErrNameNotCanonical,
		},
		{
			input: "foo",
			err:   ErrNameNotCanonical,
		},
		{
			input: "library/foo",
			err:   ErrNameNotCanonical,
		},
		{
			input:  "docker.io/library/foo",
			domain: "docker.io",
			name:   "library/foo",
		},
		// Ambiguous case, parser will add "library/" to foo
		{
			input: "docker.io/foo",
			err:   ErrNameNotCanonical,
		},
	}
	for _, testcase := range testcases {
		// failf records a failure for the current case without stopping the
		// remaining iterations.
		failf := func(format string, v ...interface{}) {
			t.Logf(strconv.Quote(testcase.input)+": "+format, v...)
			t.Fail()
		}

		named, err := ParseNamed(testcase.input)
		if err != nil && testcase.err == nil {
			failf("error parsing name: %s", err)
			continue
		} else if err == nil && testcase.err != nil {
			// Typo fixed: "succeded" -> "succeeded".
			failf("parsing succeeded: expected error %v", testcase.err)
			continue
		} else if err != testcase.err {
			failf("unexpected error %v, expected %v", err, testcase.err)
			continue
		} else if err != nil {
			// Expected error observed; nothing further to check.
			continue
		}

		domain, name := SplitHostname(named)
		if domain != testcase.domain {
			failf("unexpected domain: got %q, expected %q", domain, testcase.domain)
		}
		if name != testcase.name {
			failf("unexpected name: got %q, expected %q", name, testcase.name)
		}
	}
}

View file

@ -1,553 +0,0 @@
package reference
import (
"regexp"
"strings"
"testing"
)
// regexpMatch describes one regexp testcase: the input string, whether it is
// expected to match, and (on a match) the expected submatch values in order.
type regexpMatch struct {
	input string
	match bool
	subs  []string
}
// checkRegexp runs a single regexpMatch case against r: it verifies whether
// the input matches as expected and, on an expected match, that the full
// match spans the input and every recorded submatch equals the expected one.
func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) {
	got := r.FindStringSubmatch(m.input)
	switch {
	case m.match && got != nil:
		if len(got) != r.NumSubexp()+1 || got[0] != m.input {
			t.Fatalf("Bad match result %#v for %q", got, m.input)
		}
		if len(got) < len(m.subs)+1 {
			t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(got)-1, m.input)
		}
		for i, want := range m.subs {
			if got[i+1] != want {
				t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, got[i+1], want, m.input)
			}
		}
	case m.match:
		t.Errorf("Expected match for %q", m.input)
	case got != nil:
		t.Errorf("Unexpected match for %q", m.input)
	}
}
// TestDomainRegexp checks the domainRegexp component (anchored for the test)
// against valid and invalid registry host strings: hostnames, ports,
// hyphen placement, punycode, and case.
func TestDomainRegexp(t *testing.T) {
	hostcases := []regexpMatch{
		{
			input: "test.com",
			match: true,
		},
		{
			input: "test.com:10304",
			match: true,
		},
		{
			// Port must be numeric.
			input: "test.com:http",
			match: false,
		},
		{
			input: "localhost",
			match: true,
		},
		{
			input: "localhost:8080",
			match: true,
		},
		{
			input: "a",
			match: true,
		},
		{
			input: "a.b",
			match: true,
		},
		{
			input: "ab.cd.com",
			match: true,
		},
		{
			input: "a-b.com",
			match: true,
		},
		{
			// Labels must not begin or end with a hyphen.
			input: "-ab.com",
			match: false,
		},
		{
			input: "ab-.com",
			match: false,
		},
		{
			input: "ab.c-om",
			match: true,
		},
		{
			input: "ab.-com",
			match: false,
		},
		{
			input: "ab.com-",
			match: false,
		},
		{
			input: "0101.com",
			match: true, // TODO(dmcgowan): validate whether this should be allowed
		},
		{
			input: "001a.com",
			match: true,
		},
		{
			input: "b.gbc.io:443",
			match: true,
		},
		{
			input: "b.gbc.io",
			match: true,
		},
		{
			input: "xn--n3h.com", // ☃.com in punycode
			match: true,
		},
		{
			input: "Asdf.com", // uppercase character
			match: true,
		},
	}
	// Anchor the component regexp so each case must match the whole input.
	r := regexp.MustCompile(`^` + domainRegexp.String() + `$`)
	for i := range hostcases {
		checkRegexp(t, r, hostcases[i])
	}
}
// TestFullNameRegexp checks anchoredNameRegexp, whose two submatches are the
// optional domain and the remote name, against a broad table of valid and
// invalid repository names.
func TestFullNameRegexp(t *testing.T) {
	// The regexp contract this test depends on: exactly two capture groups.
	if anchoredNameRegexp.NumSubexp() != 2 {
		t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2",
			anchoredNameRegexp, anchoredNameRegexp.NumSubexp())
	}

	testcases := []regexpMatch{
		{
			input: "",
			match: false,
		},
		{
			input: "short",
			match: true,
			subs:  []string{"", "short"},
		},
		{
			input: "simple/name",
			match: true,
			subs:  []string{"simple", "name"},
		},
		{
			input: "library/ubuntu",
			match: true,
			subs:  []string{"library", "ubuntu"},
		},
		{
			input: "docker/stevvooe/app",
			match: true,
			subs:  []string{"docker", "stevvooe/app"},
		},
		{
			input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
			match: true,
			subs:  []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"},
		},
		{
			input: "aa/aa/bb/bb/bb",
			match: true,
			subs:  []string{"aa", "aa/bb/bb/bb"},
		},
		{
			input: "a/a/a/a",
			match: true,
			subs:  []string{"a", "a/a/a"},
		},
		{
			input: "a/a/a/a/",
			match: false,
		},
		{
			input: "a//a/a",
			match: false,
		},
		{
			input: "a",
			match: true,
			subs:  []string{"", "a"},
		},
		{
			input: "a/aa",
			match: true,
			subs:  []string{"a", "aa"},
		},
		{
			input: "a/aa/a",
			match: true,
			subs:  []string{"a", "aa/a"},
		},
		{
			input: "foo.com",
			match: true,
			subs:  []string{"", "foo.com"},
		},
		{
			input: "foo.com/",
			match: false,
		},
		{
			input: "foo.com:8080/bar",
			match: true,
			subs:  []string{"foo.com:8080", "bar"},
		},
		{
			input: "foo.com:http/bar",
			match: false,
		},
		{
			input: "foo.com/bar",
			match: true,
			subs:  []string{"foo.com", "bar"},
		},
		{
			input: "foo.com/bar/baz",
			match: true,
			subs:  []string{"foo.com", "bar/baz"},
		},
		{
			input: "localhost:8080/bar",
			match: true,
			subs:  []string{"localhost:8080", "bar"},
		},
		{
			input: "sub-dom1.foo.com/bar/baz/quux",
			match: true,
			subs:  []string{"sub-dom1.foo.com", "bar/baz/quux"},
		},
		{
			input: "blog.foo.com/bar/baz",
			match: true,
			subs:  []string{"blog.foo.com", "bar/baz"},
		},
		{
			input: "a^a",
			match: false,
		},
		{
			input: "aa/asdf$$^/aa",
			match: false,
		},
		{
			input: "asdf$$^/aa",
			match: false,
		},
		{
			input: "aa-a/a",
			match: true,
			subs:  []string{"aa-a", "a"},
		},
		{
			input: strings.Repeat("a/", 128) + "a",
			match: true,
			subs:  []string{"a", strings.Repeat("a/", 127) + "a"},
		},
		{
			input: "a-/a/a/a",
			match: false,
		},
		{
			input: "foo.com/a-/a/a",
			match: false,
		},
		{
			input: "-foo/bar",
			match: false,
		},
		{
			input: "foo/bar-",
			match: false,
		},
		{
			input: "foo-/bar",
			match: false,
		},
		{
			input: "foo/-bar",
			match: false,
		},
		{
			input: "_foo/bar",
			match: false,
		},
		{
			input: "foo_bar",
			match: true,
			subs:  []string{"", "foo_bar"},
		},
		{
			input: "foo_bar.com",
			match: true,
			subs:  []string{"", "foo_bar.com"},
		},
		{
			input: "foo_bar.com:8080",
			match: false,
		},
		{
			input: "foo_bar.com:8080/app",
			match: false,
		},
		{
			input: "foo.com/foo_bar",
			match: true,
			subs:  []string{"foo.com", "foo_bar"},
		},
		{
			input: "____/____",
			match: false,
		},
		{
			input: "_docker/_docker",
			match: false,
		},
		{
			input: "docker_/docker_",
			match: false,
		},
		{
			input: "b.gcr.io/test.example.com/my-app",
			match: true,
			subs:  []string{"b.gcr.io", "test.example.com/my-app"},
		},
		{
			input: "xn--n3h.com/myimage", // ☃.com in punycode
			match: true,
			subs:  []string{"xn--n3h.com", "myimage"},
		},
		{
			input: "xn--7o8h.com/myimage", // 🐳.com in punycode
			match: true,
			subs:  []string{"xn--7o8h.com", "myimage"},
		},
		{
			input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode
			match: true,
			subs:  []string{"example.com", "xn--7o8h.com/myimage"},
		},
		{
			input: "example.com/some_separator__underscore/myimage",
			match: true,
			subs:  []string{"example.com", "some_separator__underscore/myimage"},
		},
		{
			input: "example.com/__underscore/myimage",
			match: false,
		},
		{
			input: "example.com/..dots/myimage",
			match: false,
		},
		{
			input: "example.com/.dots/myimage",
			match: false,
		},
		{
			input: "example.com/nodouble..dots/myimage",
			match: false,
		},
		// NOTE(review): duplicate of the previous case.
		{
			input: "example.com/nodouble..dots/myimage",
			match: false,
		},
		{
			input: "docker./docker",
			match: false,
		},
		{
			input: ".docker/docker",
			match: false,
		},
		{
			input: "docker-/docker",
			match: false,
		},
		{
			input: "-docker/docker",
			match: false,
		},
		{
			input: "do..cker/docker",
			match: false,
		},
		{
			input: "do__cker:8080/docker",
			match: false,
		},
		{
			input: "do__cker/docker",
			match: true,
			subs:  []string{"", "do__cker/docker"},
		},
		// NOTE(review): duplicate of the "b.gcr.io/test.example.com/my-app"
		// case above.
		{
			input: "b.gcr.io/test.example.com/my-app",
			match: true,
			subs:  []string{"b.gcr.io", "test.example.com/my-app"},
		},
		{
			input: "registry.io/foo/project--id.module--name.ver---sion--name",
			match: true,
			subs:  []string{"registry.io", "foo/project--id.module--name.ver---sion--name"},
		},
		{
			input: "Asdf.com/foo/bar", // uppercase character in hostname
			match: true,
		},
		{
			input: "Foo/FarB", // uppercase characters in remote name
			match: false,
		},
	}
	for i := range testcases {
		checkRegexp(t, anchoredNameRegexp, testcases[i])
	}
}
// TestReferenceRegexp checks ReferenceRegexp, whose three submatches are the
// name, the optional tag, and the optional digest.
func TestReferenceRegexp(t *testing.T) {
	// The regexp contract this test depends on: exactly three capture groups.
	if ReferenceRegexp.NumSubexp() != 3 {
		t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3",
			ReferenceRegexp, ReferenceRegexp.NumSubexp())
	}

	testcases := []regexpMatch{
		{
			input: "registry.com:8080/myapp:tag",
			match: true,
			subs:  []string{"registry.com:8080/myapp", "tag", ""},
		},
		{
			input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
			match: true,
			subs:  []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
		},
		{
			input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
			match: true,
			subs:  []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
		},
		{
			input: "registry.com:8080/myapp@sha256:badbadbadbad",
			match: false,
		},
		{
			input: "registry.com:8080/myapp:invalid~tag",
			match: false,
		},
		{
			input: "bad_hostname.com:8080/myapp:tag",
			match: false,
		},
		{
			// localhost treated as name, missing tag with 8080 as tag
			input: "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
			match: true,
			subs:  []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
		},
		{
			input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
			match: true,
			subs:  []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
		},
		{
			input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
			match: false,
		},
		{
			// localhost will be treated as an image name without a host
			input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
			match: true,
			subs:  []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
		},
		{
			input: "registry.com:8080/myapp@bad",
			match: false,
		},
		{
			input: "registry.com:8080/myapp@2bad",
			match: false, // TODO(dmcgowan): Support this as valid
		},
	}
	for i := range testcases {
		checkRegexp(t, ReferenceRegexp, testcases[i])
	}
}
// TestIdentifierRegexp checks the identifier regexps: the full form requires
// a 64-character lowercase-hex string, while the short form accepts truncated
// lowercase-hex identifiers of at least 6 characters.
func TestIdentifierRegexp(t *testing.T) {
	fullCases := []regexpMatch{
		{
			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
			match: true,
		},
		{
			// Uppercase hex is rejected.
			input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C",
			match: false,
		},
		{
			// Too short for the full form.
			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf",
			match: false,
		},
		{
			// Algorithm prefix is not part of an identifier.
			input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
			match: false,
		},
		{
			// Too long.
			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482",
			match: false,
		},
	}

	shortCases := []regexpMatch{
		{
			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
			match: true,
		},
		{
			input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C",
			match: false,
		},
		{
			// Truncated identifiers are accepted by the short form.
			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf",
			match: true,
		},
		{
			input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821",
			match: false,
		},
		{
			input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482",
			match: false,
		},
		{
			// Five characters is below the minimum; six matches.
			input: "da304",
			match: false,
		},
		{
			input: "da304e",
			match: true,
		},
	}

	for i := range fullCases {
		checkRegexp(t, anchoredIdentifierRegexp, fullCases[i])
	}
	for i := range shortCases {
		checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i])
	}
}

View file

@ -1,45 +0,0 @@
package docker
import (
"testing"
"github.com/stretchr/testify/assert"
)
// This is just a smoke test for the common expected header formats,
// by no means comprehensive.
func TestParseValueAndParams(t *testing.T) {
	// Each case is a WWW-Authenticate challenge value, the expected
	// (lowercased) scheme, and the expected parameter key/value map.
	for _, c := range []struct {
		input  string
		scope  string
		params map[string]string
	}{
		{
			`Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull"`,
			"bearer",
			map[string]string{
				"realm":   "https://auth.docker.io/token",
				"service": "registry.docker.io",
				"scope":   "repository:library/busybox:pull",
			},
		},
		{
			`Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull,push"`,
			"bearer",
			map[string]string{
				"realm":   "https://auth.docker.io/token",
				"service": "registry.docker.io",
				"scope":   "repository:library/busybox:pull,push",
			},
		},
		{
			`Bearer realm="http://127.0.0.1:5000/openshift/token"`,
			"bearer",
			map[string]string{"realm": "http://127.0.0.1:5000/openshift/token"},
		},
	} {
		scope, params := parseValueAndParams(c.input)
		assert.Equal(t, c.scope, scope, c.input)
		assert.Equal(t, c.params, params, c.input)
	}
}

View file

@ -1,66 +0,0 @@
{
"title": "JSON embedded in an atomic container signature",
"description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containers/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n",
"type": "object",
"required": [
"critical",
"optional"
],
"additionalProperties": false,
"properties": {
"critical": {
"type": "object",
"required": [
"type",
"image",
"identity"
],
"additionalProperties": false,
"properties": {
"type": {
"type": "string",
"enum": [
"atomic container signature"
]
},
"image": {
"type": "object",
"required": [
"docker-manifest-digest"
],
"additionalProperties": false,
"properties": {
"docker-manifest-digest": {
"type": "string"
}
}
},
"identity": {
"type": "object",
"required": [
"docker-reference"
],
"additionalProperties": false,
"properties": {
"docker-reference": {
"type": "string"
}
}
}
}
},
"optional": {
"type": "object",
"description": "All members are optional, but if they are included, they must be valid.",
"additionalProperties": true,
"properties": {
"creator": {
"type": "string"
},
"timestamp": {
"type": "integer"
}
}
}
}
}

View file

@ -1,241 +0,0 @@
% atomic-signature(5) Atomic signature format
% Miloslav Trmač
% March 2017
# Atomic signature format
This document describes the format of “atomic” container signatures,
as implemented by the `github.com/containers/image/signature` package.
Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
(preferably through the higher-level `signature.PolicyContext` interface)
without having to care about the details of the format described below.
This documentation exists primarily for maintainers of the package
and to allow independent reimplementations.
## High-level overview
The signature provides an end-to-end authenticated claim that a container image
has been approved by a specific party (e.g. the creator of the image as their work,
an automated build system as a result of an automated build,
a company IT department approving the image for production) under a specified _identity_
(e.g. an OS base image / specific application, with a specific version).
An atomic container signature consists of a cryptographic signature which identifies
and authenticates who signed the image, and carries as a signed payload a JSON document.
The JSON document identifies the image being signed, claims a specific identity of the
image and if applicable, contains other information about the image.
The signatures do not modify the container image (the layers, configuration, manifest, …);
e.g. their presence does not change the manifest digest used to identify the image in
docker/distribution servers; rather, the signatures are associated with an immutable image.
An image can have any number of signatures so signature distribution systems SHOULD support
associating more than one signature with an image.
## The cryptographic signature
As distributed, the atomic container signature is a blob which contains a cryptographic signature
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
JSON document and a signature of the JSON document; it is not a “detached signature” with
independent blobs containing the JSON document and a cryptographic signature).
Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880),
but others may be added in the future. (The blob does not contain metadata identifying the
cryptographic signature format. It is expected that most formats are sufficiently self-describing
that this is not necessary and the configured expected public key provides another indication
of the expected cryptographic signature format. Such metadata may be added in the future for
newly added cryptographic signature formats, if necessary.)
Consumers of atomic container signatures SHOULD verify the cryptographic signature
against one or more trusted public keys
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
before parsing or processing the JSON payload in _any_ way,
in particular they SHOULD stop processing the container signature
if the cryptographic signature verification fails, without even starting to process the JSON payload.
(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature,
and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs,
e.g. to list the authors and image identities of signatures associated with a single container image;
if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted
or in any way usable for making policy decisions about the image.)
### OpenPGP signature verification
When verifying a cryptographic signature in the OpenPGP format,
the consumer MUST verify at least the following aspects of the signature
(like the `github.com/containers/image/signature` package does):
- The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3.
(e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format).
- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
- The signature MUST be correctly formed and pass the cryptographic validation.
- The signature MUST correctly authenticate the included JSON payload
(in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated).
- The signature MUST NOT be expired.
The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected.
## JSON processing and forward compatibility
The payload of the cryptographic signature is a JSON document (RFC 7159).
Consumers SHOULD parse it very strictly,
refusing any signature which violates the expected format (e.g. missing members, incorrect member types)
or can be interpreted ambiguously (e.g. a duplicated member in a JSON object).
Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized
to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below)
and if the semantics of the invalid document, as created by such an implementation, is clear.
The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
each a JSON object.
The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
(as defined [below](#criticaltype))
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.
To ensure forward compatibility (allowing older signature consumers to correctly
accept or reject signatures created at a later date, with possible extensions to this format),
consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects,
contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected.
At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members.
For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object,
and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported.
Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid.
## JSON data format
An example of the full format follows, with detailed description below.
To reiterate, consumers of the signature SHOULD perform successful cryptographic verification,
and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.
```json
{
"critical": {
"type": "atomic container signature",
"image": {
"docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
},
"identity": {
"docker-reference": "docker.io/library/busybox:latest"
}
},
"optional": {
"creator": "some software package v1.0.1-35",
"timestamp": 1483228800,
}
}
```
### `critical`
This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature.
Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
### `critical.type`
This MUST be a string with a string value exactly equal to `atomic container signature` (three words, including the spaces).
Signature consumers MUST reject signatures which do not have this member or this member does not have exactly the expected value.
(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future;
if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`,
not by this document.)
### `critical.image`
This MUST be a JSON object which identifies the container image this signature applies to.
Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
(Currently only the `docker-manifest-digest` way of identifying a container image is defined;
alternatives to this may be defined in the future,
but existing consumers are required to reject signatures which use formats they do not support.)
### `critical.image.docker-manifest-digest`
This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format.
The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system.
The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way
(e.g. parsing the manifest further or downloading layers of the image).
Implementation notes:
* A single container image manifest may have several valid manifest digest values, using different algorithms.
* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests,
the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob.
### `critical.identity`
This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with a version information),
as asserted by the author of the signature.
Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
(Currently only the `docker-reference` way of claiming an image identity/purpose is defined;
alternatives to this may be defined in the future,
but existing consumers are required to reject signatures which use formats they do not support.)
### `critical.identity.docker-reference`
This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format,
and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`).
If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning,
the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces).
The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image
(again, accounting for the `docker/distribution/reference` normalization semantics).
In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image.
However, depending on the specific application, users or system administrators may accept less specific matches
(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest),
or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image
(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images).
The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md).
The `critical.identity.docker-reference` value SHOULD contain either a tag or digest;
in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).)
### `optional`
This MUST be a JSON object.
Consumers SHOULD accept any members with unrecognized names in the `optional` object,
and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported.
Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid.
### `optional.creator`
If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.
The contents of this string is not defined in detail; however each implementation creating atomic container signatures:
- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
changes whenever the format or semantics of the generated signature changes in any way;
it SHOULD not be possible for two implementations which use a different format or semantics to have the same `optional.creator` value
- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp),
and which makes it easy enough to recognize a range of versions of a specific implementation
(e.g. the version of the implementation SHOULD NOT be only a git hash, because they don't have an easily defined ordering;
the string should contain a version number, or at least a date of the commit).
Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
(perhaps augmented with `optional.timestamp`),
and MAY change their processing of the signature based on these values
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
as long as the semantics of the invalid document, as created by such an implementation, is clear.
If consumers of signatures do change their behavior based on the `optional.creator` value,
they SHOULD take care that the way they process the signatures is not inconsistent with
strictly validating signature consumers.
(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value
if other implementations would completely reject the signature,
but it would be very undesirable for the two kinds of implementations to accept the signature in different
and inconsistent situations.)
### `optional.timestamp`
If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created
as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC).

View file

@ -1,267 +0,0 @@
% POLICY.JSON(5) policy.json Man Page
% Miloslav Trmač
% September 2016
# Signature verification policy file format
Signature verification policy files are used to specify policy, e.g. trusted keys,
applicable when deciding whether to accept an image, or individual signatures of that image, as valid.
The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`;
applications performing verification may allow using a different policy instead.
## Overall structure
The signature verification policy file, usually called `policy.json`,
uses a JSON format. Unlike some other JSON files, its parsing is fairly strict:
unrecognized, duplicated or otherwise invalid fields cause the entire file,
and usually the entire operation, to be rejected.
The purpose of the policy file is to define a set of *policy requirements* for a container image,
usually depending on its location (where it is being pulled from) or otherwise defined identity.
Policy requirements can be defined for:
- An individual *scope* in a *transport*.
The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
and *scope* values are defined by each transport; see below for more details.
Usually, a scope can be defined to match a single image, and various prefixes of
such a most specific scope define namespaces of matching images.
- A default policy for a single transport, expressed using an empty string as a scope
- A global default policy.
If multiple policy requirements match a given image, only the requirements from the most specific match apply,
the more general policy requirements definitions are ignored.
This is expressed in JSON using the top-level syntax
```js
{
"default": [/* policy requirements: global default */]
"transports": {
transport_name: {
"": [/* policy requirements: default for transport $transport_name */],
scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
scope_2: [/*…*/]
/*…*/
},
transport_name_2: {/*…*/}
/*…*/
}
}
```
The global `default` set of policy requirements is mandatory; all of the other fields
(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.
<!-- NOTE: Keep this in sync with transports/transports.go! -->
## Supported transports and their scopes
### `atomic:`
The `atomic:` transport refers to images in an Atomic Registry.
Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
i.e. either specifying a complete name of a tagged image, or prefix denoting
a host/namespace/image stream.
*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used
e.g. for `docker pull`), _not_ to the OpenShift API host and port.
### `dir:`
The `dir:` transport refers to images stored in local directories.
Supported scopes are paths of directories (either containing a single image or
subdirectories possibly containing images).
*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
The top-level scope `"/"` is forbidden; use the transport default scope `""`,
for consistency with other transports.
### `docker:`
The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".
Scopes matching individual images are named Docker references *in the fully expanded form*, either
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
a repository namespace, or a registry host (by only specifying the host name).
### `oci:`
The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification".
Supported scopes use the form _directory_`:`_tag_, with _directory_ referring to
a directory containing one or more tags, or any of the parent directories.
*Note:* See `dir:` above for semantics and restrictions on the directory paths, they apply to `oci:` equivalently.
## Policy Requirements
Using the mechanisms above, a set of policy requirements is looked up. The policy requirements
are represented as a JSON array of individual requirement objects. For an image to be accepted,
*all* of the requirements must be satisfied simultaneously.
The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author);
in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object.
The following requirement objects are supported:
### `insecureAcceptAnything`
A simple requirement with the following syntax
```json
{"type":"insecureAcceptAnything"}
```
This requirement accepts any image (but note that other requirements in the array still apply).
When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though.
This is useful primarily for policy scopes where no signature verification is required;
because the array of policy requirements must not be empty, this requirement is used
to represent the lack of requirements explicitly.
### `reject`
A simple requirement with the following syntax:
```json
{"type":"reject"}
```
This requirement rejects every image, and every signature.
### `signedBy`
This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key.
```js
{
"type": "signedBy",
"keyType": "GPGKeys", /* The only currently supported value */
"keyPath": "/path/to/local/keyring/file",
"keyData": "base64-encoded-keyring-data",
"signedIdentity": identity_requirement
}
```
<!-- Later: other keyType values -->
Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted.
The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image.
One of the following alternatives are supported:
- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail.
```json
{"type":"matchExact"}
```
- If the image identity carries a tag, the identity in the signature must exactly match;
if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag).
(Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.)
```json
{"type":"matchRepoDigestOrExact"}
```
- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.
```json
{"type":"matchRepository"}
```
- The identity in the signature must exactly match a specified identity.
This is useful e.g. when locally mirroring images signed using their public identity.
```js
{
"type": "exactReference",
"dockerReference": docker_reference_value
}
```
- The identity in the signature must be in the same repository as a specified identity.
This combines the properties of `matchRepository` and `exactReference`.
```js
{
"type": "exactRepository",
"dockerRepository": docker_repository_value
}
```
If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`.
*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can be only used if a Docker-like image identity is
provided by the transport. In particular, the `dir:` and `oci:` transports can be only
used with `exactReference` or `exactRepository`.
<!-- ### `signedBaseLayer` -->
## Examples
It is *strongly* recommended to set the `default` policy to `reject`, and then
selectively allow individual transports and scopes as desired.
### A reasonably locked-down system
(Note that the `/*``*/` comments are not valid in JSON, and must not be used in real policies.)
```js
{
"default": [{"type": "reject"}], /* Reject anything not explicitly allowed */
"transports": {
"docker": {
/* Allow installing images from a specific repository namespace, without cryptographic verification.
This namespace includes images like openshift/hello-openshift and openshift/origin. */
"docker.io/openshift": [{"type": "insecureAcceptAnything"}],
/* Similarly, allow installing the “official” busybox images. Note how the fully expanded
form, with the explicit /library/, must be used. */
"docker.io/library/busybox": [{"type": "insecureAcceptAnything"}]
/* Other docker: images use the global default policy and are rejected */
},
"dir": {
"": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */
},
"atomic": {
/* The common case: using a known key for a repository or set of repositories */
"hostname:5000/myns/official": [
{
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/path/to/official-pubkey.gpg"
}
],
/* A more complex example, for a repository which contains a mirror of a third-party product,
which must be signed-off by local IT */
"hostname:5000/vendor/product": [
{ /* Require the image to be signed by the original vendor, using the vendor's repository location. */
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/path/to/vendor-pubkey.gpg",
"signedIdentity": {
"type": "exactRepository",
"dockerRepository": "vendor-hostname/product/repository"
}
},
{ /* Require the image to _also_ be signed by a local reviewer. */
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/path/to/reviewer-pubkey.gpg"
}
]
}
}
}
```
### Completely disable security, allow all images, do not trust any signatures
```json
{
"default": [{"type": "insecureAcceptAnything"}]
}
```

View file

@ -1,124 +0,0 @@
% REGISTRIES.D(5) Registries.d Man Page
% Miloslav Trmač
% August 2016
# Registries Configuration Directory
The registries configuration directory contains configuration for various registries
(servers storing remote container images), and for content stored in them,
so that the configuration does not have to be provided in command-line options over and over for every command,
and so that it can be shared by all users of containers/image.
By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`;
applications may allow using a different directory instead.
## Directory Structure
The directory may contain any number of files with the extension `.yaml`,
each using the YAML format. Other than the mandatory extension, names of the files
don't matter.
The contents of these files are merged together; to have a well-defined and easy to understand
behavior, there can be only one configuration section describing a single namespace within a registry
(in particular there can be at most one `default-docker` section across all files,
and there can be at most one instance of any key under the `docker` section;
these sections are documented later).
Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
and it is also forbidden to split a configuration for a single registry or scope across
more than one file (even if they are not semantically in conflict).
## Registries, Scopes and Search Order
Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined:
- `default-docker` is the _configuration section_ (as documented below)
for registries implementing "Docker Registry HTTP API V2".
This key is optional.
- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
or namespaces and individual images within these registries, as keys;
the value assigned to any such key is a _configuration section_.
This key is optional.
Scopes matching individual images are named Docker references *in the fully expanded form*, either
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
a repository namespace, or a registry host (and a port if it differs from the default).
Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
is _not_ used as parent scope.
When searching for a configuration to apply for an individual container image, only
the configuration for the most-precisely matching scope is used; configuration using
more general scopes is ignored. For example, if _any_ configuration exists for
`docker.io/library/busybox`, the configuration for `docker.io` is ignored
(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).
## Individual Configuration Sections
A single configuration section is selected for a container image using the process
described above. The configuration section is a YAML mapping, with the following keys:
- `sigstore-staging` defines an URL of the signature storage, used for editing it (adding or deleting signatures).
This key is optional; if it is missing, `sigstore` below is used.
- `sigstore` defines an URL of the signature storage.
This URL is used for reading existing signatures,
and if `sigstore-staging` does not exist, also for adding or removing them.
This key is optional; if it is missing, no signature storage is defined (no signatures
are downloaded along with images, adding new signatures is possible only if `sigstore-staging` is defined).
## Examples
### Using Containers from Various Origins
The following demonstrates how to consume and run images from various registries and namespaces:
```yaml
docker:
registry.database-supplier.com:
sigstore: https://sigstore.database-supplier.com
distribution.great-middleware.org:
sigstore: https://security-team.great-middleware.org/sigstore
docker.io/web-framework:
sigstore: https://sigstore.web-framework.io:8080
```
### Developing and Signing Containers, Staging Signatures
For developers in `example.com`:
- Consume most container images using the public servers also used by clients.
- Use a separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
- Craft an individual exception for a single branch a specific developer is working on locally.
```yaml
docker:
registry.example.com:
sigstore: https://registry-sigstore.example.com
registry.example.com/mydepartment:
sigstore: https://sigstore.mydepartment.example.com
sigstore-staging: file:///mnt/mydepartment/sigstore-staging
registry.example.com/mydepartment/myproject:mybranch:
sigstore: http://localhost:4242/sigstore
sigstore-staging: file:///home/useraccount/webroot/sigstore
```
### A Global Default
If a company publishes its products using a different domain, and different registry hostname for each of them, it is still possible to use a single signature storage server
without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.
```yaml
default-docker:
sigstore-staging: file:///mnt/company/common-sigstore-staging
```
# AUTHORS
Miloslav Trmač <mitr@redhat.com>

View file

@ -1,136 +0,0 @@
# Signature access protocols
The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
Some image transports (local storage formats and remote protocols) implement these signatures natively
or trivially; for others, the protocol extensions described below are necessary.
## docker/distribution registries—separate storage
### Usage
Any existing docker/distribution registry, whether or not it natively supports signatures,
can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
or also separate URLs for smaller namespaces or individual repositories within the server
(which e.g. allows image authors to manage their own signature storage while publishing
the images on the public `docker.io` server).
The signature storage URL defines a root of a path hierarchy.
It can be either a `file:///…` URL, pointing to a local directory structure,
or a `http`/`https` URL, pointing to a remote server.
`file:///` signature storage can be both read and written, `http`/`https` only supports reading.
The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
(This of course does not prevent other server implementations,
e.g. a HTTP server reading signatures from a database.)
The usual workflow for producing and distributing images using the separate storage mechanism
is to configure the repository in `registries.d` with `sigstore-staging` URL pointing to a private
`file:///` staging area, and a `sigstore` URL pointing to a public web server.
To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
and then copy the created directory structure from the `file:///` staging area
to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to,
set up a `sigstore` URL pointing to the public web server.
### Path structure
Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
and a container image stored in a docker/distribution registry using the _fully-expanded_ name
_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
signatures are accessed using URLs of the form
> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_
where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
(i.e. even if the user referenced the image using a tag,
the signature storage is always disambiguated using digest references).
Note that in the URLs used for signatures,
_digest-algo_ and _digest-value_ are separated using the `=` character,
not `:` like when accessing the manifest using the docker/distribution API.
Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
then store the new signature using that _index_ value.
There is no way to list existing signatures other than iterating through the successive _index_ values,
and no way to download all of the signatures at once.
### Examples
For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
the following URLs would be accessed to download all signatures:
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
> - …
For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
`sigstore` URL, the signatures would be available at
> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`
and so on.
## (OpenShift) docker/distribution API extension
As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
an extension of the docker/distribution API which allows simpler access to the signatures,
using only the docker/distribution API endpoint.
This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
and it is the preferred way to implement signature storage in registries.
See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
path to read an array of signatures. Use only the signature objects
which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
ignore the other fields of the signature object.
To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
and `content` set to the signature. Also set `name` to a unique name with the form
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
and _per-image-name_ is any unique identifier.
To add more than one signature, add them one at a time. This API does not allow deleting signatures.
Note that because signatures are stored within the cluster-wide image objects,
i.e. different namespaces can not associate different sets of signatures to the same image,
updating signatures requires a cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role).
## OpenShift-embedded registries
The OpenShift-embedded registry implements the ordinary docker/distribution API,
and it also exposes images through the OpenShift REST API (available through the “API master” servers).
Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
which is easier to set up and should usually be preferred.
Continue reading for details on using older versions of OpenShift.
As of https://github.com/openshift/origin/pull/9181,
signatures are exposed through the OpenShift API
(i.e. to access the complete image, it is necessary to use both APIs,
in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).
To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
the `ImageSignature` object.
To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
and _per-image-name_ is any unique identifier.
Note that because signatures are stored within the cluster-wide image objects,
i.e. different namespaces can not associate different sets of signatures to the same image,
updating signatures requires a cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role),
and deleting signatures is strongly discouraged
(it deletes the signature from all namespaces which contain the same image).

View file

@ -1,26 +0,0 @@
package image
import (
"io/ioutil"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// manifestSchema1FromFixture loads a schema1 manifest fixture from the
// fixtures directory and parses it into a genericManifest, failing the
// test immediately on any error.
func manifestSchema1FromFixture(t *testing.T, fixture string) genericManifest {
	blob, err := ioutil.ReadFile(filepath.Join("fixtures", fixture))
	require.NoError(t, err)
	parsed, err := manifestSchema1FromManifest(blob)
	require.NoError(t, err)
	return parsed
}
// TestManifestSchema1ToOCIConfig verifies that converting a schema1
// manifest to an OCI config preserves the image entrypoint.
func TestManifestSchema1ToOCIConfig(t *testing.T) {
	m := manifestSchema1FromFixture(t, "schema1-to-oci-config.json")
	configOCI, err := m.OCIConfig()
	require.NoError(t, err)
	// Guard before indexing [0]: an empty Entrypoint should fail the
	// assertion rather than panic the whole test binary.
	require.NotEmpty(t, configOCI.Config.Entrypoint)
	assert.Equal(t, "/pause", configOCI.Config.Entrypoint[0])
}

View file

@ -1,525 +0,0 @@
package image
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"path/filepath"
"testing"
"time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// unusedImageSource is a types.ImageSource stub for tests in which the
// source must never actually be consulted; every method panics if called.
type unusedImageSource struct{}

func (s unusedImageSource) Reference() types.ImageReference {
	panic("Unexpected call to a mock function")
}
func (s unusedImageSource) Close() error {
	panic("Unexpected call to a mock function")
}
func (s unusedImageSource) GetManifest() ([]byte, string, error) {
	panic("Unexpected call to a mock function")
}
func (s unusedImageSource) GetTargetManifest(d digest.Digest) ([]byte, string, error) {
	panic("Unexpected call to a mock function")
}
func (s unusedImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
	panic("Unexpected call to a mock function")
}
func (s unusedImageSource) GetSignatures() ([][]byte, error) {
	panic("Unexpected call to a mock function")
}
// manifestSchema2FromFixture reads a schema2 manifest fixture from the
// fixtures directory and parses it against the given ImageSource,
// failing the test on any error. (The local is named blob rather than
// manifest to avoid shadowing the imported manifest package.)
func manifestSchema2FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest {
	blob, err := ioutil.ReadFile(filepath.Join("fixtures", fixture))
	require.NoError(t, err)
	parsed, err := manifestSchema2FromManifest(src, blob)
	require.NoError(t, err)
	return parsed
}
// manifestSchema2FromComponentsLikeFixture builds an in-memory
// manifestSchema2 whose config and layer descriptors mirror
// fixtures/schema2.json, using the supplied (possibly nil) config blob.
func manifestSchema2FromComponentsLikeFixture(configBlob []byte) genericManifest {
	config := descriptor{
		MediaType: "application/octet-stream",
		Size:      5940,
		Digest:    "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
	}
	layers := []descriptor{
		{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
			Digest:    "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
			Size:      51354364,
		},
		{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
			Digest:    "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
			Size:      150,
		},
		{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
			Digest:    "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
			Size:      11739507,
		},
		{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
			Digest:    "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
			Size:      8841833,
		},
		{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
			Digest:    "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
			Size:      291,
		},
	}
	return manifestSchema2FromComponents(config, nil, configBlob, layers)
}
// TestManifestSchema2FromManifest checks that a valid schema2 fixture
// parses and that an empty blob is rejected; getter behavior is covered
// by the per-method tests.
func TestManifestSchema2FromManifest(t *testing.T) {
	_ = manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json")
	// An empty byte slice is not a valid manifest.
	_, err := manifestSchema2FromManifest(nil, []byte{})
	assert.Error(t, err)
}
// TestManifestSchema2FromComponents smoke-tests in-memory manifest
// construction; the parsed values are checked by the per-getter tests.
func TestManifestSchema2FromComponents(t *testing.T) {
	_ = manifestSchema2FromComponentsLikeFixture(nil)
}
func TestManifestSchema2Serialize(t *testing.T) {
for _, m := range []genericManifest{
manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
manifestSchema2FromComponentsLikeFixture(nil),
} {
serialized, err := m.serialize()
require.NoError(t, err)
var contents map[string]interface{}
err = json.Unmarshal(serialized, &contents)
require.NoError(t, err)
original, err := ioutil.ReadFile("fixtures/schema2.json")
require.NoError(t, err)
var originalContents map[string]interface{}
err = json.Unmarshal(original, &originalContents)
require.NoError(t, err)
// We would ideally like to compare “serialized” with some transformation of
// “original”, but the ordering of fields in JSON maps is undefined, so this is
// easier.
assert.Equal(t, originalContents, contents)
}
}
// TestManifestSchema2ManifestMIMEType checks the reported MIME type for
// both fixture-parsed and in-memory schema2 manifests.
func TestManifestSchema2ManifestMIMEType(t *testing.T) {
	manifests := []genericManifest{
		manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
		manifestSchema2FromComponentsLikeFixture(nil),
	}
	for _, m := range manifests {
		assert.Equal(t, manifest.DockerV2Schema2MediaType, m.manifestMIMEType())
	}
}
func TestManifestSchema2ConfigInfo(t *testing.T) {
for _, m := range []genericManifest{
manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
manifestSchema2FromComponentsLikeFixture(nil),
} {
assert.Equal(t, types.BlobInfo{
Size: 5940,
Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
}, m.ConfigInfo())
}
}
// configBlobImageSource lets tests control how the config blob is served
// in .ConfigBlob(); every other ImageSource method panics via the
// embedded unusedImageSource.
type configBlobImageSource struct {
	unusedImageSource // We inherit almost all of the methods, which just panic()
	f func(digest digest.Digest) (io.ReadCloser, int64, error)
}

func (s configBlobImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
	// Only the fixture's config digest is expected to be requested.
	if info.Digest.String() != "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" {
		panic("Unexpected digest in GetBlob")
	}
	return s.f(info.Digest)
}
// TestManifestSchema2ConfigBlob exercises ConfigBlob() against GetBlob
// implementations that succeed, fail, or return mismatched data, and
// verifies that in-memory manifests return their original config bytes.
func TestManifestSchema2ConfigBlob(t *testing.T) {
	realConfigJSON, err := ioutil.ReadFile("fixtures/schema2-config.json")
	require.NoError(t, err)
	// Each case provides a GetBlob implementation (nil means no ImageSource
	// at all) and the blob ConfigBlob() is expected to return (nil means an
	// error is expected).
	for _, c := range []struct {
		cbISfn func(digest digest.Digest) (io.ReadCloser, int64, error)
		blob   []byte
	}{
		// Success
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil
		}, realConfigJSON},
		// Various kinds of failures
		{nil, nil},
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			return nil, -1, errors.New("Error returned from GetBlob")
		}, nil},
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			reader, writer := io.Pipe()
			writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob"))
			return reader, 1, nil
		}, nil},
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest")
			return ioutil.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil
		}, nil},
	} {
		var src types.ImageSource
		if c.cbISfn != nil {
			src = configBlobImageSource{unusedImageSource{}, c.cbISfn}
		} else {
			src = nil
		}
		m := manifestSchema2FromFixture(t, src, "schema2.json")
		blob, err := m.ConfigBlob()
		if c.blob != nil {
			assert.NoError(t, err)
			assert.Equal(t, c.blob, blob)
		} else {
			assert.Error(t, err)
		}
	}
	// Generally ConfigBlob should match ConfigInfo; we don't quite need it to, and this will
	// guarantee that the returned object is returning the original contents instead
	// of reading an object from elsewhere.
	configBlob := []byte("config blob which does not match ConfigInfo")
	// This just tests that the manifest can be created; we test that the parsed
	// values are correctly returned in tests for the individual getter methods.
	m := manifestSchema2FromComponentsLikeFixture(configBlob)
	cb, err := m.ConfigBlob()
	require.NoError(t, err)
	assert.Equal(t, configBlob, cb)
}
func TestManifestSchema2LayerInfo(t *testing.T) {
for _, m := range []genericManifest{
manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
manifestSchema2FromComponentsLikeFixture(nil),
} {
assert.Equal(t, []types.BlobInfo{
{
Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
Size: 51354364,
},
{
Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
Size: 150,
},
{
Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
Size: 11739507,
},
{
Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
Size: 8841833,
},
{
Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
Size: 291,
},
}, m.LayerInfos())
}
}
// TestManifestSchema2EmbeddedDockerReferenceConflicts checks that no
// Docker reference conflicts with a schema2 manifest.
func TestManifestSchema2EmbeddedDockerReferenceConflicts(t *testing.T) {
	names := []string{"busybox", "example.com:5555/ns/repo:tag"}
	for _, m := range []genericManifest{
		manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
		manifestSchema2FromComponentsLikeFixture(nil),
	} {
		for _, name := range names {
			ref, err := reference.ParseNormalizedNamed(name)
			require.NoError(t, err)
			assert.False(t, m.EmbeddedDockerReferenceConflicts(ref))
		}
	}
}
// TestManifestSchema2ImageInspectInfo checks the inspect data derived
// from the config blob, and that missing or invalid config blobs cause
// an error.
func TestManifestSchema2ImageInspectInfo(t *testing.T) {
	configJSON, err := ioutil.ReadFile("fixtures/schema2-config.json")
	require.NoError(t, err)
	ii, err := manifestSchema2FromComponentsLikeFixture(configJSON).imageInspectInfo()
	require.NoError(t, err)
	expected := types.ImageInspectInfo{
		Tag:           "",
		Created:       time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC),
		DockerVersion: "1.12.1",
		Labels:        map[string]string{},
		Architecture:  "amd64",
		Os:            "linux",
		Layers:        nil,
	}
	assert.Equal(t, expected, *ii)
	// A nil config blob makes inspection fail.
	_, err = manifestSchema2FromComponentsLikeFixture(nil).imageInspectInfo()
	assert.Error(t, err)
	// So does a config blob that is not valid JSON.
	_, err = manifestSchema2FromComponentsLikeFixture([]byte("invalid JSON")).imageInspectInfo()
	assert.Error(t, err)
}
func TestManifestSchema2UpdatedImageNeedsLayerDiffIDs(t *testing.T) {
for _, m := range []genericManifest{
manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json"),
manifestSchema2FromComponentsLikeFixture(nil),
} {
assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{
ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
}))
}
}
// schema2ImageSource is an ImageSource just plausible enough for the
// schema conversions in manifestSchema2.UpdatedImage() to work: it
// serves the config blob and reports a Docker reference.
type schema2ImageSource struct {
	configBlobImageSource
	ref reference.Named
}

func (s *schema2ImageSource) Reference() types.ImageReference {
	return refImageReferenceMock{s.ref}
}
// refImageReferenceMock is a types.ImageReference mock that returns its
// embedded reference.Named from DockerReference and panics on every
// other method.
type refImageReferenceMock struct{ reference.Named }

func (m refImageReferenceMock) Transport() types.ImageTransport {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) StringWithinTransport() string {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) DockerReference() reference.Named {
	return m.Named
}
func (m refImageReferenceMock) PolicyConfigurationIdentity() string {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) PolicyConfigurationNamespaces() []string {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
	panic("unexpected call to a mock function")
}
func (m refImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
	panic("unexpected call to a mock function")
}
// newSchema2ImageSource builds a schema2ImageSource that serves the
// schema2-config.json fixture as the config blob and carries the given
// Docker reference.
func newSchema2ImageSource(t *testing.T, dockerRef string) *schema2ImageSource {
	configJSON, err := ioutil.ReadFile("fixtures/schema2-config.json")
	require.NoError(t, err)
	parsedRef, err := reference.ParseNormalizedNamed(dockerRef)
	require.NoError(t, err)
	serveConfig := func(digest digest.Digest) (io.ReadCloser, int64, error) {
		return ioutil.NopCloser(bytes.NewReader(configJSON)), int64(len(configJSON)), nil
	}
	return &schema2ImageSource{
		configBlobImageSource: configBlobImageSource{f: serveConfig},
		ref:                   parsedRef,
	}
}
// memoryImageDest is a minimal types.ImageDestination that records blobs
// written via PutBlob in memory; every other method panics.
type memoryImageDest struct {
	ref reference.Named
	// storedBlobs maps each written blob's digest to its contents.
	storedBlobs map[digest.Digest][]byte
}

func (dst *memoryImageDest) Reference() types.ImageReference {
	return refImageReferenceMock{dst.ref}
}
func (dst *memoryImageDest) Close() error {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) SupportedManifestMIMETypes() []string {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) SupportsSignatures() error {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) ShouldCompressLayers() bool {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) AcceptsForeignLayerURLs() bool {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
	// Allocate the blob map lazily so a zero-value memoryImageDest works.
	if dst.storedBlobs == nil {
		dst.storedBlobs = make(map[digest.Digest][]byte)
	}
	if inputInfo.Digest.String() == "" {
		panic("inputInfo.Digest unexpectedly empty")
	}
	contents, err := ioutil.ReadAll(stream)
	if err != nil {
		return types.BlobInfo{}, err
	}
	dst.storedBlobs[inputInfo.Digest] = contents
	return types.BlobInfo{Digest: inputInfo.Digest, Size: int64(len(contents))}, nil
}
func (dst *memoryImageDest) HasBlob(inputInfo types.BlobInfo) (bool, int64, error) {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) ReapplyBlob(inputInfo types.BlobInfo) (types.BlobInfo, error) {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) PutManifest([]byte) error {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) PutSignatures(signatures [][]byte) error {
	panic("Unexpected call to a mock function")
}
func (dst *memoryImageDest) Commit() error {
	panic("Unexpected call to a mock function")
}
// TestManifestSchema2UpdatedImage covers UpdatedImage(): reordering
// LayerInfos, ignoring EmbeddedDockerReference, MIME-type conversions,
// and that the original manifest object is left unmodified.
func TestManifestSchema2UpdatedImage(t *testing.T) {
	originalSrc := newSchema2ImageSource(t, "httpd:latest")
	original := manifestSchema2FromFixture(t, originalSrc, "schema2.json")
	// LayerInfos: a rotated layer list must round-trip through the update.
	layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0])
	res, err := original.UpdatedImage(types.ManifestUpdateOptions{
		LayerInfos: layerInfos,
	})
	require.NoError(t, err)
	assert.Equal(t, layerInfos, res.LayerInfos())
	// Supplying the wrong number of layers is rejected.
	_, err = original.UpdatedImage(types.ManifestUpdateOptions{
		LayerInfos: append(layerInfos, layerInfos[0]),
	})
	assert.Error(t, err)
	// EmbeddedDockerReference:
	// … is ignored: no reference conflicts with the updated image.
	embeddedRef, err := reference.ParseNormalizedNamed("busybox")
	require.NoError(t, err)
	res, err = original.UpdatedImage(types.ManifestUpdateOptions{
		EmbeddedDockerReference: embeddedRef,
	})
	require.NoError(t, err)
	nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest")
	require.NoError(t, err)
	conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef)
	assert.False(t, conflicts)
	// ManifestMIMEType:
	// Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.)
	for _, mime := range []string{
		manifest.DockerV2Schema1MediaType,
		manifest.DockerV2Schema1SignedMediaType,
	} {
		_, err = original.UpdatedImage(types.ManifestUpdateOptions{
			ManifestMIMEType: mime,
			InformationOnly: types.ManifestUpdateInformation{
				Destination: &memoryImageDest{ref: originalSrc.ref},
			},
		})
		assert.NoError(t, err, mime)
	}
	for _, mime := range []string{
		manifest.DockerV2Schema2MediaType, // This indicates a confused caller, not a no-op
		"this is invalid",
	} {
		_, err = original.UpdatedImage(types.ManifestUpdateOptions{
			ManifestMIMEType: mime,
		})
		assert.Error(t, err, mime)
	}
	// The original manifest hasn't been changed by any of the updates above:
	m2 := manifestSchema2FromFixture(t, originalSrc, "schema2.json")
	typedOriginal, ok := original.(*manifestSchema2)
	require.True(t, ok)
	typedM2, ok := m2.(*manifestSchema2)
	require.True(t, ok)
	assert.Equal(t, *typedM2, *typedOriginal)
}
// TestConvertToManifestOCI converts a schema2 manifest to OCI and
// compares the result against a hand-written expected fixture.
func TestConvertToManifestOCI(t *testing.T) {
	src := newSchema2ImageSource(t, "httpd-copy:latest")
	original := manifestSchema2FromFixture(t, src, "schema2.json")
	res, err := original.UpdatedImage(types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
	})
	require.NoError(t, err)
	convertedJSON, mt, err := res.Manifest()
	require.NoError(t, err)
	assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt)
	// JSON field order is unspecified, so compare parsed maps rather than
	// raw bytes.
	byHandJSON, err := ioutil.ReadFile("fixtures/schema2-to-oci1.json")
	require.NoError(t, err)
	var converted, byHand map[string]interface{}
	require.NoError(t, json.Unmarshal(byHandJSON, &byHand))
	require.NoError(t, json.Unmarshal(convertedJSON, &converted))
	assert.Equal(t, byHand, converted)
}
// TestConvertToManifestSchema1 converts a schema2 manifest to signed
// schema1, comparing the result against a Docker-Hub-derived fixture and
// checking that the gzipped empty layer was uploaded to the destination.
func TestConvertToManifestSchema1(t *testing.T) {
	src := newSchema2ImageSource(t, "httpd-copy:latest")
	original := manifestSchema2FromFixture(t, src, "schema2.json")
	memoryDest := &memoryImageDest{ref: src.ref}
	res, err := original.UpdatedImage(types.ManifestUpdateOptions{
		ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
		InformationOnly: types.ManifestUpdateInformation{
			Destination: memoryDest,
		},
	})
	require.NoError(t, err)
	convertedJSON, mt, err := res.Manifest()
	require.NoError(t, err)
	assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt)
	// byDockerJSON is the result of asking the Docker Hub for a schema1
	// manifest, except that "name" was replaced to verify that the ref from
	// memoryDest, not from the source, is used.
	byDockerJSON, err := ioutil.ReadFile("fixtures/schema2-to-schema1-by-docker.json")
	require.NoError(t, err)
	var converted, byDocker map[string]interface{}
	require.NoError(t, json.Unmarshal(byDockerJSON, &byDocker))
	require.NoError(t, json.Unmarshal(convertedJSON, &converted))
	// The "signatures" fields are not expected to match; compare the rest.
	delete(byDocker, "signatures")
	delete(converted, "signatures")
	assert.Equal(t, byDocker, converted)
	assert.Equal(t, gzippedEmptyLayer, memoryDest.storedBlobs[gzippedEmptyLayerDigest])
	// FIXME? Test also the various failure cases, if only to see that we don't crash?
}

File diff suppressed because one or more lines are too long

View file

@ -1,36 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 5940,
"digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 51354364,
"digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 150,
"digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 11739507,
"digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 8841833,
"digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 291,
"digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
}
]
}

View file

@ -1,35 +0,0 @@
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 5940,
"digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 51354364,
"digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 150,
"digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 11739507,
"digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 8841833,
"digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 291,
"digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
}
]
}

View file

@ -1,29 +0,0 @@
{
"schemaVersion": 1,
"name": "google_containers/pause-amd64",
"tag": "3.0",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:f112334343777b75be77ec1f835e3bbbe7d7bd46e27b6a2ae35c6b3cfea0987c"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
}
],
"history": [
{
"v1Compatibility": "{\"id\":\"bb497e16a2d55195649174d1fadac52b00fa2c14124d73009712606909286bc5\",\"parent\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"created\":\"2016-05-04T06:26:41.522308365Z\",\"container\":\"a9873535145fe72b464d3055efbac36aab70d059914e221cbbd7fe3cac53ef6b\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT \\u0026{[\\\"/pause\\\"]}\"],\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\"}"
},
{
"v1Compatibility": "{\"id\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"parent\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:41.091672218Z\",\"container\":\"e1b38778b023f25642273ed9e7f4846b4bf38b22a8b55755880b2e6ab6019811\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:b7eb6a5df9d5fbe509cac16ed89f8d6513a4362017184b14c6a5fae151eee5c5 in /pause\"],\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":746888}"
},
{
"v1Compatibility": "{\"id\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:40.628395649Z\",\"container\":\"95722352e41d57660259fbede4413d06889a28eb07a7302d2a7b3f9c71ceaa46\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ARG ARCH\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\"}"
}
],"signatures":[{"header":{"alg":"ES256","jwk":{"crv":"P-256","kid":"ORN4:M47W:3KP3:TZRZ:C3UF:5MFQ:INZV:TCMY:LHNV:EYQU:IRGJ:IJLJ","kty":"EC","x":"yJ0ZQ19NBZUQn8LV60sFEabhlgky9svozfK0VGVou7Y","y":"gOJScOkkLVY1f8aAx-6XXpVM5rJaDYLkCNJ1dvcQGMs"}},"protected":"eyJmb3JtYXRMZW5ndGgiOjQxMzMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNS0wNFQwNjoyODo1MVoifQ","signature":"77_7DVx1IZ3PiKNnO7QnvoF7Sgik4GI4bnlVJdtQW461dSyYzd-nSdBmky8Jew3InEW8Cuv_t5w4GmOSwXvL7g"}]
}

File diff suppressed because one or more lines are too long

View file

@ -1,29 +0,0 @@
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 4651,
"digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745"
},
"layers": [{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 51354364,
"digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
}, {
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 150,
"digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
}, {
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 11739507,
"digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
}, {
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 8841833,
"digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
}, {
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 291,
"digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
}]
}

View file

@ -1,116 +0,0 @@
{
"schemaVersion": 1,
"name": "library/httpd-copy",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
},
{
"blobSum": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
},
{
"blobSum": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"httpd-foreground\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"container\":\"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69\",\"container_config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD 
[\\\"httpd-foreground\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2016-09-23T23:20:45.78976459Z\",\"docker_version\":\"1.12.1\",\"id\":\"dca7323f9c839837493199d63263083d94f5eb1796d7bd04ca8374c4e9d3749a\",\"os\":\"linux\",\"parent\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"parent\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"created\":\"2016-09-23T23:20:45.453934921Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 80/tcp\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"parent\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"created\":\"2016-09-23T23:20:45.127455562Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ \"]}}"
},
{
"v1Compatibility": "{\"id\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"parent\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"created\":\"2016-09-23T23:20:44.585743332Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 buildDeps=' \\t\\tbzip2 \\t\\tca-certificates \\t\\tgcc \\t\\tlibpcre++-dev \\t\\tlibssl-dev \\t\\tmake \\t\\twget \\t' \\t\\u0026\\u0026 apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends $buildDeps \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/* \\t\\t\\u0026\\u0026 wget -O httpd.tar.bz2 \\\"$HTTPD_BZ2_URL\\\" \\t\\u0026\\u0026 echo \\\"$HTTPD_SHA1 *httpd.tar.bz2\\\" | sha1sum -c - \\t\\u0026\\u0026 wget -O httpd.tar.bz2.asc \\\"$HTTPD_ASC_URL\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \\t\\u0026\\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \\t\\u0026\\u0026 rm -r \\\"$GNUPGHOME\\\" httpd.tar.bz2.asc \\t\\t\\u0026\\u0026 mkdir -p src \\t\\u0026\\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \\t\\u0026\\u0026 rm httpd.tar.bz2 \\t\\u0026\\u0026 cd src \\t\\t\\u0026\\u0026 ./configure \\t\\t--prefix=\\\"$HTTPD_PREFIX\\\" \\t\\t--enable-mods-shared=reallyall \\t\\u0026\\u0026 make -j\\\"$(nproc)\\\" \\t\\u0026\\u0026 make install \\t\\t\\u0026\\u0026 cd .. \\t\\u0026\\u0026 rm -r src \\t\\t\\u0026\\u0026 sed -ri \\t\\t-e 's!^(\\\\s*CustomLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/1!g' \\t\\t-e 's!^(\\\\s*ErrorLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/2!g' \\t\\t\\\"$HTTPD_PREFIX/conf/httpd.conf\\\" \\t\\t\\u0026\\u0026 apt-get purge -y --auto-remove $buildDeps\"]}}"
},
{
"v1Compatibility": "{\"id\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"parent\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"created\":\"2016-09-23T23:19:04.009782822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"parent\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"created\":\"2016-09-23T23:19:03.705796801Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"parent\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"created\":\"2016-09-23T19:16:55.629947307Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"parent\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"created\":\"2016-09-23T19:16:55.321573403Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"parent\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"created\":\"2016-09-23T19:16:54.948461741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tlibapr1 \\t\\tlibaprutil1 \\t\\tlibaprutil1-ldap \\t\\tlibapr1-dev \\t\\tlibaprutil1-dev \\t\\tlibpcre++0 \\t\\tlibssl1.0.0 \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/*\"]}}"
},
{
"v1Compatibility": "{\"id\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"parent\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"created\":\"2016-09-23T19:16:42.339911155Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) WORKDIR /usr/local/apache2\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"parent\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"created\":\"2016-09-23T19:16:41.990121202Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p \\\"$HTTPD_PREFIX\\\" \\t\\u0026\\u0026 chown www-data:www-data \\\"$HTTPD_PREFIX\\\"\"]}}"
},
{
"v1Compatibility": "{\"id\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"parent\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"created\":\"2016-09-23T19:16:41.037788416Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"parent\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"created\":\"2016-09-23T19:16:40.725768956Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"parent\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:51.133779867Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/bash\\\"]\"]},\"throwaway\":true}"
},
{
"v1Compatibility": "{\"id\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:50.537223822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / \"]}}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "6QVR:5NTY:VIHC:W6IU:XYIN:CTKT:OG5R:XEEG:Z6XJ:2623:YCBP:36MA",
"kty": "EC",
"x": "NAGHj6-IdNonuFoxlqJnNMjcrCCE1CBoq2r_1NDci68",
"y": "Kocqgj_Ey5J-wLXTjkuqLC-HjciAnWxsBEziAOTvSPc"
},
"alg": "ES256"
},
"signature": "2MN5k06i8xkJhD5ay4yxAFK7tsZk58UznAZONxDplvQ5lZwbRS162OeBDjCb0Hk0IDyrLXtAfBDlY2Gzf6jrpw",
"protected": "eyJmb3JtYXRMZW5ndGgiOjEwODk1LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTYtMTAtMTRUMTY6MTI6MDlaIn0"
}
]
}

View file

@ -1,36 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/octet-stream",
"size": 5940,
"digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 51354364,
"digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 150,
"digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 11739507,
"digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 8841833,
"digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 291,
"digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa"
}
]
}

View file

@ -1,372 +0,0 @@
package image
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"path/filepath"
"testing"
"time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// manifestOCI1FromFixture reads the named file from the fixtures directory and
// parses it into a genericManifest, failing the test on any error.
func manifestOCI1FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest {
	// Use a name other than "manifest" so the imported manifest package is not shadowed.
	blob, err := ioutil.ReadFile(filepath.Join("fixtures", fixture))
	require.NoError(t, err)

	parsed, err := manifestOCI1FromManifest(src, blob)
	require.NoError(t, err)
	return parsed
}
// manifestOCI1FromComponentsLikeFixture builds a manifestOCI1 from hard-coded
// components that mirror fixtures/oci1.json, with configBlob as the raw config
// contents (callers pass nil when the config body is not needed).
func manifestOCI1FromComponentsLikeFixture(configBlob []byte) genericManifest {
	return manifestOCI1FromComponents(descriptor{
		MediaType: imgspecv1.MediaTypeImageConfig,
		Size:      5940,
		Digest:    "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
	}, nil, configBlob, []descriptor{
		{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
			Size:      51354364,
		},
		{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
			Size:      150,
		},
		{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
			Size:      11739507,
		},
		{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
			Size:      8841833,
		},
		{
			MediaType: imgspecv1.MediaTypeImageLayerGzip,
			Digest:    "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
			Size:      291,
		},
	})
}
// TestManifestOCI1FromManifest checks that the fixture JSON parses and that an
// empty manifest body is rejected; getter values are covered by the tests for
// the individual getter methods.
func TestManifestOCI1FromManifest(t *testing.T) {
	// Loading the fixture must succeed.
	_ = manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json")

	// An empty body is not a valid OCI manifest.
	if _, err := manifestOCI1FromManifest(nil, []byte{}); err == nil {
		assert.Error(t, err)
	}
}
// TestManifestOCI1FromComponents smoke-tests that a manifest can be created
// from components; the parsed values are verified in the tests for the
// individual getter methods.
func TestManifestOCI1FromComponents(t *testing.T) {
	_ = manifestOCI1FromComponentsLikeFixture(nil)
}
func TestManifestOCI1Serialize(t *testing.T) {
for _, m := range []genericManifest{
manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"),
manifestOCI1FromComponentsLikeFixture(nil),
} {
serialized, err := m.serialize()
require.NoError(t, err)
var contents map[string]interface{}
err = json.Unmarshal(serialized, &contents)
require.NoError(t, err)
original, err := ioutil.ReadFile("fixtures/oci1.json")
require.NoError(t, err)
var originalContents map[string]interface{}
err = json.Unmarshal(original, &originalContents)
require.NoError(t, err)
// We would ideally like to compare “serialized” with some transformation of
// “original”, but the ordering of fields in JSON maps is undefined, so this is
// easier.
assert.Equal(t, originalContents, contents)
}
}
// TestManifestOCI1ManifestMIMEType checks that both construction paths report
// the OCI image manifest media type.
func TestManifestOCI1ManifestMIMEType(t *testing.T) {
	manifests := []genericManifest{
		manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"),
		manifestOCI1FromComponentsLikeFixture(nil),
	}
	for _, m := range manifests {
		assert.Equal(t, imgspecv1.MediaTypeImageManifest, m.manifestMIMEType())
	}
}
func TestManifestOCI1ConfigInfo(t *testing.T) {
for _, m := range []genericManifest{
manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"),
manifestOCI1FromComponentsLikeFixture(nil),
} {
assert.Equal(t, types.BlobInfo{
Size: 5940,
Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f",
}, m.ConfigInfo())
}
}
// TestManifestOCI1ConfigBlob exercises ConfigBlob against a table of GetBlob
// behaviors: a matching config, a nil source, a GetBlob error, a mid-read
// error, and a digest mismatch. It then checks that a components-built
// manifest returns its original config bytes verbatim.
func TestManifestOCI1ConfigBlob(t *testing.T) {
	realConfigJSON, err := ioutil.ReadFile("fixtures/oci1-config.json")
	require.NoError(t, err)

	for _, c := range []struct {
		cbISfn func(digest digest.Digest) (io.ReadCloser, int64, error) // GetBlob implementation; nil means the image source itself is nil
		blob   []byte                                                   // expected ConfigBlob result; nil means an error is expected
	}{
		// Success
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil
		}, realConfigJSON},
		// Various kinds of failures
		{nil, nil},
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			return nil, -1, errors.New("Error returned from GetBlob")
		}, nil},
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			// A reader that fails partway through reading.
			reader, writer := io.Pipe()
			writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob"))
			return reader, 1, nil
		}, nil},
		{func(digest digest.Digest) (io.ReadCloser, int64, error) {
			// Content whose digest does not match the manifest's config descriptor.
			nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest")
			return ioutil.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil
		}, nil},
	} {
		var src types.ImageSource
		if c.cbISfn != nil {
			src = configBlobImageSource{unusedImageSource{}, c.cbISfn}
		} else {
			src = nil
		}
		m := manifestOCI1FromFixture(t, src, "oci1.json")
		blob, err := m.ConfigBlob()
		if c.blob != nil {
			assert.NoError(t, err)
			assert.Equal(t, c.blob, blob)
		} else {
			assert.Error(t, err)
		}
	}

	// Generally ConfigBlob should match ConfigInfo; we don't quite need it to, and this will
	// guarantee that the returned object is returning the original contents instead
	// of reading an object from elsewhere.
	configBlob := []byte("config blob which does not match ConfigInfo")
	// This just tests that the manifest can be created; we test that the parsed
	// values are correctly returned in tests for the individual getter methods.
	m := manifestOCI1FromComponentsLikeFixture(configBlob)
	cb, err := m.ConfigBlob()
	require.NoError(t, err)
	assert.Equal(t, configBlob, cb)
}
// TestManifestOCI1LayerInfo checks that LayerInfos returns the five layer
// digests/sizes from the fixture, in order, for both construction paths.
func TestManifestOCI1LayerInfo(t *testing.T) {
	for _, m := range []genericManifest{
		manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"),
		manifestOCI1FromComponentsLikeFixture(nil),
	} {
		assert.Equal(t, []types.BlobInfo{
			{
				Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb",
				Size:   51354364,
			},
			{
				Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c",
				Size:   150,
			},
			{
				Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9",
				Size:   11739507,
			},
			{
				Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909",
				Size:   8841833,
			},
			{
				Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa",
				Size:   291,
			},
		}, m.LayerInfos())
	}
}
// TestManifestOCI1EmbeddedDockerReferenceConflicts checks that no Docker
// reference conflicts with an OCI manifest (OCI manifests embed none).
func TestManifestOCI1EmbeddedDockerReferenceConflicts(t *testing.T) {
	refNames := []string{"busybox", "example.com:5555/ns/repo:tag"}
	for _, m := range []genericManifest{
		manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"),
		manifestOCI1FromComponentsLikeFixture(nil),
	} {
		for _, name := range refNames {
			ref, err := reference.ParseNormalizedNamed(name)
			require.NoError(t, err)
			assert.False(t, m.EmbeddedDockerReferenceConflicts(ref))
		}
	}
}
// TestManifestOCI1ImageInspectInfo checks that imageInspectInfo extracts the
// expected fields from the fixture config, and that a missing or unparsable
// config blob produces an error.
func TestManifestOCI1ImageInspectInfo(t *testing.T) {
	configJSON, err := ioutil.ReadFile("fixtures/oci1-config.json")
	require.NoError(t, err)

	m := manifestOCI1FromComponentsLikeFixture(configJSON)
	ii, err := m.imageInspectInfo()
	require.NoError(t, err)
	assert.Equal(t, types.ImageInspectInfo{
		Tag:           "",
		Created:       time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC),
		DockerVersion: "1.12.1",
		Labels:        map[string]string{},
		Architecture:  "amd64",
		Os:            "linux",
		Layers:        nil,
	}, *ii)

	// nil configBlob will trigger an error in m.ConfigBlob()
	m = manifestOCI1FromComponentsLikeFixture(nil)
	_, err = m.imageInspectInfo()
	assert.Error(t, err)

	// A config blob that is not valid JSON must also fail.
	m = manifestOCI1FromComponentsLikeFixture([]byte("invalid JSON"))
	_, err = m.imageInspectInfo()
	assert.Error(t, err)
}
func TestManifestOCI1UpdatedImageNeedsLayerDiffIDs(t *testing.T) {
for _, m := range []genericManifest{
manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"),
manifestOCI1FromComponentsLikeFixture(nil),
} {
assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{
ManifestMIMEType: manifest.DockerV2Schema2MediaType,
}))
}
}
// oci1ImageSource is plausible enough for schema conversions in manifestOCI1.UpdatedImage() to work.
type oci1ImageSource struct {
	configBlobImageSource                 // provides the GetBlob implementation used for the config
	ref                   reference.Named // Docker reference returned via Reference()
}
// Reference returns the source's Docker reference wrapped in a mock
// ImageReference, satisfying types.ImageSource.
func (s *oci1ImageSource) Reference() types.ImageReference {
	return refImageReferenceMock{s.ref}
}
// newOCI1ImageSource builds an oci1ImageSource whose GetBlob always serves the
// fixture config, associated with the given Docker reference.
func newOCI1ImageSource(t *testing.T, dockerRef string) *oci1ImageSource {
	configJSON, err := ioutil.ReadFile("fixtures/oci1-config.json")
	require.NoError(t, err)

	parsedRef, err := reference.ParseNormalizedNamed(dockerRef)
	require.NoError(t, err)

	serveConfig := func(digest digest.Digest) (io.ReadCloser, int64, error) {
		return ioutil.NopCloser(bytes.NewReader(configJSON)), int64(len(configJSON)), nil
	}
	return &oci1ImageSource{
		configBlobImageSource: configBlobImageSource{f: serveConfig},
		ref:                   parsedRef,
	}
}
// TestManifestOCI1UpdatedImage exercises manifestOCI1.UpdatedImage: replacing
// layer infos, the (ignored) embedded Docker reference, MIME-type conversion,
// and that the original manifest object is left unmodified.
func TestManifestOCI1UpdatedImage(t *testing.T) {
	originalSrc := newOCI1ImageSource(t, "httpd:latest")
	original := manifestOCI1FromFixture(t, originalSrc, "oci1.json")

	// LayerInfos: a rotated layer list must be reflected in the result…
	layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0])
	res, err := original.UpdatedImage(types.ManifestUpdateOptions{
		LayerInfos: layerInfos,
	})
	require.NoError(t, err)
	assert.Equal(t, layerInfos, res.LayerInfos())
	// …while supplying the wrong number of layers must fail.
	_, err = original.UpdatedImage(types.ManifestUpdateOptions{
		LayerInfos: append(layerInfos, layerInfos[0]),
	})
	assert.Error(t, err)

	// EmbeddedDockerReference:
	// … is ignored
	embeddedRef, err := reference.ParseNormalizedNamed("busybox")
	require.NoError(t, err)
	res, err = original.UpdatedImage(types.ManifestUpdateOptions{
		EmbeddedDockerReference: embeddedRef,
	})
	require.NoError(t, err)
	nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest")
	require.NoError(t, err)
	conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef)
	assert.False(t, conflicts)

	// ManifestMIMEType:
	// Only smoke-test the valid conversions, detailed tests are below. (This also verifies that "original" is not affected.)
	for _, mime := range []string{
		manifest.DockerV2Schema2MediaType,
	} {
		_, err = original.UpdatedImage(types.ManifestUpdateOptions{
			ManifestMIMEType: mime,
			InformationOnly: types.ManifestUpdateInformation{
				Destination: &memoryImageDest{ref: originalSrc.ref},
			},
		})
		assert.NoError(t, err, mime)
	}
	for _, mime := range []string{
		imgspecv1.MediaTypeImageManifest, // This indicates a confused caller, not a no-op.
		"this is invalid",
	} {
		_, err = original.UpdatedImage(types.ManifestUpdateOptions{
			ManifestMIMEType: mime,
		})
		assert.Error(t, err, mime)
	}

	// "original" hasn't been changed: re-parsing the fixture must yield an
	// identical object.
	m2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
	typedOriginal, ok := original.(*manifestOCI1)
	require.True(t, ok)
	typedM2, ok := m2.(*manifestOCI1)
	require.True(t, ok)
	assert.Equal(t, *typedM2, *typedOriginal)
}
// TestConvertToManifestSchema2 converts the OCI fixture to Docker schema2 and
// compares the result against the hand-written fixtures/oci1-to-schema2.json.
func TestConvertToManifestSchema2(t *testing.T) {
	originalSrc := newOCI1ImageSource(t, "httpd-copy:latest")
	original := manifestOCI1FromFixture(t, originalSrc, "oci1.json")
	res, err := original.UpdatedImage(types.ManifestUpdateOptions{
		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
	})
	require.NoError(t, err)

	convertedJSON, mt, err := res.Manifest()
	require.NoError(t, err)
	assert.Equal(t, manifest.DockerV2Schema2MediaType, mt)

	// JSON field order is undefined, so compare parsed maps, not raw bytes.
	byHandJSON, err := ioutil.ReadFile("fixtures/oci1-to-schema2.json")
	require.NoError(t, err)
	var converted, byHand map[string]interface{}
	err = json.Unmarshal(byHandJSON, &byHand)
	require.NoError(t, err)
	err = json.Unmarshal(convertedJSON, &converted)
	require.NoError(t, err)
	assert.Equal(t, byHand, converted)

	// FIXME? Test also the various failure cases, if only to see that we don't crash?
}

View file

@ -1,30 +0,0 @@
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux",
"os.features": [
"sse4"
]
}
}
],
"annotations": {
"com.example.key1": "value1",
"com.example.key2": "value2"
}
}

View file

@ -1,29 +0,0 @@
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
],
"annotations": {
"com.example.key1": "value1",
"com.example.key2": "value2"
}
}

View file

@ -1,5 +0,0 @@
{
"schemaVersion": 99999,
"name": "mitr/noversion-nonsense",
"tag": "latest"
}

View file

@ -1,56 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
"size": 2094,
"digest": "sha256:7820f9a86d4ad15a2c4f0c0e5479298df2aa7c2f6871288e2ef8546f3e7b6783",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
"size": 1922,
"digest": "sha256:ae1b0e06e8ade3a11267564a26e750585ba2259c0ecab59ab165ad1af41d1bdd",
"platform": {
"architecture": "amd64",
"os": "linux",
"features": [
"sse"
]
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
"size": 2084,
"digest": "sha256:e4c0df75810b953d6717b8f8f28298d73870e8aa2a0d5e77b8391f16fdfbbbe2",
"platform": {
"architecture": "s390x",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
"size": 2084,
"digest": "sha256:07ebe243465ef4a667b78154ae6c3ea46fdb1582936aac3ac899ea311a701b40",
"platform": {
"architecture": "arm",
"os": "linux",
"variant": "armv7"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v1+json",
"size": 2090,
"digest": "sha256:fb2fc0707b86dafa9959fe3d29e66af8787aee4d9a23581714be65db4265ad8a",
"platform": {
"architecture": "arm64",
"os": "linux",
"variant": "armv8"
}
}
]
}

View file

@ -1,11 +0,0 @@
{
"schemaVersion": 1,
"name": "mitr/buxybox",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
],
"history": [
],
"signatures": 1
}

View file

@ -1,28 +0,0 @@
{
"schemaVersion": 1,
"name": "mitr/buxybox",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
},
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
},
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
}
],
"history": [
{
"v1Compatibility": "{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
},
{
"v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
},
{
"v1Compatibility": "{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
}
]
}

View file

@ -1,44 +0,0 @@
{
"schemaVersion": 1,
"name": "mitr/buxybox",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
},
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
},
{
"blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
}
],
"history": [
{
"v1Compatibility": "{\"id\":\"f1b5eb0a1215f663765d509b6cdf3841bc2bcff0922346abb943d1342d469a97\",\"parent\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"c0924f5b281a1992127d0afc065e59548ded8880b08aea4debd56d4497acb17a\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Checksum=4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\"],\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Checksum\":\"4fef81d30f31f9213c642881357e6662846a0f884c2366c13ebad807b4031368 ./tests/test-images/Dockerfile.2\",\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
},
{
"v1Compatibility": "{\"id\":\"594075be8d003f784074cc639d970d1fa091a8197850baaae5052c01564ac535\",\"parent\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:38.563048924Z\",\"container\":\"fd4cf54dcd239fbae9bdade9db48e41880b436d27cb5313f60952a46ab04deff\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) LABEL Name=atomic-test-2\"],\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{\"Name\":\"atomic-test-2\"}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
},
{
"v1Compatibility": "{\"id\":\"03dfa1cd1abe452bc2b69b8eb2362fa6beebc20893e65437906318954f6276d4\",\"created\":\"2016-03-03T11:29:32.948089874Z\",\"container\":\"56f0fe1dfc95755dd6cda10f7215c9937a8d9c6348d079c581a261fd4c2f3a5f\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) MAINTAINER \\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "OZ45:U3IG:TDOI:PMBD:NGP2:LDIW:II2U:PSBI:MMCZ:YZUP:TUUO:XPZT",
"kty": "EC",
"x": "ReC5c0J9tgXSdUL4_xzEt5RsD8kFt2wWSgJcpAcOQx8",
"y": "3sBGEqQ3ZMeqPKwQBAadN2toOUEASha18xa0WwsDF-M"
},
"alg": "ES256"
},
"signature": "dV1paJ3Ck1Ph4FcEhg_frjqxdlGdI6-ywRamk6CvMOcaOEUdCWCpCPQeBQpD2N6tGjkoG1BbstkFNflllfenCw",
"protected": "eyJmb3JtYXRMZW5ndGgiOjU0NzgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNC0xOFQyMDo1NDo0MloifQ"
}
]
}

View file

@ -1,26 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
]
}

View file

@ -1,10 +0,0 @@
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
]
}

View file

@ -1,12 +0,0 @@
package manifest
import "github.com/opencontainers/go-digest"
const (
	// TestDockerV2S2ManifestDigest is the Docker manifest digest of "v2s2.manifest.json".
	TestDockerV2S2ManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
	// TestDockerV2S1ManifestDigest is the Docker manifest digest of "v2s1.manifest.json".
	TestDockerV2S1ManifestDigest = digest.Digest("sha256:077594da70fc17ec2c93cfa4e6ed1fcc26992851fb2c71861338aaf4aa9e41b1")
	// TestDockerV2S1UnsignedManifestDigest is the Docker manifest digest of "v2s1unsigned.manifest.json".
	// Note that it has the same value as TestDockerV2S1ManifestDigest above.
	TestDockerV2S1UnsignedManifestDigest = digest.Digest("sha256:077594da70fc17ec2c93cfa4e6ed1fcc26992851fb2c71861338aaf4aa9e41b1")
)

View file

@ -1,125 +0,0 @@
package manifest
import (
"io/ioutil"
"path/filepath"
"testing"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// digestSha256EmptyTar is the SHA-256 digest of zero-length input.
	digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
// TestGuessMIMEType exercises MIME-type detection against the fixture
// manifests; an empty expected type means the input is not recognized.
func TestGuessMIMEType(t *testing.T) {
	testCases := []struct {
		fixture string
		want    string
	}{
		{"v2s2.manifest.json", DockerV2Schema2MediaType},
		{"v2list.manifest.json", DockerV2ListMediaType},
		{"v2s1.manifest.json", DockerV2Schema1SignedMediaType},
		{"v2s1-unsigned.manifest.json", DockerV2Schema1MediaType},
		{"v2s1-invalid-signatures.manifest.json", DockerV2Schema1SignedMediaType},
		{"v2s2nomime.manifest.json", DockerV2Schema2MediaType}, // It is unclear whether this one is legal, but we should guess v2s2 if anything at all.
		{"unknown-version.manifest.json", ""},
		{"non-json.manifest.json", ""}, // Not a manifest (nor JSON) at all
		{"ociv1.manifest.json", imgspecv1.MediaTypeImageManifest},
		{"ociv1.image.index.json", imgspecv1.MediaTypeImageIndex},
	}
	for _, tc := range testCases {
		raw, err := ioutil.ReadFile(filepath.Join("fixtures", tc.fixture))
		require.NoError(t, err)
		got := GuessMIMEType(raw)
		assert.Equal(t, tc.want, got, tc.fixture)
	}
}
// TestDigest checks Digest against fixtures with known digests, rejects a
// manifest whose invalid v2s1 signatures cannot be stripped, and confirms
// that empty input hashes to the SHA-256 of zero-length data.
func TestDigest(t *testing.T) {
	cases := []struct {
		path           string
		expectedDigest digest.Digest
	}{
		{"v2s2.manifest.json", TestDockerV2S2ManifestDigest},
		{"v2s1.manifest.json", TestDockerV2S1ManifestDigest},
		{"v2s1-unsigned.manifest.json", TestDockerV2S1UnsignedManifestDigest},
	}
	for _, c := range cases {
		manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path))
		require.NoError(t, err)
		actualDigest, err := Digest(manifest)
		require.NoError(t, err)
		assert.Equal(t, c.expectedDigest, actualDigest)
	}
	// A manifest with invalid signatures must fail to digest.
	manifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
	require.NoError(t, err)
	actualDigest, err := Digest(manifest)
	assert.Error(t, err)
	// Empty input digests to the well-known empty-data SHA-256.
	actualDigest, err = Digest([]byte{})
	require.NoError(t, err)
	assert.Equal(t, digest.Digest(digestSha256EmptyTar), actualDigest)
}
// TestMatchesDigest checks MatchesDigest for matching, non-matching,
// unrecognized, and malformed digest values, plus the signature-stripping
// failure case and empty input.
func TestMatchesDigest(t *testing.T) {
	cases := []struct {
		path           string
		expectedDigest digest.Digest
		result         bool
	}{
		// Success
		{"v2s2.manifest.json", TestDockerV2S2ManifestDigest, true},
		{"v2s1.manifest.json", TestDockerV2S1ManifestDigest, true},
		// No match (switched s1/s2)
		{"v2s2.manifest.json", TestDockerV2S1ManifestDigest, false},
		{"v2s1.manifest.json", TestDockerV2S2ManifestDigest, false},
		// Unrecognized algorithm
		{"v2s2.manifest.json", digest.Digest("md5:2872f31c5c1f62a694fbd20c1e85257c"), false},
		// Mangled format
		{"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String() + "abc"), false},
		{"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String()[:20]), false},
		{"v2s2.manifest.json", digest.Digest(""), false},
	}
	for _, c := range cases {
		manifest, err := ioutil.ReadFile(filepath.Join("fixtures", c.path))
		require.NoError(t, err)
		res, err := MatchesDigest(manifest, c.expectedDigest)
		require.NoError(t, err)
		assert.Equal(t, c.result, res)
	}
	manifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
	require.NoError(t, err)
	// Even a correct SHA256 hash is rejected if we can't strip the JSON signature.
	res, err := MatchesDigest(manifest, digest.FromBytes(manifest))
	assert.False(t, res)
	assert.Error(t, err)
	// Empty input matches the empty-data digest.
	res, err = MatchesDigest([]byte{}, digest.Digest(digestSha256EmptyTar))
	assert.True(t, res)
	assert.NoError(t, err)
}
// TestAddDummyV2S1Signature verifies that signing an unsigned v2s1 manifest
// produces a JWS whose payload round-trips back to the original bytes, and
// that invalid JSON input is rejected.
func TestAddDummyV2S1Signature(t *testing.T) {
	unsigned, err := ioutil.ReadFile("fixtures/v2s1-unsigned.manifest.json")
	require.NoError(t, err)

	signed, err := AddDummyV2S1Signature(unsigned)
	require.NoError(t, err)
	parsedSig, err := libtrust.ParsePrettySignature(signed, "signatures")
	require.NoError(t, err)
	payload, err := parsedSig.Payload()
	require.NoError(t, err)
	assert.Equal(t, unsigned, payload)

	_, err = AddDummyV2S1Signature([]byte("}this is invalid JSON"))
	assert.Error(t, err)
}

View file

@ -1,61 +0,0 @@
package layout
import (
"os"
"testing"
"github.com/containers/image/types"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// readerFromFunc adapts a plain function (e.g. a closure) into an io.Reader.
type readerFromFunc func([]byte) (int, error)

// Read satisfies io.Reader by delegating to the wrapped function.
func (f readerFromFunc) Read(buf []byte) (int, error) {
	return f(buf)
}
// TestPutBlobDigestFailure simulates behavior on digest verification failure:
// the upload must fail with the reader's error, and the blob must never
// appear in the layout, before or after Commit.
func TestPutBlobDigestFailure(t *testing.T) {
	const digestErrorString = "Simulated digest error"
	const blobDigest = "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"

	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	dirRef, ok := ref.(ociReference)
	require.True(t, ok)
	blobPath, err := dirRef.blobPath(blobDigest)
	assert.NoError(t, err)

	firstRead := true
	reader := readerFromFunc(func(p []byte) (int, error) {
		// While the upload is still in flight, the blob must not exist on disk.
		_, err := os.Lstat(blobPath)
		require.Error(t, err)
		require.True(t, os.IsNotExist(err))
		if firstRead {
			if len(p) > 0 {
				firstRead = false
			}
			// First call: hand back garbage bytes so the digest will not match.
			for i := 0; i < len(p); i++ {
				p[i] = 0xAA
			}
			return len(p), nil
		}
		// Second call: fail the read outright.
		return 0, errors.Errorf(digestErrorString)
	})

	dest, err := ref.NewImageDestination(nil)
	require.NoError(t, err)
	defer dest.Close()
	_, err = dest.PutBlob(reader, types.BlobInfo{Digest: blobDigest, Size: -1})
	assert.Error(t, err)
	// BUGFIX: the arguments were swapped; assert.Contains expects the
	// container first and the element second, so the original only passed
	// because the two strings happened to be equal.
	assert.Contains(t, err.Error(), digestErrorString)

	err = dest.Commit()
	assert.NoError(t, err)

	// The failed blob must not have been committed to the layout.
	_, err = os.Lstat(blobPath)
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
}

View file

@ -1,291 +0,0 @@
package layout
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestTransportName(t *testing.T) {
assert.Equal(t, "oci", Transport.Name())
}
// TestTransportParseReference runs the shared parsing tests against
// Transport.ParseReference.
func TestTransportParseReference(t *testing.T) {
	testParseReference(t, Transport.ParseReference)
}
// TestTransportValidatePolicyConfigurationScope checks which scope strings
// the oci transport accepts (absolute, fully-explicit paths with an optional
// tag) and which it rejects (relative, non-canonical, or malformed values).
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
	// Accepted scopes.
	for _, scope := range []string{
		"/etc",
		"/etc:notlatest",
		"/this/does/not/exist",
		"/this/does/not/exist:notlatest",
		"/:strangecornercase",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.NoError(t, err, scope)
	}
	// Rejected scopes.
	for _, scope := range []string{
		"relative/path",
		"/",
		"/double//slashes",
		"/has/./dot",
		"/has/dot/../dot",
		"/trailing/slash/",
		"/etc:invalid'tag!value@",
		"/path:with/colons",
		"/path:with/colons/and:tag",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.Error(t, err, scope)
	}
}
// TestParseReference runs the shared parsing tests against the package-level
// ParseReference.
func TestParseReference(t *testing.T) {
	testParseReference(t, ParseReference)
}
// testParseReference is a test shared for Transport.ParseReference and ParseReference.
// It checks that an optional ":tag" suffix is split off (defaulting to
// "latest"), and that multiple colons or invalid tag values are rejected.
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) {
	tmpDir, err := ioutil.TempDir("", "oci-transport-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	for _, path := range []string{
		"/",
		"/etc",
		tmpDir,
		"relativepath",
		tmpDir + "/thisdoesnotexist",
	} {
		for _, tag := range []struct{ suffix, tag string }{
			{":notlatest", "notlatest"},
			{"", "latest"}, // No suffix implies the default tag.
		} {
			input := path + tag.suffix
			ref, err := fn(input)
			require.NoError(t, err, input)
			ociRef, ok := ref.(ociReference)
			require.True(t, ok)
			assert.Equal(t, path, ociRef.dir, input)
			assert.Equal(t, tag.tag, ociRef.tag, input)
		}
	}
	_, err = fn(tmpDir + "/with:multiple:colons:and:tag")
	assert.Error(t, err)
	_, err = fn(tmpDir + ":invalid'tag!value@")
	assert.Error(t, err)
}
// TestNewReference checks that NewReference records the directory and tag,
// and rejects nonexistent parent directories, colons in the path, and
// invalid tag values.
func TestNewReference(t *testing.T) {
	const tagValue = "tagValue"
	tmpDir, err := ioutil.TempDir("", "oci-transport-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	ref, err := NewReference(tmpDir, tagValue)
	require.NoError(t, err)
	ociRef, ok := ref.(ociReference)
	require.True(t, ok)
	assert.Equal(t, tmpDir, ociRef.dir)
	assert.Equal(t, tagValue, ociRef.tag)
	_, err = NewReference(tmpDir+"/thisparentdoesnotexist/something", tagValue)
	assert.Error(t, err)
	_, err = NewReference(tmpDir+"/has:colon", tagValue)
	assert.Error(t, err)
	_, err = NewReference(tmpDir, "invalid'tag!value@")
	assert.Error(t, err)
}
// refToTempOCI creates a temporary directory, writes a minimal index.json
// (one manifest entry tagged "tagValue") into it, and returns a reference to it.
// The caller should
// defer os.RemoveAll(tmpDir)
func refToTempOCI(t *testing.T) (ref types.ImageReference, tmpDir string) {
	tmpDir, err := ioutil.TempDir("", "oci-transport-test")
	require.NoError(t, err)
	m := `{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
},
"annotations": {
"org.opencontainers.image.ref.name": "tagValue"
}
}
]
}
`
	// BUGFIX: the error from WriteFile was silently ignored; a failed write
	// would only surface later as a confusing failure in the calling test.
	err = ioutil.WriteFile(filepath.Join(tmpDir, "index.json"), []byte(m), 0644)
	require.NoError(t, err)
	ref, err = NewReference(tmpDir, "tagValue")
	require.NoError(t, err)
	return ref, tmpDir
}
// TestReferenceTransport checks that a reference reports the oci Transport.
func TestReferenceTransport(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	assert.Equal(t, Transport, ref.Transport())
}
// TestReferenceStringWithinTransport checks the string form of a reference
// (explicit tag preserved, "latest" appended by default) and that the output
// re-parses to an equal value.
func TestReferenceStringWithinTransport(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "oci-transport-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	for _, c := range []struct{ input, result string }{
		{"/dir1:notlatest", "/dir1:notlatest"}, // Explicit tag
		{"/dir2", "/dir2:latest"},              // Default tag
	} {
		ref, err := ParseReference(tmpDir + c.input)
		require.NoError(t, err, c.input)
		stringRef := ref.StringWithinTransport()
		assert.Equal(t, tmpDir+c.result, stringRef, c.input)
		// Do one more round to verify that the output can be parsed, to an equal value.
		ref2, err := Transport.ParseReference(stringRef)
		require.NoError(t, err, c.input)
		stringRef2 := ref2.StringWithinTransport()
		assert.Equal(t, stringRef, stringRef2, c.input)
	}
}
// TestReferenceDockerReference checks that an oci reference has no Docker
// reference equivalent.
func TestReferenceDockerReference(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	assert.Nil(t, ref.DockerReference())
}
// TestReferencePolicyConfigurationIdentity checks the "dir:tag" identity
// string, including path canonicalization and "/" as a corner case.
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)

	assert.Equal(t, tmpDir+":tagValue", ref.PolicyConfigurationIdentity())
	// A non-canonical path. Test just one, the various other cases are
	// tested in explicitfilepath.ResolvePathToFullyExplicit.
	ref, err := NewReference(tmpDir+"/.", "tag2")
	require.NoError(t, err)
	assert.Equal(t, tmpDir+":tag2", ref.PolicyConfigurationIdentity())

	// "/" as a corner case.
	ref, err = NewReference("/", "tag3")
	require.NoError(t, err)
	assert.Equal(t, "/:tag3", ref.PolicyConfigurationIdentity())
}
// TestReferencePolicyConfigurationNamespaces checks that the namespaces are
// the chain of parent directories of the reference's path, ending above "/".
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	// We don't really know enough to make a full equality test here.
	ns := ref.PolicyConfigurationNamespaces()
	require.NotNil(t, ns)
	assert.True(t, len(ns) >= 2)
	assert.Equal(t, tmpDir, ns[0])
	assert.Equal(t, filepath.Dir(tmpDir), ns[1])

	// Test with a known path which should exist. Test just one non-canonical
	// path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
	//
	// It would be nice to test a deeper hierarchy, but it is not obvious what
	// deeper path is always available in the various distros, AND is not likely
	// to contains a symbolic link.
	for _, path := range []string{"/etc/skel", "/etc/skel/./."} {
		_, err := os.Lstat(path)
		require.NoError(t, err)
		ref, err := NewReference(path, "sometag")
		require.NoError(t, err)
		ns := ref.PolicyConfigurationNamespaces()
		require.NotNil(t, ns)
		assert.Equal(t, []string{"/etc/skel", "/etc"}, ns)
	}

	// "/" as a corner case: no parent namespaces at all.
	ref, err := NewReference("/", "tag3")
	require.NoError(t, err)
	assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces())
}
// TestReferenceNewImage checks that NewImage fails for this reference.
func TestReferenceNewImage(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	_, err := ref.NewImage(nil)
	assert.Error(t, err)
}
// TestReferenceNewImageSource smoke-tests that an image source can be opened
// from the reference created by refToTempOCI.
func TestReferenceNewImageSource(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	src, err := ref.NewImageSource(nil, nil)
	assert.NoError(t, err)
	// BUGFIX: the ImageSource was previously leaked; close it like the
	// sibling TestReferenceNewImageDestination closes its destination.
	if src != nil {
		defer src.Close()
	}
}
// TestReferenceNewImageDestination smoke-tests that an image destination can
// be opened from the reference created by refToTempOCI.
func TestReferenceNewImageDestination(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	dest, err := ref.NewImageDestination(nil)
	assert.NoError(t, err)
	defer dest.Close()
}
// TestReferenceDeleteImage checks that DeleteImage fails for this reference.
func TestReferenceDeleteImage(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	err := ref.DeleteImage(nil)
	assert.Error(t, err)
}
// TestReferenceOCILayoutPath checks the computed path of the oci-layout file.
func TestReferenceOCILayoutPath(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	ociRef, ok := ref.(ociReference)
	require.True(t, ok)
	assert.Equal(t, tmpDir+"/oci-layout", ociRef.ociLayoutPath())
}
// TestReferenceIndexPath checks the computed path of the index.json file.
func TestReferenceIndexPath(t *testing.T) {
	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	ociRef, ok := ref.(ociReference)
	require.True(t, ok)
	assert.Equal(t, tmpDir+"/index.json", ociRef.indexPath())
}
func TestReferenceBlobPath(t *testing.T) {
const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
ref, tmpDir := refToTempOCI(t)
defer os.RemoveAll(tmpDir)
ociRef, ok := ref.(ociReference)
require.True(t, ok)
bp, err := ociRef.blobPath("sha256:" + hex)
assert.NoError(t, err)
assert.Equal(t, tmpDir+"/blobs/sha256/"+hex, bp)
}
// TestReferenceBlobPathInvalid checks that a digest without an algorithm
// prefix is rejected by blobPath.
func TestReferenceBlobPathInvalid(t *testing.T) {
	const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

	ref, tmpDir := refToTempOCI(t)
	defer os.RemoveAll(tmpDir)
	ociRef, ok := ref.(ociReference)
	require.True(t, ok)
	_, err := ociRef.blobPath(hex)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "unexpected digest reference "+hex)
}

View file

@ -1 +0,0 @@
package oci

View file

@ -1,125 +0,0 @@
package openshift
import (
"testing"
"github.com/containers/image/docker/reference"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// sha256digestHex is a syntactically valid (but arbitrary) sha256 hex value.
	sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// sha256digest is the corresponding "@sha256:..." reference suffix.
	sha256digest = "@sha256:" + sha256digestHex
)
// TestTransportName checks the transport's registered name.
func TestTransportName(t *testing.T) {
	assert.Equal(t, "atomic", Transport.Name())
}
// TestTransportValidatePolicyConfigurationScope checks which scope strings
// the atomic transport accepts (registry, namespace, stream, tag or digest)
// and which it rejects (too-deep hierarchies, double tags).
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
	// Accepted scopes.
	for _, scope := range []string{
		"registry.example.com/ns/stream" + sha256digest,
		"registry.example.com/ns/stream:notlatest",
		"registry.example.com/ns/stream",
		"registry.example.com/ns",
		"registry.example.com",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.NoError(t, err, scope)
	}
	// Rejected scopes.
	for _, scope := range []string{
		"registry.example.com/too/deep/hierarchy",
		"registry.example.com/ns/stream:tag1:tag2",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.Error(t, err, scope)
	}
}
// TestNewReference checks that NewReference rejects a name with too many
// namespace components and accepts a registry/ns/stream:tag name.
func TestNewReference(t *testing.T) {
	// too many ns
	r, err := reference.ParseNormalizedNamed("registry.example.com/ns1/ns2/ns3/stream:tag")
	require.NoError(t, err)
	tagged, ok := r.(reference.NamedTagged)
	require.True(t, ok)
	_, err = NewReference(tagged)
	assert.Error(t, err)

	r, err = reference.ParseNormalizedNamed("registry.example.com/ns/stream:tag")
	require.NoError(t, err)
	tagged, ok = r.(reference.NamedTagged)
	require.True(t, ok)
	_, err = NewReference(tagged)
	assert.NoError(t, err)
}
// TestParseReference checks that parsing splits registry, namespace, stream
// and tag, and rejects names that are invalid Docker references.
func TestParseReference(t *testing.T) {
	// Success
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	osRef, ok := ref.(openshiftReference)
	require.True(t, ok)
	assert.Equal(t, "ns", osRef.namespace)
	assert.Equal(t, "stream", osRef.stream)
	assert.Equal(t, "notlatest", osRef.dockerReference.Tag())
	assert.Equal(t, "registry.example.com:8443", reference.Domain(osRef.dockerReference))

	// Components creating an invalid Docker Reference name
	_, err = ParseReference("registry.example.com/ns/UPPERCASEISINVALID:notlatest")
	assert.Error(t, err)

	_, err = ParseReference("registry.example.com/ns/stream:invalid!tag@value=")
	assert.Error(t, err)
}
// TestReferenceDockerReference checks the Docker reference equivalent of an
// atomic reference.
func TestReferenceDockerReference(t *testing.T) {
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	dockerRef := ref.DockerReference()
	require.NotNil(t, dockerRef)
	assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", dockerRef.String())
}
// TestReferenceTransport checks that a reference reports the atomic Transport.
func TestReferenceTransport(t *testing.T) {
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	assert.Equal(t, Transport, ref.Transport())
}
// TestReferenceStringWithinTransport checks the string form of a reference.
func TestReferenceStringWithinTransport(t *testing.T) {
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.StringWithinTransport())
	// We should do one more round to verify that the output can be parsed, to an equal value,
	// but that is untested because it depends on per-user configuration.
}
// TestReferencePolicyConfigurationIdentity checks the policy configuration
// identity string of an atomic reference.
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	// Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.PolicyConfigurationIdentity())
}
// TestReferencePolicyConfigurationNamespaces checks the namespaces chain,
// from stream down to the bare registry host.
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	// Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference.
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	assert.Equal(t, []string{
		"registry.example.com:8443/ns/stream",
		"registry.example.com:8443/ns",
		"registry.example.com:8443",
	}, ref.PolicyConfigurationNamespaces())
}
// openshiftReference.NewImage, openshiftReference.NewImageSource, openshiftReference.NewImageDestination untested because they depend
// on per-user configuration when initializing httpClient.

// TestReferenceDeleteImage checks that DeleteImage fails for this reference.
func TestReferenceDeleteImage(t *testing.T) {
	ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest")
	require.NoError(t, err)
	err = ref.DeleteImage(nil)
	assert.Error(t, err)
}

View file

@ -1,316 +0,0 @@
package ostree
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"path/filepath"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// sha256digestHex is a syntactically valid (but arbitrary) sha256 hex value.
	sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	// sha256digest is the corresponding "@sha256:..." reference suffix.
	sha256digest = "@sha256:" + sha256digestHex
)
// TestTransportName checks the transport's registered name.
func TestTransportName(t *testing.T) {
	assert.Equal(t, "ostree", Transport.Name())
}
// withTmpDir substitutes every occurrence of the "$TMP" placeholder in a
// repo path with an actual temporary directory path.
func withTmpDir(repo string, tmpDir string) string {
	const placeholder = "$TMP"
	return strings.Replace(repo, placeholder, tmpDir, -1)
}
// A common list of repo suffixes to test for the various ImageReference methods.
// An empty resolvedRepo means the suffix is expected to be rejected.
var repoSuffixes = []struct{ repoSuffix, resolvedRepo string }{
	{"", "/ostree/repo"},
	{"@/ostree/repo", "/ostree/repo"}, // /ostree/repo is accepted even if neither /ostree/repo nor /ostree exists, as a special case.
	{"@$TMP/at@sign@repo", "$TMP/at@sign@repo"},
	// Rejected as ambiguous: /repo:with:colons could either be an (/repo, with:colons) policy configuration identity, or a (/repo:with, colons) policy configuration namespace.
	{"@$TMP/repo:with:colons", ""},
}
// A common list of cases for image name parsing and normalization.
// Empty normalized/branchName values mean the input is expected to be rejected.
var imageNameTestcases = []struct{ input, normalized, branchName string }{
	{"busybox:notlatest", "busybox:notlatest", "busybox_3Anotlatest"}, // Explicit tag
	{"busybox", "busybox:latest", "busybox_3Alatest"}, // Default tag
	{"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "docker.io_2Flibrary_2Fbusybox_3Alatest"}, // A hierarchical name
	{"UPPERCASEISINVALID", "", ""}, // Invalid input
	{"busybox" + sha256digest, "", ""}, // Digested references are not supported (parsed as invalid repository name)
	{"busybox:invalid+tag", "", ""}, // Invalid tag value
	{"busybox:tag:with:colons", "", ""}, // Multiple colons - treated as a tag which contains a colon, which is invalid
	{"", "", ""}, // Empty input is rejected (invalid repository.Named)
}
// TestTransportParseReference checks parsing of every image-name/repo-suffix
// combination from the shared test tables.
func TestTransportParseReference(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreeParseReference")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	for _, c := range imageNameTestcases {
		for _, suffix := range repoSuffixes {
			fullInput := c.input + withTmpDir(suffix.repoSuffix, tmpDir)
			ref, err := Transport.ParseReference(fullInput)
			// Rejection of either component rejects the whole input.
			if c.normalized == "" || suffix.resolvedRepo == "" {
				assert.Error(t, err, fullInput)
			} else {
				require.NoError(t, err, fullInput)
				ostreeRef, ok := ref.(ostreeReference)
				require.True(t, ok, fullInput)
				assert.Equal(t, c.normalized, ostreeRef.image, fullInput)
				assert.Equal(t, c.branchName, ostreeRef.branchName, fullInput)
				assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, fullInput)
			}
		}
	}
}
// TestTransportValidatePolicyConfigurationScope checks which "repo:name"
// scope strings the ostree transport accepts and which it rejects
// (missing colon, relative or non-canonical repo paths).
func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
	// Accepted scopes.
	for _, scope := range []string{
		"/etc:docker.io/library/busybox:notlatest", // This also demonstrates that two colons are interpreted as repo:name:tag.
		"/etc:docker.io/library/busybox",
		"/etc:docker.io/library",
		"/etc:docker.io",
		"/etc:repo",
		"/this/does/not/exist:notlatest",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.NoError(t, err, scope)
	}
	// Rejected scopes.
	for _, scope := range []string{
		"/colon missing as a path-reference delimiter",
		"relative/path:busybox",
		"/double//slashes:busybox",
		"/has/./dot:busybox",
		"/has/dot/../dot:busybox",
		"/trailing/slash/:busybox",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.Error(t, err, scope)
	}
}
// TestNewReference checks NewReference over the shared test tables (skipping
// the "repo omitted" case, which NewReference cannot express) and over a
// range of repo path values.
func TestNewReference(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreeNewReference")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	for _, c := range imageNameTestcases {
		for _, suffix := range repoSuffixes {
			// NewReference always takes an explicit repo; the empty suffix
			// (implied default repo) is not applicable here.
			if suffix.repoSuffix == "" {
				continue
			}
			caseName := c.input + suffix.repoSuffix
			ref, err := NewReference(c.input, withTmpDir(strings.TrimPrefix(suffix.repoSuffix, "@"), tmpDir))
			if c.normalized == "" || suffix.resolvedRepo == "" {
				assert.Error(t, err, caseName)
			} else {
				require.NoError(t, err, caseName)
				ostreeRef, ok := ref.(ostreeReference)
				require.True(t, ok, caseName)
				assert.Equal(t, c.normalized, ostreeRef.image, caseName)
				assert.Equal(t, c.branchName, ostreeRef.branchName, caseName)
				assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, caseName)
			}
		}
	}

	// Repo paths that are accepted with a fixed valid image name.
	for _, path := range []string{
		"/",
		"/etc",
		tmpDir,
		"relativepath",
		tmpDir + "/thisdoesnotexist",
	} {
		_, err := NewReference("busybox", path)
		require.NoError(t, err, path)
	}

	// A nonexistent parent directory is rejected.
	_, err = NewReference("busybox", tmpDir+"/thisparentdoesnotexist/something")
	assert.Error(t, err)
}
// A common list of reference formats to test for the various ImageReference methods.
var validReferenceTestCases = []struct{ input, stringWithinTransport, policyConfigurationIdentity string }{
	{"busybox", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // Everything implied
	{"busybox:latest@/ostree/repo", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // All implied values explicitly specified
	{"example.com/ns/foo:bar@$TMP/non-DEFAULT", "example.com/ns/foo:bar@$TMP/non-DEFAULT", "$TMP/non-DEFAULT:example.com/ns/foo:bar"}, // All values explicitly specified, a hierarchical name
	// A non-canonical path. Testing just one, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
	{"busybox@$TMP/.", "busybox:latest@$TMP", "$TMP:busybox:latest"},
	// "/" as a corner case
	{"busybox@/", "busybox:latest@/", "/:busybox:latest"},
}
// TestReferenceTransport checks that a reference reports the ostree Transport.
func TestReferenceTransport(t *testing.T) {
	ref, err := Transport.ParseReference("busybox")
	require.NoError(t, err)
	assert.Equal(t, Transport, ref.Transport())
}
// TestReferenceStringWithinTransport checks the string form of a reference
// for every shared test case, and that the output re-parses to an equal value.
func TestReferenceStringWithinTransport(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreeStringWithinTransport")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	for _, c := range validReferenceTestCases {
		ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
		require.NoError(t, err, c.input)
		stringRef := ref.StringWithinTransport()
		assert.Equal(t, withTmpDir(c.stringWithinTransport, tmpDir), stringRef, c.input)
		// Do one more round to verify that the output can be parsed, to an equal value.
		ref2, err := Transport.ParseReference(stringRef)
		require.NoError(t, err, c.input)
		stringRef2 := ref2.StringWithinTransport()
		assert.Equal(t, stringRef, stringRef2, c.input)
	}
}
// TestReferenceDockerReference checks that an ostree reference has no Docker
// reference equivalent.
func TestReferenceDockerReference(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreeDockerReference")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	for _, c := range validReferenceTestCases {
		ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
		require.NoError(t, err, c.input)
		dockerRef := ref.DockerReference()
		assert.Nil(t, dockerRef, c.input)
	}
}
// TestReferencePolicyConfigurationIdentity checks PolicyConfigurationIdentity
// against the expected "repo:name:tag" identity for every valid test case.
func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationIdentity")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	for _, c := range validReferenceTestCases {
		ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir))
		require.NoError(t, err, c.input)
		assert.Equal(t, withTmpDir(c.policyConfigurationIdentity, tmpDir), ref.PolicyConfigurationIdentity(), c.input)
	}
}
// TestReferencePolicyConfigurationNamespaces tests PolicyConfigurationIdentity
// and PolicyConfigurationNamespaces jointly for hierarchical names.
func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationNamespaces")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	// Test both that PolicyConfigurationIdentity returns the expected value (repo:fullName:tag),
	// and that PolicyConfigurationNamespaces returns exactly the expected name prefixes
	// (repo:name with path components dropped one at a time), i.e. that the two functions are
	// consistent.
	for inputName, expectedNS := range map[string][]string{
		"example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"},
		"example.com/repo":    {"example.com/repo", "example.com"},
		"localhost/ns/repo":   {"localhost/ns/repo", "localhost/ns", "localhost"},
		"localhost/repo":      {"localhost/repo", "localhost"},
		"ns/repo":             {"ns/repo", "ns"},
		"repo":                {"repo"},
	} {
		// Test with a known path which should exist. Test just one non-canonical
		// path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit.
		for _, repoInput := range []string{tmpDir, tmpDir + "/./."} {
			fullName := inputName + ":notlatest"
			ref, err := NewReference(fullName, repoInput)
			require.NoError(t, err, fullName)
			identity := ref.PolicyConfigurationIdentity()
			assert.Equal(t, tmpDir+":"+expectedNS[0]+":notlatest", identity, fullName)
			ns := ref.PolicyConfigurationNamespaces()
			require.NotNil(t, ns, fullName)
			require.Len(t, ns, len(expectedNS), fullName)
			// Each namespace must be a prefix of the next-more-specific entry,
			// starting from the identity itself.
			moreSpecific := identity
			for i := range expectedNS {
				assert.Equal(t, tmpDir+":"+expectedNS[i], ns[i], fmt.Sprintf("%s item %d", fullName, i))
				assert.True(t, strings.HasPrefix(moreSpecific, ns[i]))
				moreSpecific = ns[i]
			}
		}
	}
}
// TestReferenceNewImage checks that NewImage fails for an ostree reference
// (reading images is not supported by this transport in this configuration).
func TestReferenceNewImage(t *testing.T) {
	ref, err := Transport.ParseReference("busybox")
	require.NoError(t, err)
	_, err = ref.NewImage(nil)
	assert.Error(t, err)
}
// TestReferenceNewImageSource checks that NewImageSource fails for an ostree
// reference, mirroring TestReferenceNewImage.
func TestReferenceNewImageSource(t *testing.T) {
	ref, err := Transport.ParseReference("busybox")
	require.NoError(t, err)
	_, err = ref.NewImageSource(nil, nil)
	assert.Error(t, err)
}
// TestReferenceNewImageDestination checks that NewImageDestination succeeds
// and that the destination's temporary directory respects
// SystemContext.OSTreeTmpDirPath (falling back to os.TempDir()).
func TestReferenceNewImageDestination(t *testing.T) {
	otherTmpDir, err := ioutil.TempDir("", "ostree-transport-test")
	require.NoError(t, err)
	defer os.RemoveAll(otherTmpDir)
	for _, c := range []struct {
		ctx    *types.SystemContext
		tmpDir string
	}{
		{nil, os.TempDir()},
		{&types.SystemContext{}, os.TempDir()},
		{&types.SystemContext{OSTreeTmpDirPath: otherTmpDir}, otherTmpDir},
	} {
		ref, err := Transport.ParseReference("busybox")
		require.NoError(t, err)
		dest, err := ref.NewImageDestination(c.ctx)
		require.NoError(t, err)
		ostreeDest, ok := dest.(*ostreeImageDestination)
		require.True(t, ok)
		assert.Equal(t, c.tmpDir+"/busybox_3Alatest", ostreeDest.tmpDirPath)
		// NOTE(review): defer inside the loop delays every Close until the
		// test returns; harmless with three iterations, but an explicit Close
		// per iteration would be tidier.
		defer dest.Close()
	}
}
// TestReferenceDeleteImage checks that DeleteImage fails when the reference
// points at a repo path that does not exist.
func TestReferenceDeleteImage(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "ostreeDeleteImage")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	ref, err := Transport.ParseReference(withTmpDir("busybox@$TMP/this-repo-does-not-exist", tmpDir))
	require.NoError(t, err)
	err = ref.DeleteImage(nil)
	assert.Error(t, err)
}
// TestEncodeOSTreeRef checks the escaping of Docker tags into ostree branch
// names (":" becomes "_3A").
func TestEncodeOSTreeRef(t *testing.T) {
	// Just a smoke test
	assert.Equal(t, "busybox_3Alatest", encodeOStreeRef("busybox:latest"))
}
// TestReferenceManifestPath checks that manifestPath places the manifest at
// "manifest/manifest.json" using the OS path separator.
func TestReferenceManifestPath(t *testing.T) {
	ref, err := Transport.ParseReference("busybox")
	require.NoError(t, err)
	ostreeRef, ok := ref.(ostreeReference)
	require.True(t, ok)
	assert.Equal(t, fmt.Sprintf("manifest%cmanifest.json", filepath.Separator), ostreeRef.manifestPath())
}
// TestReferenceSignaturePath checks that signaturePath(i) maps the zero-based
// signature index i to "manifest/signature-<i+1>" with the OS path separator.
func TestReferenceSignaturePath(t *testing.T) {
	ref, err := Transport.ParseReference("busybox")
	require.NoError(t, err)
	ostreeRef, ok := ref.(ostreeReference)
	require.True(t, ok)
	for _, c := range []struct {
		input  int
		suffix string
	}{
		{0, "-1"},
		{42, "-43"},
	} {
		// Label the assertion with fmt.Sprintf("%d", ...) instead of
		// string(c.input): converting an int to string yields the rune with
		// that code point (string(42) == "*"), not a decimal number, so the
		// original label was garbage (go vet flags this conversion).
		assert.Equal(t, fmt.Sprintf("manifest%csignature%s", filepath.Separator, c.suffix), ostreeRef.signaturePath(c.input), fmt.Sprintf("%d", c.input))
	}
}

View file

@ -1,86 +0,0 @@
package compression
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDetectCompression checks DetectCompression against the fixture files:
// the returned stream preserves the original bytes, the chosen decompressor
// recovers the expected payload (or errors for unimplemented formats), and
// empty / failing inputs are handled gracefully.
func TestDetectCompression(t *testing.T) {
	cases := []struct {
		filename      string
		unimplemented bool
	}{
		{"fixtures/Hello.uncompressed", false},
		{"fixtures/Hello.gz", false},
		{"fixtures/Hello.bz2", false},
		{"fixtures/Hello.xz", true},
	}
	// The original stream is preserved.
	for _, c := range cases {
		// Each iteration runs in a closure so that the deferred Close fires
		// per file; the original deferred inside the loop, keeping every
		// fixture open until the whole test returned.
		func() {
			originalContents, err := ioutil.ReadFile(c.filename)
			require.NoError(t, err, c.filename)
			stream, err := os.Open(c.filename)
			require.NoError(t, err, c.filename)
			defer stream.Close()
			_, updatedStream, err := DetectCompression(stream)
			require.NoError(t, err, c.filename)
			updatedContents, err := ioutil.ReadAll(updatedStream)
			require.NoError(t, err, c.filename)
			assert.Equal(t, originalContents, updatedContents, c.filename)
		}()
	}
	// The correct decompressor is chosen, and the result is as expected.
	for _, c := range cases {
		func() {
			stream, err := os.Open(c.filename)
			require.NoError(t, err, c.filename)
			defer stream.Close()
			decompressor, updatedStream, err := DetectCompression(stream)
			require.NoError(t, err, c.filename)
			var uncompressedStream io.Reader
			switch {
			case decompressor == nil:
				uncompressedStream = updatedStream
			case c.unimplemented:
				_, err := decompressor(updatedStream)
				assert.Error(t, err)
				return // was `continue` in the loop body; return exits this iteration's closure
			default:
				s, err := decompressor(updatedStream)
				require.NoError(t, err)
				uncompressedStream = s
			}
			uncompressedContents, err := ioutil.ReadAll(uncompressedStream)
			require.NoError(t, err, c.filename)
			assert.Equal(t, []byte("Hello"), uncompressedContents, c.filename)
		}()
	}
	// Empty input is handled reasonably.
	decompressor, updatedStream, err := DetectCompression(bytes.NewReader([]byte{}))
	require.NoError(t, err)
	assert.Nil(t, decompressor)
	updatedContents, err := ioutil.ReadAll(updatedStream)
	require.NoError(t, err)
	assert.Equal(t, []byte{}, updatedContents)
	// Error reading input
	reader, writer := io.Pipe()
	defer reader.Close()
	writer.CloseWithError(errors.New("Expected error reading input in DetectCompression"))
	_, _, err = DetectCompression(reader)
	assert.Error(t, err)
}

View file

@ -1 +0,0 @@
Hello

View file

@ -1,86 +0,0 @@
package strslice
import (
"encoding/json"
"reflect"
"testing"
)
// TestStrSliceMarshalJSON checks JSON serialization of StrSlice: a nil slice
// encodes as "null", an empty slice as "[]", and elements as a string array.
func TestStrSliceMarshalJSON(t *testing.T) {
	cases := []struct {
		input    StrSlice
		expected string
	}{
		// MADNESS(stevvooe): No clue why nil would be "" but empty would be
		// "null". Had to make a change here that may affect compatibility.
		{input: nil, expected: "null"},
		{StrSlice{}, "[]"},
		{StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`},
	}
	for _, tc := range cases {
		data, err := json.Marshal(tc.input)
		if err != nil {
			t.Fatal(err)
		}
		if got := string(data); got != tc.expected {
			t.Fatalf("%#v: expected %v, got %v", tc.input, tc.expected, got)
		}
	}
}
// TestStrSliceUnmarshalJSON checks JSON deserialization into StrSlice: an
// empty input leaves the pre-populated default untouched, "[]" yields an
// empty slice, and an array replaces the contents.
func TestStrSliceUnmarshalJSON(t *testing.T) {
	parts := map[string][]string{
		"":                        {"default", "values"},
		"[]":                      {},
		`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
	}
	// The loop variable was named "json", shadowing the encoding/json package
	// for the whole loop body; renamed to "input" to avoid the confusion.
	for input, expectedParts := range parts {
		strs := StrSlice{"default", "values"}
		if err := strs.UnmarshalJSON([]byte(input)); err != nil {
			t.Fatal(err)
		}
		actualParts := []string(strs)
		if !reflect.DeepEqual(actualParts, expectedParts) {
			t.Fatalf("%#v: expected %v, got %v", input, expectedParts, actualParts)
		}
	}
}
// TestStrSliceUnmarshalString checks that a bare JSON string unmarshals into
// a one-element StrSlice containing that string.
func TestStrSliceUnmarshalString(t *testing.T) {
	encoded, err := json.Marshal("echo")
	if err != nil {
		t.Fatal(err)
	}
	var slice StrSlice
	if err := json.Unmarshal(encoded, &slice); err != nil {
		t.Fatal(err)
	}
	if len(slice) != 1 {
		t.Fatalf("expected 1 element after unmarshal: %q", slice)
	}
	if slice[0] != "echo" {
		t.Fatalf("expected `echo`, got: %q", slice[0])
	}
}
// TestStrSliceUnmarshalSlice checks that a one-element JSON array unmarshals
// into a one-element StrSlice.
func TestStrSliceUnmarshalSlice(t *testing.T) {
	encoded, err := json.Marshal([]string{"echo"})
	if err != nil {
		t.Fatal(err)
	}
	var slice StrSlice
	if err := json.Unmarshal(encoded, &slice); err != nil {
		t.Fatal(err)
	}
	if len(slice) != 1 {
		t.Fatalf("expected 1 element after unmarshal: %q", slice)
	}
	if slice[0] != "echo" {
		t.Fatalf("expected `echo`, got: %q", slice[0])
	}
}

View file

@ -1,108 +0,0 @@
package signature
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSignDockerManifest exercises SignDockerManifest: a successful sign that
// round-trips through VerifyDockerManifestSignature, plus the failure paths
// (bad manifest, empty reference, unknown signing key). Skipped when the GPG
// mechanism cannot sign (e.g. verification-only builds).
func TestSignDockerManifest(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil {
		t.Skipf("Signing not supported: %v", err)
	}
	manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
	require.NoError(t, err)
	// Successful signing
	signature, err := SignDockerManifest(manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	require.NoError(t, err)
	verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.NoError(t, err)
	assert.Equal(t, TestImageSignatureReference, verified.DockerReference)
	assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest)
	// Error computing Docker manifest
	invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
	require.NoError(t, err)
	_, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	// Error creating blob to sign
	_, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprint)
	assert.Error(t, err)
	// Error signing
	_, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist")
	assert.Error(t, err)
}
// TestVerifyDockerManifestSignature exercises VerifyDockerManifestSignature:
// successful verification (including an alternate canonicalization of the
// reference), and each failure path returns both an error and a nil Signature.
func TestVerifyDockerManifestSignature(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	defer mech.Close()
	manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
	require.NoError(t, err)
	signature, err := ioutil.ReadFile("fixtures/image.signature")
	require.NoError(t, err)
	// Successful verification
	sig, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	require.NoError(t, err)
	assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
	assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
	// Verification using a different canonicalization of TestImageSignatureReference
	sig, err = VerifyDockerManifestSignature(signature, manifest, "docker.io/"+TestImageSignatureReference, mech, TestKeyFingerprint)
	require.NoError(t, err)
	assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
	assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
	// For extra paranoia, test that we return nil data on error.
	// Invalid docker reference on input
	sig, err = VerifyDockerManifestSignature(signature, manifest, "UPPERCASEISINVALID", mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
	// Error computing Docker manifest
	invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
	require.NoError(t, err)
	sig, err = VerifyDockerManifestSignature(signature, invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
	// Error verifying signature
	corruptSignature, err := ioutil.ReadFile("fixtures/corrupt.signature")
	require.NoError(t, err) // previously unchecked: a missing fixture would have silently verified an empty blob
	sig, err = VerifyDockerManifestSignature(corruptSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
	// Key fingerprint mismatch
	sig, err = VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, "unexpected fingerprint")
	assert.Error(t, err)
	assert.Nil(t, sig)
	// Invalid reference in the signature
	invalidReferenceSignature, err := ioutil.ReadFile("fixtures/invalid-reference.signature")
	require.NoError(t, err) // previously unchecked, same rationale as above
	sig, err = VerifyDockerManifestSignature(invalidReferenceSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
	// Docker reference mismatch
	sig, err = VerifyDockerManifestSignature(signature, manifest, "example.com/doesnt/match", mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
	// Docker manifest digest mismatch
	sig, err = VerifyDockerManifestSignature(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestKeyFingerprint)
	assert.Error(t, err)
	assert.Nil(t, sig)
}

View file

@ -1,6 +0,0 @@
/*.gpg~
/.gpg-v21-migrated
/private-keys-v1.d
/random_seed
/gnupg_spawn_agent_sentinel.lock
/.#*

View file

@ -1,27 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
],
"extra": "this manifest has been modified"
}

View file

@ -1,26 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
]
}

View file

@ -1,96 +0,0 @@
{
"default": [
{
"type": "reject"
}
],
"transports": {
"dir": {
"": [
{
"type": "insecureAcceptAnything"
}
]
},
"docker": {
"example.com/playground": [
{
"type": "insecureAcceptAnything"
}
],
"example.com/production": [
{
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/keys/employee-gpg-keyring"
}
],
"example.com/hardened": [
{
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/keys/employee-gpg-keyring",
"signedIdentity": {
"type": "matchRepository"
}
},
{
"type": "signedBy",
"keyType": "signedByGPGKeys",
"keyPath": "/keys/public-key-signing-gpg-keyring",
"signedIdentity": {
"type": "matchExact"
}
},
{
"type": "signedBaseLayer",
"baseLayerIdentity": {
"type": "exactRepository",
"dockerRepository": "registry.access.redhat.com/rhel7/rhel"
}
}
],
"example.com/hardened-x509": [
{
"type": "signedBy",
"keyType": "X509Certificates",
"keyPath": "/keys/employee-cert-file",
"signedIdentity": {
"type": "matchRepository"
}
},
{
"type": "signedBy",
"keyType": "signedByX509CAs",
"keyPath": "/keys/public-key-signing-ca-file"
}
],
"registry.access.redhat.com": [
{
"type": "signedBy",
"keyType": "signedByGPGKeys",
"keyPath": "/keys/RH-key-signing-key-gpg-keyring",
"signedIdentity": {
"type": "matchRepoDigestOrExact"
}
}
],
"bogus/key-data-example": [
{
"type": "signedBy",
"keyType": "signedByGPGKeys",
"keyData": "bm9uc2Vuc2U="
}
],
"bogus/signed-identity-example": [
{
"type": "signedBaseLayer",
"baseLayerIdentity": {
"type": "exactReference",
"dockerReference": "registry.access.redhat.com/rhel7/rhel:latest"
}
}
]
}
}
}

View file

@ -1,19 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mI0EVurzqQEEAL3qkFq4K2URtSWVDYnQUNA9HdM9sqS2eAWfqUFMrkD5f+oN+LBL
tPyaE5GNLA0vXY7nHAM2TeM8ijZ/eMP17Raj64JL8GhCymL3wn2jNvb9XaF0R0s6
H0IaRPPu45A3SnxLwm4Orc/9Z7/UxtYjKSg9xOaTiVPzJgaf5Vm4J4ApABEBAAG0
EnNrb3BlbyB0ZXN0aW5nIGtleYi4BBMBAgAiBQJW6vOpAhsDBgsJCAcDAgYVCAIJ
CgsEFgIDAQIeAQIXgAAKCRDbcvIYi7RsyBbOBACgJFiKDlQ1UyvsNmGqJ7D0OpbS
1OppJlradKgZXyfahFswhFI+7ZREvELLHbinq3dBy5cLXRWzQKdJZNHknSN5Tjf2
0ipVBQuqpcBo+dnKiG4zH6fhTri7yeTZksIDfsqlI6FXDOdKLUSnahagEBn4yU+x
jHPvZk5SuuZv56A45biNBFbq86kBBADIC/9CsAlOmRALuYUmkhcqEjuFwn3wKz2d
IBjzgvro7zcVNNCgxQfMEjcUsvEh5cx13G3QQHcwOKy3M6Bv6VMhfZjd+1P1el4P
0fJS8GFmhWRBknMN8jFsgyohQeouQ798RFFv94KszfStNnr/ae8oao5URmoUXSCa
/MdUxn0YKwARAQABiJ8EGAECAAkFAlbq86kCGwwACgkQ23LyGIu0bMjUywQAq0dn
lUpDNSoLTcpNWuVvHQ7c/qmnE4TyiSLiRiAywdEWA6gMiyhUUucuGsEhMFP1WX1k
UNwArZ6UG7BDOUsvngP7jKGNqyUOQrq1s/r8D+0MrJGOWErGLlfttO2WeoijECkI
5qm8cXzAra3Xf/Z3VjxYTKSnNu37LtZkakdTdYE=
=tJAt
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -1,11 +0,0 @@
{
"schemaVersion": 1,
"name": "mitr/buxybox",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
],
"history": [
],
"signatures": 1
}

View file

@ -1,14 +0,0 @@
package signature
import "github.com/opencontainers/go-digest"
// Shared constants describing the signing fixtures in this directory.
const (
	// TestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
	TestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
	// TestImageSignatureReference is the Docker image reference signed in "image.signature"
	TestImageSignatureReference = "testing/manifest"
	// TestKeyFingerprint is the fingerprint of the private key in this directory.
	TestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
	// TestKeyShortID is the short ID of the private key in this directory.
	TestKeyShortID = "DB72F2188BB46CC8"
)

View file

@ -1,137 +0,0 @@
package signature
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mSI map[string]interface{} // To minimize typing the long name

// x walks nested JSON objects by the given field names and returns the final
// object, or panics. No error handling is done on purpose: we know what we
// are working with, a panic in a test is good enough, and fitting test cases
// on a single line is a priority.
func x(m mSI, fields ...string) mSI {
	current := m
	for _, name := range fields {
		// Not .(mSI) because type assertion of an unnamed type to a named type
		// always fails (the types are not "identical"), but the assignment is
		// fine because they are "assignable".
		current = current[name].(map[string]interface{})
	}
	return current
}
// implementsUnmarshalJSON is a minimalistic type used to detect that
// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved
// pointers.
type implementsUnmarshalJSON bool
// Compile-time check that Policy implements json.Unmarshaler.
var _ json.Unmarshaler = (*implementsUnmarshalJSON)(nil)
func (dest *implementsUnmarshalJSON) UnmarshalJSON(data []byte) error {
_ = data // We don't care, not really.
*dest = true // Mark handler as called
return nil
}
// TestParanoidUnmarshalJSONObject checks paranoidUnmarshalJSONObject with a
// field-name resolver: empty objects, successful decoding, delegation to
// json.Unmarshaler values, and rejection of malformed or unexpected input.
func TestParanoidUnmarshalJSONObject(t *testing.T) {
	type testStruct struct {
		A string
		B int
	}
	ts := testStruct{}
	var unmarshalJSONCalled implementsUnmarshalJSON
	// tsResolver maps JSON keys to destination pointers; unknown keys resolve
	// to nil, which the unmarshaler must reject.
	tsResolver := func(key string) interface{} {
		switch key {
		case "a":
			return &ts.A
		case "b":
			return &ts.B
		case "implementsUnmarshalJSON":
			return &unmarshalJSONCalled
		default:
			return nil
		}
	}
	// Empty object
	ts = testStruct{}
	err := paranoidUnmarshalJSONObject([]byte(`{}`), tsResolver)
	require.NoError(t, err)
	assert.Equal(t, testStruct{}, ts)
	// Success
	ts = testStruct{}
	err = paranoidUnmarshalJSONObject([]byte(`{"a":"x", "b":2}`), tsResolver)
	require.NoError(t, err)
	assert.Equal(t, testStruct{A: "x", B: 2}, ts)
	// json.Unamarshaler is used for decoding values
	ts = testStruct{}
	unmarshalJSONCalled = implementsUnmarshalJSON(false)
	err = paranoidUnmarshalJSONObject([]byte(`{"implementsUnmarshalJSON":true}`), tsResolver)
	require.NoError(t, err)
	assert.Equal(t, unmarshalJSONCalled, implementsUnmarshalJSON(true))
	// Various kinds of invalid input
	for _, input := range []string{
		``,                       // Empty input
		`&`,                      // Entirely invalid JSON
		`1`,                      // Not an object
		`{&}`,                    // Invalid key JSON
		`{1:1}`,                  // Key not a string
		`{"b":1, "b":1}`,         // Duplicate key
		`{"thisdoesnotexist":1}`, // Key rejected by resolver
		`{"a":&}`,                // Invalid value JSON
		`{"a":1}`,                // Type mismatch
		`{"a":"value"}{}`,        // Extra data after object
	} {
		ts = testStruct{}
		err := paranoidUnmarshalJSONObject([]byte(input), tsResolver)
		assert.Error(t, err, input)
	}
}
// TestParanoidUnmarshalJSONObjectExactFields checks the exact-field variant:
// the input must contain precisely the expected keys, each decoded into the
// supplied destination, and any malformed or unexpected input is rejected.
func TestParanoidUnmarshalJSONObjectExactFields(t *testing.T) {
	var stringValue string
	var float64Value float64
	var rawValue json.RawMessage
	var unmarshallCalled implementsUnmarshalJSON
	exactFields := map[string]interface{}{
		"string":       &stringValue,
		"float64":      &float64Value,
		"raw":          &rawValue,
		"unmarshaller": &unmarshallCalled,
	}
	// Empty object
	err := paranoidUnmarshalJSONObjectExactFields([]byte(`{}`), map[string]interface{}{})
	require.NoError(t, err)
	// Success
	err = paranoidUnmarshalJSONObjectExactFields([]byte(`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`), exactFields)
	require.NoError(t, err)
	assert.Equal(t, "a", stringValue)
	assert.Equal(t, 3.5, float64Value)
	assert.Equal(t, json.RawMessage(`{"a":"b"}`), rawValue)
	assert.Equal(t, implementsUnmarshalJSON(true), unmarshallCalled)
	// Various kinds of invalid input
	for _, input := range []string{
		``,      // Empty input
		`&`,     // Entirely invalid JSON
		`1`,     // Not an object
		`{&}`,   // Invalid key JSON
		`{1:1}`, // Key not a string
		`{"string": "a", "string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`, // Duplicate key
		// The unknown-key case previously read `"thisisunknown", 1`, which is
		// malformed JSON and therefore tested syntax rejection, not unknown-key
		// rejection; fixed to be valid JSON with a genuinely unknown key.
		`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true, "thisisunknown": 1}`, // Unknown key
		`{"string": &, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`,                       // Invalid value JSON
		`{"string": 1, "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}`,                       // Type mismatch
		`{"string": "a", "float64": 3.5, "raw": {"a":"b"}, "unmarshaller": true}{}`,                   // Extra data after object
	} {
		err := paranoidUnmarshalJSONObjectExactFields([]byte(input), exactFields)
		assert.Error(t, err, input)
	}
}

View file

@ -1,37 +0,0 @@
// +build !containers_image_openpgp
package signature
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGPGMESigningMechanismClose checks that closing an ephemeral gpgme
// mechanism removes its temporary GPG home directory.
func TestGPGMESigningMechanismClose(t *testing.T) {
	// Closing an ephemeral mechanism removes the directory.
	// (The non-ephemeral case is tested in the common TestGPGSigningMechanismClose)
	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
	require.NoError(t, err)
	gpgMech, ok := mech.(*gpgmeSigningMechanism)
	require.True(t, ok)
	dir := gpgMech.ephemeralDir
	assert.NotEmpty(t, dir)
	// The directory must exist before Close and be gone after it.
	_, err = os.Lstat(dir)
	require.NoError(t, err)
	err = mech.Close()
	assert.NoError(t, err)
	_, err = os.Lstat(dir)
	require.Error(t, err)
	assert.True(t, os.IsNotExist(err))
}
// TestGPGMESigningMechanismSupportsSigning checks that the gpgme-backed
// mechanism reports signing support.
func TestGPGMESigningMechanismSupportsSigning(t *testing.T) {
	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
	require.NoError(t, err)
	defer mech.Close()
	err = mech.SupportsSigning()
	assert.NoError(t, err)
}
View file

@ -1,28 +0,0 @@
// +build containers_image_openpgp
package signature
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestOpenpgpSigningMechanismSupportsSigning checks that the pure-Go openpgp
// mechanism reports it cannot sign, via a SigningNotSupportedError.
func TestOpenpgpSigningMechanismSupportsSigning(t *testing.T) {
	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
	require.NoError(t, err)
	defer mech.Close()
	err = mech.SupportsSigning()
	assert.Error(t, err)
	assert.IsType(t, SigningNotSupportedError(""), err)
}
// TestOpenpgpSigningMechanismSign checks that Sign on the openpgp mechanism
// fails with a SigningNotSupportedError.
func TestOpenpgpSigningMechanismSign(t *testing.T) {
	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
	require.NoError(t, err)
	defer mech.Close()
	_, err = mech.Sign([]byte{}, TestKeyFingerprint)
	assert.Error(t, err)
	assert.IsType(t, SigningNotSupportedError(""), err)
}

View file

@ -1,297 +0,0 @@
package signature
// These tests are expected to pass unmodified for _both_ mechanism_gpgme.go and mechanism_openpgp.go.
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// testGPGHomeDirectory is the GPG home containing the test keyring fixtures.
	testGPGHomeDirectory = "./fixtures"
)
func TestSigningNotSupportedError(t *testing.T) {
// A stupid test just to keep code coverage
s := "test"
err := SigningNotSupportedError(s)
assert.Equal(t, s, err.Error())
}
// TestNewGPGSigningMechanism is a dumb test just for code coverage. We test
// more with newGPGSigningMechanismInDirectory().
func TestNewGPGSigningMechanism(t *testing.T) {
	mech, err := NewGPGSigningMechanism()
	// require, not assert: if construction fails, mech is nil and the Close
	// below would panic instead of reporting the real failure.
	require.NoError(t, err)
	mech.Close()
}
// TestNewGPGSigningMechanismInDirectory checks that the GPG home directory
// parameter controls key availability: the fixture key verifies only when the
// fixtures directory (or GNUPGHOME pointing at it) is used, and not with the
// default home, an empty directory, or an unreadable pubring.gpg.
func TestNewGPGSigningMechanismInDirectory(t *testing.T) {
	// A dumb test just for code coverage.
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	// require, not assert: on failure mech is nil and Close would panic,
	// hiding the real error.
	require.NoError(t, err)
	mech.Close()
	// The various GPG failure cases are not obviously easy to reach.

	// Test that using the default directory (presumably in users home)
	// cannot use TestKeyFingerprint.
	signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	mech, err = newGPGSigningMechanismInDirectory("")
	require.NoError(t, err)
	defer mech.Close()
	_, _, err = mech.Verify(signature)
	assert.Error(t, err)
	// Similarly, using a newly created empty directory makes TestKeyFingerprint
	// unavailable
	emptyDir, err := ioutil.TempDir("", "signing-empty-directory")
	require.NoError(t, err)
	defer os.RemoveAll(emptyDir)
	mech, err = newGPGSigningMechanismInDirectory(emptyDir)
	require.NoError(t, err)
	defer mech.Close()
	_, _, err = mech.Verify(signature)
	assert.Error(t, err)
	// If pubring.gpg is unreadable in the directory, either initializing
	// the mechanism fails (with openpgp), or it succeeds (sadly, gpgme) and
	// later verification fails.
	unreadableDir, err := ioutil.TempDir("", "signing-unreadable-directory")
	require.NoError(t, err)
	defer os.RemoveAll(unreadableDir)
	f, err := os.OpenFile(filepath.Join(unreadableDir, "pubring.gpg"), os.O_RDONLY|os.O_CREATE, 0000)
	require.NoError(t, err)
	f.Close()
	mech, err = newGPGSigningMechanismInDirectory(unreadableDir)
	if err == nil {
		defer mech.Close()
		_, _, err = mech.Verify(signature)
	}
	assert.Error(t, err)
	// Setting the directory parameter to testGPGHomeDirectory makes the key available.
	mech, err = newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	defer mech.Close()
	_, _, err = mech.Verify(signature)
	assert.NoError(t, err)
	// If we use the default directory mechanism, GNUPGHOME is respected.
	origGNUPGHOME := os.Getenv("GNUPGHOME")
	defer os.Setenv("GNUPGHOME", origGNUPGHOME)
	os.Setenv("GNUPGHOME", testGPGHomeDirectory)
	mech, err = newGPGSigningMechanismInDirectory("")
	require.NoError(t, err)
	defer mech.Close()
	_, _, err = mech.Verify(signature)
	assert.NoError(t, err)
}
// TestNewEphemeralGPGSigningMechanism checks ephemeral mechanism creation:
// empty and invalid key blobs import nothing, a public key blob makes the
// test signature verify, and duplicated binary key data yields duplicate
// key identities.
func TestNewEphemeralGPGSigningMechanism(t *testing.T) {
	// Empty input: This is accepted anyway by GPG, just returns no keys.
	mech, keyIdentities, err := NewEphemeralGPGSigningMechanism([]byte{})
	require.NoError(t, err)
	defer mech.Close()
	assert.Empty(t, keyIdentities)
	// Try validating a signature when the key is unknown.
	signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	content, signingFingerprint, err := mech.Verify(signature)
	require.Error(t, err)
	// Successful import
	keyBlob, err := ioutil.ReadFile("./fixtures/public-key.gpg")
	require.NoError(t, err)
	mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(keyBlob)
	require.NoError(t, err)
	defer mech.Close()
	assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities)
	// After import, the signature should validate.
	content, signingFingerprint, err = mech.Verify(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte("This is not JSON\n"), content)
	assert.Equal(t, TestKeyFingerprint, signingFingerprint)
	// Two keys: Read the binary-format pubring.gpg, and concatenate it twice.
	// (Using two copies of public-key.gpg, in the ASCII-armored format, works with
	// gpgmeSigningMechanism but not openpgpSigningMechanism.)
	keyBlob, err = ioutil.ReadFile("./fixtures/pubring.gpg")
	require.NoError(t, err)
	mech, keyIdentities, err = NewEphemeralGPGSigningMechanism(bytes.Join([][]byte{keyBlob, keyBlob}, nil))
	require.NoError(t, err)
	defer mech.Close()
	assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprint}, keyIdentities)
	// Invalid input: This is, sadly, accepted anyway by GPG, just returns no keys.
	// For openpgpSigningMechanism we can detect this and fail.
	mech, keyIdentities, err = NewEphemeralGPGSigningMechanism([]byte("This is invalid"))
	assert.True(t, err != nil || len(keyIdentities) == 0)
	if err == nil {
		mech.Close()
	}
	assert.Empty(t, keyIdentities)
	// The various GPG/GPGME failures cases are not obviously easy to reach.
}
// TestGPGSigningMechanismClose checks that closing a non-ephemeral mechanism
// leaves the GPG home directory and its pubring.gpg intact.
func TestGPGSigningMechanismClose(t *testing.T) {
	// Closing a non-ephemeral mechanism does not remove anything in the directory.
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	err = mech.Close()
	assert.NoError(t, err)
	_, err = os.Lstat(testGPGHomeDirectory)
	assert.NoError(t, err)
	_, err = os.Lstat(filepath.Join(testGPGHomeDirectory, "pubring.gpg"))
	assert.NoError(t, err)
}
// TestGPGSigningMechanismSign checks Sign: a produced signature verifies back
// to the original content and fingerprint, and signing with an unknown key
// fails. Skipped when the mechanism cannot sign.
func TestGPGSigningMechanismSign(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil {
		t.Skipf("Signing not supported: %v", err)
	}
	// Successful signing
	content := []byte("content")
	signature, err := mech.Sign(content, TestKeyFingerprint)
	require.NoError(t, err)
	signedContent, signingFingerprint, err := mech.Verify(signature)
	require.NoError(t, err)
	assert.EqualValues(t, content, signedContent)
	assert.Equal(t, TestKeyFingerprint, signingFingerprint)
	// Error signing
	_, err = mech.Sign(content, "this fingerprint doesn't exist")
	assert.Error(t, err)
	// The various GPG/GPGME failures cases are not obviously easy to reach.
}
// assertSigningError asserts the uniform failure contract of Verify: a
// non-nil error accompanied by nil content and an empty fingerprint.
func assertSigningError(t *testing.T, content []byte, fingerprint string, err error) {
	assert.Error(t, err)
	assert.Nil(t, content)
	assert.Empty(t, fingerprint)
}
// TestGPGSigningMechanismVerify checks Verify: the known-good fixture yields
// its payload and signing fingerprint, while invalid, non-signature, expired,
// corrupt, and unknown-key inputs all fail with nil data.
func TestGPGSigningMechanismVerify(t *testing.T) {
	mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
	require.NoError(t, err)
	defer mech.Close()
	// Successful verification
	signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	content, signingFingerprint, err := mech.Verify(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte("This is not JSON\n"), content)
	assert.Equal(t, TestKeyFingerprint, signingFingerprint)
	// For extra paranoia, test that we return nil data on error.
	// Completely invalid signature.
	content, signingFingerprint, err = mech.Verify([]byte{})
	assertSigningError(t, content, signingFingerprint, err)
	content, signingFingerprint, err = mech.Verify([]byte("invalid signature"))
	assertSigningError(t, content, signingFingerprint, err)
	// Literal packet, not a signature
	signature, err = ioutil.ReadFile("./fixtures/unsigned-literal.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)
	// Encrypted data, not a signature.
	signature, err = ioutil.ReadFile("./fixtures/unsigned-encrypted.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)
	// FIXME? Is there a way to create a multi-signature so that gpgme_op_verify returns multiple signatures?
	// Expired signature
	signature, err = ioutil.ReadFile("./fixtures/expired.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)
	// Corrupt signature
	signature, err = ioutil.ReadFile("./fixtures/corrupt.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)
	// Valid signature with an unknown key
	signature, err = ioutil.ReadFile("./fixtures/unknown-key.signature")
	require.NoError(t, err)
	content, signingFingerprint, err = mech.Verify(signature)
	assertSigningError(t, content, signingFingerprint, err)
	// The various GPG/GPGME failures cases are not obviously easy to reach.
}
// TestGPGSigningMechanismUntrustedSignatureContents exercises
// UntrustedSignatureContents against a range of fixture blobs: it must return
// the embedded payload and the signing key's short ID for any well-formed
// OpenPGP signature packet — even expired, corrupt-payload, or unknown-key
// ones, since no trust evaluation happens here — and fail only for data that
// is not a signature at all.
func TestGPGSigningMechanismUntrustedSignatureContents(t *testing.T) {
	// An ephemeral mechanism with no keys is enough: contents extraction does
	// not verify the signature.
	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
	require.NoError(t, err)
	defer mech.Close()
	// A valid signature
	signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
	require.NoError(t, err)
	content, shortKeyID, err := mech.UntrustedSignatureContents(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte("This is not JSON\n"), content)
	assert.Equal(t, TestKeyShortID, shortKeyID)
	// Completely invalid signature.
	_, _, err = mech.UntrustedSignatureContents([]byte{})
	assert.Error(t, err)
	_, _, err = mech.UntrustedSignatureContents([]byte("invalid signature"))
	assert.Error(t, err)
	// Literal packet, not a signature
	signature, err = ioutil.ReadFile("./fixtures/unsigned-literal.signature")
	require.NoError(t, err)
	content, shortKeyID, err = mech.UntrustedSignatureContents(signature)
	assert.Error(t, err)
	// Encrypted data, not a signature.
	signature, err = ioutil.ReadFile("./fixtures/unsigned-encrypted.signature")
	require.NoError(t, err)
	content, shortKeyID, err = mech.UntrustedSignatureContents(signature)
	assert.Error(t, err)
	// Expired signature: payload and key ID are still extractable.
	signature, err = ioutil.ReadFile("./fixtures/expired.signature")
	require.NoError(t, err)
	content, shortKeyID, err = mech.UntrustedSignatureContents(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte("This signature is expired.\n"), content)
	assert.Equal(t, TestKeyShortID, shortKeyID)
	// Corrupt signature: the payload is returned even though full verification would fail.
	signature, err = ioutil.ReadFile("./fixtures/corrupt.signature")
	require.NoError(t, err)
	content, shortKeyID, err = mech.UntrustedSignatureContents(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte(`{"critical":{"identity":{"docker-reference":"testing/manifest"},"image":{"docker-manifest-digest":"sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"},"type":"atomic container signature"},"optional":{"creator":"atomic ","timestamp":1458239713}}`), content)
	assert.Equal(t, TestKeyShortID, shortKeyID)
	// Valid signature with an unknown key: contents and key ID are still extractable.
	signature, err = ioutil.ReadFile("./fixtures/unknown-key.signature")
	require.NoError(t, err)
	content, shortKeyID, err = mech.UntrustedSignatureContents(signature)
	require.NoError(t, err)
	assert.Equal(t, []byte(`{"critical":{"identity":{"docker-reference":"testing/manifest"},"image":{"docker-manifest-digest":"sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"},"type":"atomic container signature"},"optional":{"creator":"atomic 0.1.13-dev","timestamp":1464633474}}`), content)
	assert.Equal(t, "E5476D1110D07803", shortKeyID)
}

File diff suppressed because it is too large Load diff

View file

@ -1,24 +0,0 @@
package signature
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestPRSignedBaseLayerIsSignatureAuthorAccepted checks that a signedBaseLayer
// requirement always answers "unknown" for signature-author acceptance.
func TestPRSignedBaseLayerIsSignatureAuthorAccepted(t *testing.T) {
	req, err := NewPRSignedBaseLayer(NewPRMMatchRepository())
	require.NoError(t, err)
	// Nil image and signature suffice: the verdict must not depend on them.
	status, sig, err := req.isSignatureAuthorAccepted(nil, nil)
	assertSARUnknown(t, status, sig, err)
}
// TestPRSignedBaseLayerIsRunningImageAllowed checks that a signedBaseLayer
// requirement currently rejects running any image.
func TestPRSignedBaseLayerIsRunningImageAllowed(t *testing.T) {
	// This will obviously need to change after signedBaseLayer is implemented.
	req, err := NewPRSignedBaseLayer(NewPRMMatchRepository())
	require.NoError(t, err)
	// A nil image suffices: the verdict must not depend on it.
	allowed, err := req.isRunningImageAllowed(nil)
	assertRunningRejectedPolicyRequirement(t, allowed, err)
}

View file

@ -1,264 +0,0 @@
package signature
import (
"io/ioutil"
"os"
"path"
"testing"
"github.com/containers/image/directory"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// dirImageMock returns a types.UnparsedImage backed by the directory dir,
// claiming the given dockerReference as its identity.
// The caller must call .Close() on the returned UnparsedImage.
func dirImageMock(t *testing.T, dir, dockerReference string) types.UnparsedImage {
	parsed, err := reference.ParseNormalizedNamed(dockerReference)
	require.NoError(t, err)
	return dirImageMockWithRef(t, dir, refImageReferenceMock{parsed})
}
// dirImageMockWithRef returns a types.UnparsedImage backed by the directory
// dir, whose Reference() reports the supplied ref instead of the real one.
// The caller must call .Close() on the returned UnparsedImage.
func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) types.UnparsedImage {
	dirRef, err := directory.NewReference(dir)
	require.NoError(t, err)
	realSource, err := dirRef.NewImageSource(nil, nil)
	require.NoError(t, err)
	// Wrap the real source so Reference() returns the claimed ref.
	mock := &dirImageSourceMock{
		ImageSource: realSource,
		ref:         ref,
	}
	return image.UnparsedFromSource(mock)
}
// dirImageSourceMock inherits dirImageSource, but overrides its Reference method.
type dirImageSourceMock struct {
	types.ImageSource                      // real source; all other methods delegate to it
	ref               types.ImageReference // value returned by Reference instead of the real one
}

// Reference returns the override reference rather than the embedded source's.
func (d *dirImageSourceMock) Reference() types.ImageReference {
	return d.ref
}
// TestPRSignedByIsSignatureAuthorAccepted covers prSignedBy.isSignatureAuthorAccepted:
// successful acceptance via both KeyPath and KeyData configurations, and the
// many rejection paths (bad or unimplemented KeyType, conflicting key sources,
// unreadable keys, signatures that do not verify, invalid payloads, identity
// mismatches, and errors reading the image's manifest).
func TestPRSignedByIsSignatureAuthorAccepted(t *testing.T) {
	ktGPG := SBKeyTypeGPGKeys
	prm := NewPRMMatchExact()
	testImage := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer testImage.Close()
	testImageSig, err := ioutil.ReadFile("fixtures/dir-img-valid/signature-1")
	require.NoError(t, err)
	// Successful validation, with KeyData and KeyPath
	pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err := pr.isSignatureAuthorAccepted(testImage, testImageSig)
	assertSARAccepted(t, sar, parsedSig, err, Signature{
		DockerManifestDigest: TestImageManifestDigest,
		DockerReference:      "testing/manifest:latest",
	})
	keyData, err := ioutil.ReadFile("fixtures/public-key.gpg")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyData(ktGPG, keyData, prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(testImage, testImageSig)
	assertSARAccepted(t, sar, parsedSig, err, Signature{
		DockerManifestDigest: TestImageManifestDigest,
		DockerReference:      "testing/manifest:latest",
	})
	// Unimplemented and invalid KeyType values
	for _, keyType := range []sbKeyType{SBKeyTypeSignedByGPGKeys,
		SBKeyTypeX509Certificates,
		SBKeyTypeSignedByX509CAs,
		sbKeyType("This is invalid"),
	} {
		// Do not use NewPRSignedByKeyData, because it would reject invalid values.
		pr := &prSignedBy{
			KeyType:        keyType,
			KeyData:        []byte("abc"),
			SignedIdentity: prm,
		}
		// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
		sar, parsedSig, err := pr.isSignatureAuthorAccepted(nil, nil)
		assertSARRejected(t, sar, parsedSig, err)
	}
	// Both KeyPath and KeyData set. Do not use NewPRSignedBy*, because it would reject this.
	prSB := &prSignedBy{
		KeyType:        ktGPG,
		KeyPath:        "/foo/bar",
		KeyData:        []byte("abc"),
		SignedIdentity: prm,
	}
	// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
	sar, parsedSig, err = prSB.isSignatureAuthorAccepted(nil, nil)
	assertSARRejected(t, sar, parsedSig, err)
	// Invalid KeyPath
	pr, err = NewPRSignedByKeyPath(ktGPG, "/this/does/not/exist", prm)
	require.NoError(t, err)
	// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, nil)
	assertSARRejected(t, sar, parsedSig, err)
	// Errors initializing the temporary GPG directory and mechanism are not obviously easy to reach.
	// KeyData has no public keys.
	pr, err = NewPRSignedByKeyData(ktGPG, []byte{}, prm)
	require.NoError(t, err)
	// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, nil)
	assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
	// A signature which does not GPG verify
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, []byte("invalid signature"))
	assertSARRejected(t, sar, parsedSig, err)
	// A valid signature using an unknown key.
	// (This is (currently?) rejected through the "mech.Verify fails" path, not the "!identityFound" path,
	// because we use a temporary directory and only import the trusted keys.)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sig, err := ioutil.ReadFile("fixtures/unknown-key.signature")
	require.NoError(t, err)
	// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, sig)
	assertSARRejected(t, sar, parsedSig, err)
	// A valid signature of an invalid JSON.
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sig, err = ioutil.ReadFile("fixtures/invalid-blob.signature")
	require.NoError(t, err)
	// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, sig)
	assertSARRejected(t, sar, parsedSig, err)
	assert.IsType(t, InvalidSignatureError{}, err)
	// A valid signature with a rejected identity.
	nonmatchingPRM, err := NewPRMExactReference("this/doesnt:match")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", nonmatchingPRM)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(testImage, testImageSig)
	assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
	// Error reading image manifest
	image := dirImageMock(t, "fixtures/dir-img-no-manifest", "testing/manifest:latest")
	defer image.Close()
	sig, err = ioutil.ReadFile("fixtures/dir-img-no-manifest/signature-1")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig)
	assertSARRejected(t, sar, parsedSig, err)
	// Error computing manifest digest
	image = dirImageMock(t, "fixtures/dir-img-manifest-digest-error", "testing/manifest:latest")
	defer image.Close()
	sig, err = ioutil.ReadFile("fixtures/dir-img-manifest-digest-error/signature-1")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig)
	assertSARRejected(t, sar, parsedSig, err)
	// A valid signature with a non-matching manifest
	image = dirImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
	defer image.Close()
	sig, err = ioutil.ReadFile("fixtures/dir-img-modified-manifest/signature-1")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig)
	assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
}
// createInvalidSigDir creates a directory suitable for dirImageMock, in which
// image.Signatures() fails.
// The caller should eventually call os.RemoveAll on the returned path.
func createInvalidSigDir(t *testing.T) string {
	tmpDir, err := ioutil.TempDir("", "skopeo-test-unreadable-signature")
	require.NoError(t, err)
	require.NoError(t, ioutil.WriteFile(path.Join(tmpDir, "manifest.json"), []byte("{}"), 0644))
	// A 000-permissions file would not stop root (in particular, inside the
	// Docker container used for testing), so instead create a symlink pointing
	// at itself: reading it fails with ELOOP. (A symlink to a nonexistent
	// target would merely look like a missing signature, not an error.)
	require.NoError(t, os.Symlink("signature-1", path.Join(tmpDir, "signature-1")))
	return tmpDir
}
// TestPRSignedByIsRunningImageAllowed exercises prSignedBy.isRunningImageAllowed
// with 0, 1 and 2 signatures in various valid/invalid combinations; a single
// valid signature anywhere in the list is enough to allow the image.
func TestPRSignedByIsRunningImageAllowed(t *testing.T) {
	ktGPG := SBKeyTypeGPGKeys
	prm := NewPRMMatchExact()
	// A simple success case: single valid signature.
	image := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer image.Close()
	pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err := pr.isRunningImageAllowed(image)
	assertRunningAllowed(t, allowed, err)
	// Error reading signatures
	invalidSigDir := createInvalidSigDir(t)
	defer os.RemoveAll(invalidSigDir)
	image = dirImageMock(t, invalidSigDir, "testing/manifest:latest")
	defer image.Close()
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err = pr.isRunningImageAllowed(image)
	assertRunningRejected(t, allowed, err)
	// No signatures
	image = dirImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
	defer image.Close()
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err = pr.isRunningImageAllowed(image)
	assertRunningRejectedPolicyRequirement(t, allowed, err)
	// 1 invalid signature: use dir-img-valid, but a non-matching Docker reference
	image = dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:notlatest")
	defer image.Close()
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err = pr.isRunningImageAllowed(image)
	assertRunningRejectedPolicyRequirement(t, allowed, err)
	// 2 valid signatures
	image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
	defer image.Close()
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err = pr.isRunningImageAllowed(image)
	assertRunningAllowed(t, allowed, err)
	// One invalid, one valid signature (in this order)
	image = dirImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
	defer image.Close()
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err = pr.isRunningImageAllowed(image)
	assertRunningAllowed(t, allowed, err)
	// 2 invalid signatures: use dir-img-valid-2, but a non-matching Docker reference
	image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:notlatest")
	defer image.Close()
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	allowed, err = pr.isRunningImageAllowed(image)
	assertRunningRejectedPolicyRequirement(t, allowed, err)
}

View file

@ -1,74 +0,0 @@
package signature
import (
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
)
// nameOnlyImageMock is a mock of types.UnparsedImage which only allows transports.ImageName to work
type nameOnlyImageMock struct {
	forbiddenImageMock // all other UnparsedImage methods panic
}

// Reference returns a reference mock that supports only name formatting.
func (nameOnlyImageMock) Reference() types.ImageReference {
	return nameOnlyImageReferenceMock("== StringWithinTransport mock")
}
// nameOnlyImageReferenceMock is a mock of types.ImageReference which only allows transports.ImageName to work, returning self.
type nameOnlyImageReferenceMock string

func (ref nameOnlyImageReferenceMock) Transport() types.ImageTransport {
	return nameImageTransportMock("== Transport mock")
}
func (ref nameOnlyImageReferenceMock) StringWithinTransport() string {
	return string(ref)
}

// All remaining types.ImageReference methods are intentionally unimplemented:
// reaching any of them in a test means the code under test used more of the
// reference than it was supposed to.
func (ref nameOnlyImageReferenceMock) DockerReference() reference.Named {
	panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) PolicyConfigurationIdentity() string {
	panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) PolicyConfigurationNamespaces() []string {
	panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
	panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
	panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
	panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
	panic("unexpected call to a mock function")
}
// TestPRInsecureAcceptAnythingIsSignatureAuthorAccepted checks that the
// insecureAcceptAnything requirement answers "unknown" for signature authorship.
func TestPRInsecureAcceptAnythingIsSignatureAuthorAccepted(t *testing.T) {
	req := NewPRInsecureAcceptAnything()
	// A nil signature suffices: the verdict must not depend on it.
	status, sig, err := req.isSignatureAuthorAccepted(nameOnlyImageMock{}, nil)
	assertSARUnknown(t, status, sig, err)
}
// TestPRInsecureAcceptAnythingIsRunningImageAllowed checks that the
// insecureAcceptAnything requirement allows running any image.
func TestPRInsecureAcceptAnythingIsRunningImageAllowed(t *testing.T) {
	req := NewPRInsecureAcceptAnything()
	allowed, err := req.isRunningImageAllowed(nameOnlyImageMock{})
	assertRunningAllowed(t, allowed, err)
}
// TestPRRejectIsSignatureAuthorAccepted checks that the reject requirement
// rejects every signature author.
func TestPRRejectIsSignatureAuthorAccepted(t *testing.T) {
	req := NewPRReject()
	// A nil signature suffices: the verdict must not depend on it.
	status, sig, err := req.isSignatureAuthorAccepted(nameOnlyImageMock{}, nil)
	assertSARRejectedPolicyRequirement(t, status, sig, err)
}
// TestPRRejectIsRunningImageAllowed checks that the reject requirement
// forbids running any image.
func TestPRRejectIsRunningImageAllowed(t *testing.T) {
	req := NewPRReject()
	allowed, err := req.isRunningImageAllowed(nameOnlyImageMock{})
	assertRunningRejectedPolicyRequirement(t, allowed, err)
}

View file

@ -1,521 +0,0 @@
package signature
import (
"fmt"
"os"
"testing"
"github.com/containers/image/docker"
"github.com/containers/image/docker/policyconfiguration"
"github.com/containers/image/docker/reference"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestPolicyRequirementError(t *testing.T) {
// A stupid test just to keep code coverage
s := "test"
err := PolicyRequirementError(s)
assert.Equal(t, s, err.Error())
}
// TestPolicyContextChangeState verifies the PolicyContext state machine:
// changeState succeeds only when the context is currently in the expected
// "old" state, and fails otherwise.
func TestPolicyContextChangeState(t *testing.T) {
	pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
	require.NoError(t, err)
	defer pc.Destroy()
	// A freshly created context starts out ready.
	require.Equal(t, pcReady, pc.state)
	err = pc.changeState(pcReady, pcInUse)
	require.NoError(t, err)
	// The same transition again fails: the context is no longer pcReady.
	err = pc.changeState(pcReady, pcInUse)
	require.Error(t, err)
	// Return state to pcReady to allow pc.Destroy to clean up.
	err = pc.changeState(pcInUse, pcReady)
	require.NoError(t, err)
}
// TestPolicyContextNewDestroy verifies that NewPolicyContext creates a context
// in the pcReady state, and that Destroy succeeds only from pcReady and leaves
// the context in pcDestroyed.
func TestPolicyContextNewDestroy(t *testing.T) {
	pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
	require.NoError(t, err)
	assert.Equal(t, pcReady, pc.state)
	err = pc.Destroy()
	require.NoError(t, err)
	assert.Equal(t, pcDestroyed, pc.state)
	// Trying to destroy when not pcReady
	pc, err = NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
	require.NoError(t, err)
	err = pc.changeState(pcReady, pcInUse)
	require.NoError(t, err)
	err = pc.Destroy()
	require.Error(t, err)
	assert.Equal(t, pcInUse, pc.state) // The state, and hopefully nothing else, has changed.
	// After returning to pcReady, Destroy succeeds.
	err = pc.changeState(pcInUse, pcReady)
	require.NoError(t, err)
	err = pc.Destroy()
	assert.NoError(t, err)
}
// pcImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference
// and handles PolicyConfigurationIdentity and PolicyConfigurationReference consistently.
type pcImageReferenceMock struct {
	transportName string          // transport name reported by Transport()
	ref           reference.Named // backing Docker reference
}

func (ref pcImageReferenceMock) Transport() types.ImageTransport {
	return nameImageTransportMock(ref.transportName)
}
func (ref pcImageReferenceMock) StringWithinTransport() string {
	// We use this in error messages, so sadly we must return something.
	return "== StringWithinTransport mock"
}
func (ref pcImageReferenceMock) DockerReference() reference.Named {
	return ref.ref
}
func (ref pcImageReferenceMock) PolicyConfigurationIdentity() string {
	res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
	if res == "" || err != nil {
		panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
	}
	return res
}
func (ref pcImageReferenceMock) PolicyConfigurationNamespaces() []string {
	if ref.ref == nil {
		panic("unexpected call to a mock function")
	}
	return policyconfiguration.DockerReferenceNamespaces(ref.ref)
}

// The image/source/destination constructors and DeleteImage are intentionally
// unimplemented; the policy-evaluation code under test must never need them.
func (ref pcImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
	panic("unexpected call to a mock function")
}
func (ref pcImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
	panic("unexpected call to a mock function")
}
func (ref pcImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
	panic("unexpected call to a mock function")
}
func (ref pcImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
	panic("unexpected call to a mock function")
}
// TestPolicyContextRequirementsForImageRefNotRegisteredTransport verifies that
// requirementsForImageRef matches policy scopes purely by transport *name*,
// even when that transport is not registered in the transports registry.
func TestPolicyContextRequirementsForImageRefNotRegisteredTransport(t *testing.T) {
	// Temporarily unregister the "docker" transport; restore it on exit so
	// other tests are unaffected.
	transports.Delete("docker")
	assert.Nil(t, transports.Get("docker"))
	defer func() {
		assert.Nil(t, transports.Get("docker"))
		transports.Register(docker.Transport)
		assert.NotNil(t, transports.Get("docker"))
	}()
	pr := []PolicyRequirement{
		xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
	}
	policy := &Policy{
		Default: PolicyRequirements{NewPRReject()},
		Transports: map[string]PolicyTransportScopes{
			"docker": {
				"registry.access.redhat.com": pr,
			},
		},
	}
	pc, err := NewPolicyContext(policy)
	require.NoError(t, err)
	ref, err := reference.ParseNormalizedNamed("registry.access.redhat.com/rhel7:latest")
	require.NoError(t, err)
	reqs := pc.requirementsForImageRef(pcImageReferenceMock{"docker", ref})
	// Compare pointers and length, not deep contents: the exact stored slice
	// must be returned.
	assert.True(t, &(reqs[0]) == &(pr[0]))
	assert.True(t, len(reqs) == len(pr))
}
// TestPolicyContextRequirementsForImageRef verifies scope resolution order:
// exact reference, then successively shorter repository/namespace prefixes,
// then the host name, then the transport's "" scope, and finally the policy
// default when the transport has no matching scope at all.
func TestPolicyContextRequirementsForImageRef(t *testing.T) {
	ktGPG := SBKeyTypeGPGKeys
	prm := NewPRMMatchRepoDigestOrExact()
	policy := &Policy{
		Default:    PolicyRequirements{NewPRReject()},
		Transports: map[string]PolicyTransportScopes{},
	}
	// Just put _something_ into the PolicyTransportScopes map for the keys we care about, and make it pairwise
	// distinct so that we can compare the values and show them when debugging the tests.
	for _, t := range []struct{ transport, scope string }{
		{"docker", ""},
		{"docker", "unmatched"},
		{"docker", "deep.com"},
		{"docker", "deep.com/n1"},
		{"docker", "deep.com/n1/n2"},
		{"docker", "deep.com/n1/n2/n3"},
		{"docker", "deep.com/n1/n2/n3/repo"},
		{"docker", "deep.com/n1/n2/n3/repo:tag2"},
		{"atomic", "unmatched"},
	} {
		if _, ok := policy.Transports[t.transport]; !ok {
			policy.Transports[t.transport] = PolicyTransportScopes{}
		}
		policy.Transports[t.transport][t.scope] = PolicyRequirements{xNewPRSignedByKeyData(ktGPG, []byte(t.transport+t.scope), prm)}
	}
	pc, err := NewPolicyContext(policy)
	require.NoError(t, err)
	for _, c := range []struct{ inputTransport, input, matchedTransport, matched string }{
		// Full match
		{"docker", "deep.com/n1/n2/n3/repo:tag2", "docker", "deep.com/n1/n2/n3/repo:tag2"},
		// Namespace matches
		{"docker", "deep.com/n1/n2/n3/repo:nottag2", "docker", "deep.com/n1/n2/n3/repo"},
		{"docker", "deep.com/n1/n2/n3/notrepo:tag2", "docker", "deep.com/n1/n2/n3"},
		{"docker", "deep.com/n1/n2/notn3/repo:tag2", "docker", "deep.com/n1/n2"},
		{"docker", "deep.com/n1/notn2/n3/repo:tag2", "docker", "deep.com/n1"},
		// Host name match
		{"docker", "deep.com/notn1/n2/n3/repo:tag2", "docker", "deep.com"},
		// Default
		{"docker", "this.doesnt/match:anything", "docker", ""},
		// No match within a matched transport which doesn't have a "" scope
		{"atomic", "this.doesnt/match:anything", "", ""},
		// No configuration available for this transport at all
		{"dir", "what/ever", "", ""}, // "what/ever" is not a valid scope for the real "dir" transport, but we only need it to be a valid reference.Named.
	} {
		var expected PolicyRequirements
		if c.matchedTransport != "" {
			e, ok := policy.Transports[c.matchedTransport][c.matched]
			require.True(t, ok, fmt.Sprintf("case %s:%s: expected reqs not found", c.inputTransport, c.input))
			expected = e
		} else {
			expected = policy.Default
		}
		ref, err := reference.ParseNormalizedNamed(c.input)
		require.NoError(t, err)
		reqs := pc.requirementsForImageRef(pcImageReferenceMock{c.inputTransport, ref})
		comment := fmt.Sprintf("case %s:%s: %#v", c.inputTransport, c.input, reqs[0])
		// Do not use assert.Equal, which would do a deep contents comparison; we want to compare
		// the pointers. Also, == does not work on slices; so test that the slices start at the
		// same element and have the same length.
		assert.True(t, &(reqs[0]) == &(expected[0]), comment)
		assert.True(t, len(reqs) == len(expected), comment)
	}
}
// pcImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference
// and implementing PolicyConfigurationIdentity/PolicyConfigurationNamespaces.
// The caller must call .Close() on the returned Image.
func pcImageMock(t *testing.T, dir, dockerReference string) types.UnparsedImage {
	parsed, err := reference.ParseNormalizedNamed(dockerReference)
	require.NoError(t, err)
	return dirImageMockWithRef(t, dir, pcImageReferenceMock{"docker", parsed})
}
// TestPolicyContextGetSignaturesWithAcceptedAuthor exercises
// PolicyContext.GetSignaturesWithAcceptedAuthor across combinations of
// per-signature verdicts (accepted/rejected/unknown) selected via the image
// tag, plus the failure modes (destroyed context, unreadable signatures).
// Only signatures with at least one "accepted" and no "rejected" verdict are
// returned.
func TestPolicyContextGetSignaturesWithAcceptedAuthor(t *testing.T) {
	expectedSig := &Signature{
		DockerManifestDigest: TestImageManifestDigest,
		DockerReference:      "testing/manifest:latest",
	}
	// Each tag below selects a different requirement combination for the same
	// fixture images.
	pc, err := NewPolicyContext(&Policy{
		Default: PolicyRequirements{NewPRReject()},
		Transports: map[string]PolicyTransportScopes{
			"docker": {
				"docker.io/testing/manifest:latest": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()),
				},
				"docker.io/testing/manifest:twoAccepts": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
				},
				"docker.io/testing/manifest:acceptReject": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
					NewPRReject(),
				},
				"docker.io/testing/manifest:acceptUnknown": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
					xNewPRSignedBaseLayer(NewPRMMatchRepository()),
				},
				"docker.io/testing/manifest:rejectUnknown": {
					NewPRReject(),
					xNewPRSignedBaseLayer(NewPRMMatchRepository()),
				},
				"docker.io/testing/manifest:unknown": {
					xNewPRSignedBaseLayer(NewPRMMatchRepository()),
				},
				"docker.io/testing/manifest:unknown2": {
					NewPRInsecureAcceptAnything(),
				},
				"docker.io/testing/manifest:invalidEmptyRequirements": {},
			},
		},
	})
	require.NoError(t, err)
	defer pc.Destroy()
	// Success
	img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer img.Close()
	sigs, err := pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Equal(t, []*Signature{expectedSig}, sigs)
	// Two signatures
	// FIXME? Use really different signatures for this?
	img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Equal(t, []*Signature{expectedSig, expectedSig}, sigs)
	// No signatures
	img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	// Only invalid signatures
	img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	// 1 invalid, 1 valid signature (in this order)
	img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Equal(t, []*Signature{expectedSig}, sigs)
	// Two sarAccepted results for one signature
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:twoAccepts")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Equal(t, []*Signature{expectedSig}, sigs)
	// sarAccepted+sarRejected for a signature
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptReject")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	// sarAccepted+sarUnknown for a signature
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptUnknown")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Equal(t, []*Signature{expectedSig}, sigs)
	// sarRejected+sarUnknown for a signature
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:rejectUnknown")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	// sarUnknown only
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown2")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	// Empty list of requirements (invalid)
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	require.NoError(t, err)
	assert.Empty(t, sigs)
	// Failures: Make sure we return nil sigs.
	// Unexpected state (context already destroyed)
	destroyedPC, err := NewPolicyContext(pc.Policy)
	require.NoError(t, err)
	err = destroyedPC.Destroy()
	require.NoError(t, err)
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer img.Close()
	sigs, err = destroyedPC.GetSignaturesWithAcceptedAuthor(img)
	assert.Error(t, err)
	assert.Nil(t, sigs)
	// Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
	// implementations meddling with the state, or threads. This is for catching trivial programmer
	// mistakes only, anyway.
	// Error reading signatures.
	invalidSigDir := createInvalidSigDir(t)
	defer os.RemoveAll(invalidSigDir)
	img = pcImageMock(t, invalidSigDir, "testing/manifest:latest")
	defer img.Close()
	sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
	assert.Error(t, err)
	assert.Nil(t, sigs)
}
// TestPolicyContextIsRunningImageAllowed exercises
// PolicyContext.IsRunningImageAllowed: the image is allowed iff every
// requirement in the matched scope allows it (an empty requirement list and a
// destroyed context both reject). The requirement combination is selected via
// the image tag.
func TestPolicyContextIsRunningImageAllowed(t *testing.T) {
	pc, err := NewPolicyContext(&Policy{
		Default: PolicyRequirements{NewPRReject()},
		Transports: map[string]PolicyTransportScopes{
			"docker": {
				"docker.io/testing/manifest:latest": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()),
				},
				"docker.io/testing/manifest:twoAllows": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
				},
				"docker.io/testing/manifest:allowDeny": {
					xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
					NewPRReject(),
				},
				"docker.io/testing/manifest:reject": {
					NewPRReject(),
				},
				"docker.io/testing/manifest:acceptAnything": {
					NewPRInsecureAcceptAnything(),
				},
				"docker.io/testing/manifest:invalidEmptyRequirements": {},
			},
		},
	})
	require.NoError(t, err)
	defer pc.Destroy()
	// Success
	img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer img.Close()
	res, err := pc.IsRunningImageAllowed(img)
	assertRunningAllowed(t, res, err)
	// Two signatures
	// FIXME? Use really different signatures for this?
	img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningAllowed(t, res, err)
	// No signatures
	img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningRejectedPolicyRequirement(t, res, err)
	// Only invalid signatures
	img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningRejectedPolicyRequirement(t, res, err)
	// 1 invalid, 1 valid signature (in this order)
	img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningAllowed(t, res, err)
	// Two allowed results
	img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:twoAllows")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningAllowed(t, res, err)
	// Allow + deny results
	img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:allowDeny")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningRejectedPolicyRequirement(t, res, err)
	// prReject works
	img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:reject")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningRejectedPolicyRequirement(t, res, err)
	// prInsecureAcceptAnything works
	img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:acceptAnything")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningAllowed(t, res, err)
	// Empty list of requirements (invalid)
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements")
	defer img.Close()
	res, err = pc.IsRunningImageAllowed(img)
	assertRunningRejectedPolicyRequirement(t, res, err)
	// Unexpected state (context already destroyed)
	destroyedPC, err := NewPolicyContext(pc.Policy)
	require.NoError(t, err)
	err = destroyedPC.Destroy()
	require.NoError(t, err)
	img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer img.Close()
	res, err = destroyedPC.IsRunningImageAllowed(img)
	assertRunningRejected(t, res, err)
	// Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
	// implementations meddling with the state, or threads. This is for catching trivial programmer
	// mistakes only, anyway.
}
// Helpers for validating PolicyRequirement.isSignatureAuthorAccepted results:

// assertSARAccepted verifies that isSignatureAuthorAccepted returns a consistent sarAccepted result
// with the expected signature.
func assertSARAccepted(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error, expectedSig Signature) {
	assert.Equal(t, sarAccepted, sar)
	assert.Equal(t, &expectedSig, parsedSig)
	assert.NoError(t, err)
}
// assertSARRejected verifies that isSignatureAuthorAccepted reported a rejection:
// an error is set, the result is sarRejected, and no parsed signature is returned.
func assertSARRejected(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
	assert.Error(t, err)
	assert.Equal(t, sarRejected, sar)
	assert.Nil(t, parsedSig)
}
// assertSARRejectedPolicyRequirement verifies that isSignatureAuthorAccepted returns a consistent sarRejected result,
// and that the returned error is a PolicyRequirementError.
func assertSARRejectedPolicyRequirement(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
	assertSARRejected(t, sar, parsedSig, err)
	assert.IsType(t, PolicyRequirementError(""), err)
}
// assertSARUnknown verifies that isSignatureAuthorAccepted returns a consistent sarUnknown result
// (no error and no parsed signature).
func assertSARUnknown(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
	assert.Equal(t, sarUnknown, sar)
	assert.Nil(t, parsedSig)
	assert.NoError(t, err)
}
// Helpers for validating PolicyRequirement.isRunningImageAllowed results:

// assertRunningAllowed checks that isRunningImageAllowed succeeded:
// no error was reported and the image was allowed.
func assertRunningAllowed(t *testing.T, allowed bool, err error) {
	assert.NoError(t, err)
	assert.Equal(t, true, allowed)
}
// assertRunningRejected checks that isRunningImageAllowed refused the image:
// an error was reported and the image was not allowed.
func assertRunningRejected(t *testing.T, allowed bool, err error) {
	assert.Error(t, err)
	assert.Equal(t, false, allowed)
}
// assertRunningRejectedPolicyRequirement checks that isRunningImageAllowed refused the image
// and that the refusal is specifically a PolicyRequirementError.
func assertRunningRejectedPolicyRequirement(t *testing.T, allowed bool, err error) {
	assert.IsType(t, PolicyRequirementError(""), err)
	assertRunningRejected(t, allowed, err)
}

Some files were not shown because too many files have changed in this diff Show more