Add tests for manifest lists and fix some issues encountered while testing
parent 9994f0ae61
commit 7a794e29c0
9 changed files with 311 additions and 98 deletions
@@ -98,7 +98,7 @@ def get_legacy_images_for_tags(tags):
            .where(ManifestLegacyImage.manifest << [tag.manifest_id for tag in tags]))

   by_manifest = {mli.manifest_id: mli.image for mli in query}
-  return {tag.id: by_manifest[tag.manifest_id] for tag in tags}
+  return {tag.id: by_manifest[tag.manifest_id] for tag in tags if tag.manifest_id in by_manifest}


 def find_matching_tag(repository_id, tag_names, tag_kinds=None):
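Why the guard matters: a tag whose manifest is a manifest list has no backing legacy (schema1) image row, so `by_manifest` has no entry for it and the unguarded lookup raised a KeyError. A minimal sketch of the behaviour (illustrative only; plain dicts stand in for the real Quay rows):

    # by_manifest maps manifest_id -> legacy image; manifest lists have no entry.
    by_manifest = {1: 'legacy-image-a'}
    tags = [{'id': 10, 'manifest_id': 1},   # points at a normal manifest
            {'id': 11, 'manifest_id': 2}]   # points at a manifest list

    # Old form: by_manifest[2] raises KeyError for tag 11.
    # New form: tags without a legacy image are simply omitted from the result.
    result = {t['id']: by_manifest[t['manifest_id']]
              for t in tags if t['manifest_id'] in by_manifest}
    assert result == {10: 'legacy-image-a'}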
@@ -217,9 +217,6 @@ class OCIModel(SharedModel, RegistryDataInterface):
       return (None, None)

     legacy_image = oci.shared.get_legacy_image_for_manifest(created_manifest.manifest)
-    if legacy_image is None:
-      return (None, None)
-
     li = LegacyImage.for_image(legacy_image)
     wrapped_manifest = Manifest.for_manifest(created_manifest.manifest, li)
@@ -27,8 +27,10 @@ def get_tags(namespace_name, repo_name):
     if repository_ref is None:
       abort(404)

+    # TODO(jschorr): Change this to normalize manifest lists back to their legacy image
+    # (if applicable).
     tags = registry_model.list_repository_tags(repository_ref, include_legacy_images=True)
-    tag_map = {tag.name: tag.legacy_image.docker_image_id for tag in tags}
+    tag_map = {tag.name: tag.legacy_image.docker_image_id for tag in tags if tag.legacy_image}
     return jsonify(tag_map)

   abort(403)
@@ -278,6 +278,7 @@ def _write_manifest(namespace_name, repo_name, tag_name, manifest_impl):
   if repository_ref is None:
     raise NameUnknown()

+  # Create the manifest(s) and retarget the tag to point to it.
   manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest_impl,
                                                                   tag_name, storage)
   if manifest is None:
@@ -4,6 +4,10 @@ from six import add_metaclass
 @add_metaclass(ABCMeta)
 class ManifestInterface(object):
   """ Defines the interface for the various manifests types supported. """
+  @abstractproperty
+  def schema_version(self):
+    """ The version of the schema, or None for lists. """
+
   @abstractproperty
   def digest(self):
     """ The digest of the manifest, including type prefix. """
@@ -183,6 +183,11 @@ class DockerSchema2ManifestList(ManifestInterface):
     except ValidationError as ve:
       raise MalformedSchema2ManifestList('manifest data does not match schema: %s' % ve)

+  @property
+  def schema_version(self):
+    """ The version of the schema, or None for lists. """
+    return None
+
   @property
   def digest(self):
     """ The digest of the manifest, including type prefix. """

@@ -241,7 +246,6 @@ class DockerSchema2ManifestList(ManifestInterface):
       platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
       architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
       os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
-
       if architecture != 'amd64' or os != 'linux':
         continue
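With `schema_version` promoted to `ManifestInterface`, callers can tell a manifest list apart from a single manifest without sniffing media types: lists report None, schema 1/2 manifests report their version. A hedged sketch of the intended check (the helper name is made up; `parsed` is anything returned by `parse_manifest_from_bytes`):

    def describe(parsed):
      # DockerSchema2ManifestList.schema_version is None; plain manifests return 1 or 2.
      if parsed.schema_version is None:
        return 'manifest list %s' % parsed.digest
      return 'schema %s manifest %s' % (parsed.schema_version, parsed.digest)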
@@ -102,6 +102,11 @@ def v2_protocol(request, jwk):
   return request.param(jwk)


+@pytest.fixture()
+def v22_protocol(request, jwk):
+  return V2Protocol(jwk, schema2=True)
+
+
 @pytest.fixture(params=[V1Protocol])
 def v1_protocol(request, jwk):
   return request.param(jwk)
@@ -123,6 +128,15 @@ def pusher(request, data_model, jwk):
   return V2Protocol(jwk)


+@pytest.fixture(params=['v1', 'v2_1'])
+def legacy_puller(request, data_model, jwk):
+  if request.param == 'v1':
+    return V1Protocol(jwk)
+
+  return V2Protocol(jwk)
+
+
 @pytest.fixture(params=['v1', 'v2_1'])
 def legacy_pusher(request, data_model, jwk):
   if request.param == 'v1':
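The new fixtures are requested by name like any other pytest fixture: `v22_protocol` always yields a schema2-capable `V2Protocol`, while `legacy_puller` (mirroring the existing `legacy_pusher`) parametrizes a test over the v1 and v2.1 pull paths. A small sketch (the test name and body are hypothetical):

    def test_fixture_wiring(v22_protocol, legacy_puller):
      # v22_protocol was built with schema2=True, which V2Protocol exposes as self.schema2.
      assert v22_protocol.schema2
      # legacy_puller is either a V1Protocol or a schema1-only V2Protocol per parametrization.
      assert legacy_puller is not None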
@@ -3,10 +3,12 @@ import json

 from enum import Enum, unique

-from image.docker.schema1 import DockerSchema1ManifestBuilder, DockerSchema1Manifest
-from image.docker.schema2.list import DockerSchema2ManifestListBuilder
+from image.docker.schema1 import (DockerSchema1ManifestBuilder, DockerSchema1Manifest,
+                                  DOCKER_SCHEMA1_CONTENT_TYPES)
+from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
 from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
 from image.docker.schema2.config import DockerSchema2Config
+from image.docker.schema2.list import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
 from image.docker.schemas import parse_manifest_from_bytes
 from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
                                      PullResult)
@@ -123,6 +125,148 @@ class V2Protocol(RegistryProtocol):

       return None, response

+  def pull_list(self, session, namespace, repo_name, tag_names, manifestlist,
+                credentials=None, expected_failure=None, options=None):
+    options = options or ProtocolOptions()
+    scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
+    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
+
+    # Ping!
+    self.ping(session)
+
+    # Perform auth and retrieve a token.
+    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
+                         expected_failure=expected_failure)
+    if token is None:
+      assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
+      return
+
+    headers = {
+      'Authorization': 'Bearer ' + token,
+      'Accept': DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
+    }
+
+    for tag_name in tag_names:
+      # Retrieve the manifest for the tag or digest.
+      response = self.conduct(session, 'GET',
+                              '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name),
+                                                       tag_name),
+                              expected_status=(200, expected_failure, V2ProtocolSteps.GET_MANIFEST),
+                              headers=headers)
+      if expected_failure is not None:
+        return None
+
+      # Parse the returned manifest list and ensure it matches.
+      assert response.headers['Content-Type'] == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
+      manifest = parse_manifest_from_bytes(response.text, response.headers['Content-Type'])
+      assert manifest.schema_version is None
+      assert manifest.digest == manifestlist.digest
+
+  def push_list(self, session, namespace, repo_name, tag_names, manifestlist, blobs,
+                credentials=None, expected_failure=None, options=None):
+    options = options or ProtocolOptions()
+    scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
+    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
+
+    # Ping!
+    self.ping(session)
+
+    # Perform auth and retrieve a token.
+    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
+                         expected_failure=expected_failure)
+    if token is None:
+      assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
+      return
+
+    headers = {
+      'Authorization': 'Bearer ' + token,
+      'Accept': ','.join(options.accept_mimetypes or []),
+    }
+
+    # Push all blobs.
+    if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
+                            expected_failure):
+      return
+
+    # Push the manifest list.
+    for tag_name in tag_names:
+      manifest_headers = {'Content-Type': manifestlist.media_type}
+      manifest_headers.update(headers)
+
+      if options.manifest_content_type is not None:
+        manifest_headers['Content-Type'] = options.manifest_content_type
+
+      self.conduct(session, 'PUT',
+                   '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_name),
+                   data=manifestlist.bytes,
+                   expected_status=(202, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
+                   headers=manifest_headers)
+
+    return PushResult(manifests=None, headers=headers)
+
+  def build_schema2(self, images, blobs, options):
+    builder = DockerSchema2ManifestBuilder()
+    for image in images:
+      checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
+      blobs[checksum] = image.bytes
+
+      # If invalid blob references were requested, just make it up.
+      if options.manifest_invalid_blob_references:
+        checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
+
+      builder.add_layer(checksum, len(image.bytes))
+
+    config = {
+      "os": "linux",
+      "rootfs": {
+        "type": "layers",
+        "diff_ids": []
+      },
+      "history": [{
+        'created': '2018-04-03T18:37:09.284840891Z',
+        'created_by': (('/bin/sh -c #(nop) ENTRYPOINT %s' % image.config['Entrypoint'])
+                       if image.config and image.config.get('Entrypoint')
+                       else '/bin/sh -c #(nop) %s' % image.id),
+      } for image in images],
+    }
+
+    if images[-1].config:
+      config['config'] = images[-1].config
+
+    config_json = json.dumps(config)
+    schema2_config = DockerSchema2Config(config_json)
+    builder.set_config(schema2_config)
+
+    blobs[schema2_config.digest] = schema2_config.bytes
+    return builder.build()
+
+  def build_schema1(self, namespace, repo_name, tag_name, images, blobs, options):
+    builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
+
+    for image in reversed(images):
+      checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
+      blobs[checksum] = image.bytes
+
+      # If invalid blob references were requested, just make it up.
+      if options.manifest_invalid_blob_references:
+        checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
+
+      layer_dict = {'id': image.id, 'parent': image.parent_id}
+      if image.config is not None:
+        layer_dict['config'] = image.config
+
+      if image.size is not None:
+        layer_dict['Size'] = image.size
+
+      if image.created is not None:
+        layer_dict['created'] = image.created
+
+      builder.add_layer(checksum, json.dumps(layer_dict))
+
+    # Build the manifest.
+    return builder.build(self.jwk)
+
   def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
            expected_failure=None, options=None):
     options = options or ProtocolOptions()
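Taken together, the helpers above give tests a compact way to exercise manifest lists end to end. A condensed sketch of the flow, drawn from the new tests added at the bottom of this commit (fixtures such as `liveserver_session`, `basic_images` and `different_images` come from the registry test harness):

    blobs = {}
    first = v22_protocol.build_schema2(basic_images, blobs, options)
    second = v22_protocol.build_schema2(different_images, blobs, options)
    blobs[str(first.digest)] = first.bytes
    blobs[str(second.digest)] = second.bytes

    builder = DockerSchema2ManifestListBuilder()
    builder.add_manifest(first, 'amd64', 'linux')
    builder.add_manifest(second, 'arm', 'linux')
    manifestlist = builder.build()

    v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
                           blobs, credentials=('devtable', 'password'), options=options)
    v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
                           credentials=('devtable', 'password'), options=options)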
@@ -141,7 +285,7 @@ class V2Protocol(RegistryProtocol):

     headers = {
       'Authorization': 'Bearer ' + token,
-      'Accept': options.accept_mimetypes,
+      'Accept': ','.join(options.accept_mimetypes or []),
     }

     # Build fake manifests.
@@ -149,67 +293,39 @@ class V2Protocol(RegistryProtocol):
     blobs = {}
     for tag_name in tag_names:
       if self.schema2:
-        builder = DockerSchema2ManifestBuilder()
-        for image in images:
-          checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
-          blobs[checksum] = image.bytes
-
-          # If invalid blob references were requested, just make it up.
-          if options.manifest_invalid_blob_references:
-            checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
-
-          builder.add_layer(checksum, len(image.bytes))
-
-        config = {
-          "os": "linux",
-          "rootfs": {
-            "type": "layers",
-            "diff_ids": []
-          },
-          "history": [{
-            'created': '2018-04-03T18:37:09.284840891Z',
-            'created_by': (('/bin/sh -c #(nop) ENTRYPOINT %s' % image.config['Entrypoint'])
-                           if image.config and image.config.get('Entrypoint')
-                           else '/bin/sh -c #(nop) %s' % image.id),
-          } for image in images],
-        }
-
-        if images[-1].config:
-          config['config'] = images[-1].config
-
-        config_json = json.dumps(config)
-        schema2_config = DockerSchema2Config(config_json)
-        builder.set_config(schema2_config)
-
-        blobs[schema2_config.digest] = schema2_config.bytes
-        manifests[tag_name] = builder.build()
+        manifests[tag_name] = self.build_schema2(images, blobs, options)
       else:
-        builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)
-        for image in reversed(images):
-          checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
-          blobs[checksum] = image.bytes
-
-          # If invalid blob references were requested, just make it up.
-          if options.manifest_invalid_blob_references:
-            checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()
-
-          layer_dict = {'id': image.id, 'parent': image.parent_id}
-          if image.config is not None:
-            layer_dict['config'] = image.config
-
-          if image.size is not None:
-            layer_dict['Size'] = image.size
-
-          if image.created is not None:
-            layer_dict['created'] = image.created
-
-          builder.add_layer(checksum, json.dumps(layer_dict))
-
-        # Build the manifest.
-        manifests[tag_name] = builder.build(self.jwk)
+        manifests[tag_name] = self.build_schema1(namespace, repo_name, tag_name, images, blobs,
+                                                 options)

     # Push the blob data.
+    if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
+                            expected_failure):
+      return
+
+    # Write a manifest for each tag.
+    for tag_name in tag_names:
+      manifest = manifests[tag_name]
+
+      # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
+      # expect a 202 response for success.
+      put_code = 404 if options.manifest_invalid_blob_references else 202
+      manifest_headers = {'Content-Type': manifest.media_type}
+      manifest_headers.update(headers)
+
+      if options.manifest_content_type is not None:
+        manifest_headers['Content-Type'] = options.manifest_content_type
+
+      tag_or_digest = tag_name if not options.push_by_manifest_digest else manifest.digest
+      self.conduct(session, 'PUT',
+                   '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_or_digest),
+                   data=manifest.bytes,
+                   expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
+                   headers=manifest_headers)
+
+    return PushResult(manifests=manifests, headers=headers)
+
+  def _push_blobs(self, blobs, session, namespace, repo_name, headers, options, expected_failure):
     for blob_digest, blob_bytes in blobs.iteritems():
       if not options.skip_head_checks:
         # Blob data should not yet exist.
@@ -270,7 +386,7 @@ class V2Protocol(RegistryProtocol):
                               expected_status=expected_code,
                               headers=patch_headers)
       if expected_code != 204:
-        return
+        return False

       # Retrieve the upload status at each point, and ensure it is valid.
       status_url = '/v2/%s/blobs/uploads/%s' % (self.repo_name(namespace, repo_name),
@@ -288,7 +404,7 @@ class V2Protocol(RegistryProtocol):
       status_url = '/v2/%s/blobs/uploads/%s' % (self.repo_name(namespace, repo_name),
                                                 upload_uuid)
       self.conduct(session, 'GET', status_url, expected_status=404, headers=headers)
-      return
+      return False

     # Finish the blob upload with a PUT.
     response = self.conduct(session, 'PUT', location, params=dict(digest=blob_digest),
@@ -310,28 +426,7 @@ class V2Protocol(RegistryProtocol):
                             headers=headers, expected_status=200)
       assert result.content == blob_bytes

-    # Write a manifest for each tag.
-    for tag_name in tag_names:
-      manifest = manifests[tag_name]
-
-      # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
-      # expect a 202 response for success.
-      put_code = 404 if options.manifest_invalid_blob_references else 202
-      manifest_headers = {'Content-Type': manifest.media_type}
-      manifest_headers.update(headers)
-
-      if options.manifest_content_type is not None:
-        manifest_headers['Content-Type'] = options.manifest_content_type
-
-      tag_or_digest = tag_name if not options.push_by_manifest_digest else manifest.digest
-      self.conduct(session, 'PUT',
-                   '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_or_digest),
-                   data=manifest.bytes,
-                   expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
-                   headers=manifest_headers)
-
-    return PushResult(manifests=manifests, headers=headers)
-
+    return True

   def delete(self, session, namespace, repo_name, tag_names, credentials=None,
              expected_failure=None, options=None):
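With the manifest-writing loop moved out of it, `_push_blobs` now reports success explicitly: it returns False on the early-exit paths above and True once every blob has round-tripped, so callers can stop before writing a manifest on top of missing blobs. The calling pattern, as used by both `push` and `push_list`:

    if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
                            expected_failure):
      return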
@@ -379,8 +474,7 @@ class V2Protocol(RegistryProtocol):
     }

     if self.schema2:
-      headers['Accept'] = [('application/vnd.docker.distribution.manifest.v2+json', 1),
-                           ('application/vnd.docker.distribution.manifest.list.v2+json', 1)]
+      headers['Accept'] = ','.join(options.accept_mimetypes or DOCKER_SCHEMA2_CONTENT_TYPES)

     manifests = {}
     image_ids = {}
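The Accept header for schema2 pulls is now assembled from plain mimetype strings joined with commas, which is the literal header value a requests session sends, instead of the list of (mimetype, quality) tuples previously fed through what appears to have been a werkzeug Accept object (see the test change further down). For illustration:

    accepted = ['application/vnd.docker.distribution.manifest.v2+json',
                'application/vnd.docker.distribution.manifest.list.v2+json']
    # Produces a single comma-separated header value suitable for an HTTP Accept header.
    header_value = ','.join(accepted)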
@@ -395,6 +489,9 @@ class V2Protocol(RegistryProtocol):
         return None

       # Ensure the manifest returned by us is valid.
+      if not self.schema2:
+        assert response.headers['Content-Type'] in DOCKER_SCHEMA1_CONTENT_TYPES
+
       manifest = parse_manifest_from_bytes(response.text, response.headers['Content-Type'])
       manifests[tag_name] = manifest
       image_ids[tag_name] = manifest.leaf_layer_v1_image_id
@@ -20,6 +20,7 @@ from test.registry.protocols import Failures, Image, layer_bytes_for_contents, P
 from app import instance_keys
 from data.model.tag import list_repository_tags
 from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+from image.docker.schema2.list import DockerSchema2ManifestListBuilder
 from util.security.registry_jwt import decode_bearer_header
 from util.timedeltastring import convert_to_timedelta
@@ -613,10 +614,10 @@ def test_unsupported_manifest_content_type(content_type, manifest_protocol, basi


 @pytest.mark.parametrize('accept_mimetypes', [
-  [('application/vnd.oci.image.manifest.v1+json', 1)],
-  [('application/vnd.docker.distribution.manifest.v2+json', 1),
-   ('application/vnd.docker.distribution.manifest.list.v2+json', 1)],
-  [('application/vnd.foo.bar', 1)],
+  ['application/vnd.oci.image.manifest.v1+json'],
+  ['application/vnd.docker.distribution.manifest.v2+json',
+   'application/vnd.docker.distribution.manifest.list.v2+json'],
+  ['application/vnd.foo.bar'],
 ])
 def test_unsupported_manifest_accept_headers(accept_mimetypes, manifest_protocol, basic_images,
                                              data_model, liveserver_session, app_reloader):
@@ -629,7 +630,7 @@ def test_unsupported_manifest_accept_headers(accept_mimetypes, manifest_protocol

   options = ProtocolOptions()
   options.manifest_content_type = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
-  options.accept_mimetypes = str(Accept(accept_mimetypes))
+  options.accept_mimetypes = accept_mimetypes

   # Attempt to push a new repository.
   manifest_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
|
||||||
# Pull the repository to verify.
|
# Pull the repository to verify.
|
||||||
puller.pull(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images,
|
puller.pull(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images,
|
||||||
credentials=credentials)
|
credentials=credentials)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('schema_version', [
|
||||||
|
1,
|
||||||
|
2,
|
||||||
|
])
|
||||||
|
@pytest.mark.parametrize('is_amd', [
|
||||||
|
True,
|
||||||
|
False
|
||||||
|
])
|
||||||
|
def test_push_pull_manifest_list_back_compat(v22_protocol, v2_protocol, basic_images,
|
||||||
|
different_images, liveserver_session, app_reloader,
|
||||||
|
schema_version, data_model, is_amd):
|
||||||
|
""" Test: Push a new tag with a manifest list containing two manifests, one (possibly) legacy
|
||||||
|
and one not, and, if there is a legacy manifest, ensure it can be pulled.
|
||||||
|
"""
|
||||||
|
if data_model != 'oci_model':
|
||||||
|
return
|
||||||
|
|
||||||
|
credentials = ('devtable', 'password')
|
||||||
|
options = ProtocolOptions()
|
||||||
|
|
||||||
|
# Build the manifests that will go in the list.
|
||||||
|
blobs = {}
|
||||||
|
|
||||||
|
signed = v22_protocol.build_schema1('devtable', 'newrepo', 'latest', basic_images, blobs, options)
|
||||||
|
first_manifest = signed.unsigned()
|
||||||
|
if schema_version == 2:
|
||||||
|
first_manifest = v22_protocol.build_schema2(basic_images, blobs, options)
|
||||||
|
|
||||||
|
second_manifest = v22_protocol.build_schema2(different_images, blobs, options)
|
||||||
|
|
||||||
|
# Add the manifests themselves to the blobs map.
|
||||||
|
blobs[str(first_manifest.digest)] = first_manifest.bytes
|
||||||
|
blobs[str(second_manifest.digest)] = second_manifest.bytes
|
||||||
|
|
||||||
|
# Create and push the manifest list.
|
||||||
|
builder = DockerSchema2ManifestListBuilder()
|
||||||
|
builder.add_manifest(first_manifest, 'amd64' if is_amd else 'something', 'linux')
|
||||||
|
builder.add_manifest(second_manifest, 'arm', 'linux')
|
||||||
|
manifestlist = builder.build()
|
||||||
|
|
||||||
|
v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, blobs,
|
||||||
|
credentials=credentials, options=options)
|
||||||
|
|
||||||
|
# Pull the tag and ensure we (don't) get back the basic images, since they are(n't) part of the
|
||||||
|
# amd64+linux manifest.
|
||||||
|
v2_protocol.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
|
||||||
|
credentials=credentials,
|
||||||
|
expected_failure=Failures.UNKNOWN_TAG if not is_amd else None)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('schema_version', [
|
||||||
|
1,
|
||||||
|
2,
|
||||||
|
])
|
||||||
|
def test_push_pull_manifest_list(v22_protocol, basic_images, different_images, liveserver_session,
|
||||||
|
app_reloader, schema_version, data_model):
|
||||||
|
""" Test: Push a new tag with a manifest list containing two manifests, one (possibly) legacy
|
||||||
|
and one not, and pull it.
|
||||||
|
"""
|
||||||
|
if data_model != 'oci_model':
|
||||||
|
return
|
||||||
|
|
||||||
|
credentials = ('devtable', 'password')
|
||||||
|
options = ProtocolOptions()
|
||||||
|
|
||||||
|
# Build the manifests that will go in the list.
|
||||||
|
blobs = {}
|
||||||
|
|
||||||
|
signed = v22_protocol.build_schema1('devtable', 'newrepo', 'latest', basic_images, blobs, options)
|
||||||
|
first_manifest = signed.unsigned()
|
||||||
|
if schema_version == 2:
|
||||||
|
first_manifest = v22_protocol.build_schema2(basic_images, blobs, options)
|
||||||
|
|
||||||
|
second_manifest = v22_protocol.build_schema2(different_images, blobs, options)
|
||||||
|
|
||||||
|
# Add the manifests themselves to the blobs map.
|
||||||
|
blobs[str(first_manifest.digest)] = first_manifest.bytes
|
||||||
|
blobs[str(second_manifest.digest)] = second_manifest.bytes
|
||||||
|
|
||||||
|
# Create and push the manifest list.
|
||||||
|
builder = DockerSchema2ManifestListBuilder()
|
||||||
|
builder.add_manifest(first_manifest, 'amd64', 'linux')
|
||||||
|
builder.add_manifest(second_manifest, 'arm', 'linux')
|
||||||
|
manifestlist = builder.build()
|
||||||
|
|
||||||
|
v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, blobs,
|
||||||
|
credentials=credentials, options=options)
|
||||||
|
|
||||||
|
# Pull and verify the manifest list.
|
||||||
|
v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist,
|
||||||
|
credentials=credentials, options=options)
|
||||||
|
|
Reference in a new issue