Convert V2's manifest endpoints to use the new data model interface

This commit is contained in:
Joseph Schorr 2018-10-05 17:30:47 -04:00
parent a172de4fdc
commit 6b5064aba4
10 changed files with 197 additions and 200 deletions

View file

@ -628,6 +628,15 @@ def get_active_tag_for_repo(repo, tag_name):
except RepositoryTag.DoesNotExist: except RepositoryTag.DoesNotExist:
return None return None
def get_expired_tag_in_repo(repo, tag_name):
  """ Returns the expired RepositoryTag with the given name under the given repository.

      Raises RepositoryTag.DoesNotExist if no such expired tag is found.
  """
  # A tag is expired when it has a non-NULL lifetime end that is already in the past.
  query = RepositoryTag.select().where(RepositoryTag.name == tag_name,
                                       RepositoryTag.repository == repo,
                                       ~(RepositoryTag.lifetime_end_ts >> None),
                                       RepositoryTag.lifetime_end_ts <= get_epoch_timestamp())
  return query.get()
def get_possibly_expired_tag(namespace, repo_name, tag_name): def get_possibly_expired_tag(namespace, repo_name, tag_name):
return (RepositoryTag return (RepositoryTag
.select() .select()

View file

@ -7,7 +7,7 @@ from cachetools import lru_cache
from data import model from data import model
from data.registry_model.datatype import datatype, requiresinput, optionalinput from data.registry_model.datatype import datatype, requiresinput, optionalinput
from image.docker.schema1 import DockerSchema1Manifest from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
class RepositoryReference(datatype('Repository', [])): class RepositoryReference(datatype('Repository', [])):
@ -118,7 +118,7 @@ class Tag(datatype('Tag', ['name', 'reversion', 'manifest_digest', 'lifetime_sta
return legacy_image return legacy_image
class Manifest(datatype('Manifest', ['digest', 'manifest_bytes'])): class Manifest(datatype('Manifest', ['digest', 'media_type', 'manifest_bytes'])):
""" Manifest represents a manifest in a repository. """ """ Manifest represents a manifest in a repository. """
@classmethod @classmethod
def for_tag_manifest(cls, tag_manifest, legacy_image=None): def for_tag_manifest(cls, tag_manifest, legacy_image=None):
@ -127,6 +127,7 @@ class Manifest(datatype('Manifest', ['digest', 'manifest_bytes'])):
return Manifest(db_id=tag_manifest.id, digest=tag_manifest.digest, return Manifest(db_id=tag_manifest.id, digest=tag_manifest.digest,
manifest_bytes=tag_manifest.json_data, manifest_bytes=tag_manifest.json_data,
media_type=DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE, # Always in legacy.
inputs=dict(legacy_image=legacy_image)) inputs=dict(legacy_image=legacy_image))
@property @property

View file

@ -117,6 +117,12 @@ class RegistryDataInterface(object):
or None if none. or None if none.
""" """
  @abstractmethod
  def has_expired_tag(self, repository_ref, tag_name):
    """
    Returns true if and only if the repository contains a tag with the given name that is expired.
    """
    # NOTE: interface method only; implementations decide how expiration is looked up.
@abstractmethod @abstractmethod
def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image,
is_reversion=False): is_reversion=False):

View file

@ -106,7 +106,7 @@ class PreOCIModel(RegistryDataInterface):
# Ensure all the blobs in the manifest exist. # Ensure all the blobs in the manifest exist.
digests = manifest_interface_instance.checksums digests = manifest_interface_instance.checksums
query = model.storage.lookup_repo_storages_by_content_checksum(repository_ref._db_id, digests) query = model.storage.lookup_repo_storages_by_content_checksum(repository_ref._db_id, digests)
blob_map = {s.content_checksum: s.id for s in query} blob_map = {s.content_checksum: s for s in query}
for layer in manifest_interface_instance.layers: for layer in manifest_interface_instance.layers:
digest_str = str(layer.digest) digest_str = str(layer.digest)
if digest_str not in blob_map: if digest_str not in blob_map:
@ -116,23 +116,38 @@ class PreOCIModel(RegistryDataInterface):
# This will let us know which v1 images we need to synthesize and which ones are invalid. # This will let us know which v1 images we need to synthesize and which ones are invalid.
docker_image_ids = list(manifest_interface_instance.legacy_image_ids) docker_image_ids = list(manifest_interface_instance.legacy_image_ids)
images_query = model.image.lookup_repository_images(repository_ref._db_id, docker_image_ids) images_query = model.image.lookup_repository_images(repository_ref._db_id, docker_image_ids)
images_map = {i.docker_image_id: i.storage for i in images_query} image_storage_map = {i.docker_image_id: i.storage for i in images_query}
# Rewrite any v1 image IDs that do not match the checksum in the database. # Rewrite any v1 image IDs that do not match the checksum in the database.
try: try:
rewritten_images = list(manifest_interface_instance.rewrite_invalid_image_ids(images_map)) rewritten_images = manifest_interface_instance.rewrite_invalid_image_ids(image_storage_map)
rewritten_images = list(rewritten_images)
parent_image_map = {}
for rewritten_image in rewritten_images: for rewritten_image in rewritten_images:
if not rewritten_image.image_id in images_map: if not rewritten_image.image_id in image_storage_map:
model.image.synthesize_v1_image( parent_image = None
if rewritten_image.parent_image_id:
parent_image = parent_image_map.get(rewritten_image.parent_image_id)
if parent_image is None:
parent_image = model.image.get_image(repository_ref._db_id,
rewritten_image.parent_image_id)
if parent_image is None:
return None, None
synthesized = model.image.synthesize_v1_image(
repository_ref._db_id, repository_ref._db_id,
blob_map[rewritten_image.content_checksum], blob_map[rewritten_image.content_checksum].id,
blob_map[rewritten_image.content_checksum].image_size,
rewritten_image.image_id, rewritten_image.image_id,
rewritten_image.created, rewritten_image.created,
rewritten_image.comment, rewritten_image.comment,
rewritten_image.command, rewritten_image.command,
rewritten_image.compat_json, rewritten_image.compat_json,
rewritten_image.parent_image_id, parent_image,
) )
parent_image_map[rewritten_image.image_id] = synthesized
except ManifestException: except ManifestException:
logger.exception("exception when rewriting v1 metadata") logger.exception("exception when rewriting v1 metadata")
return None, None return None, None
@ -150,7 +165,7 @@ class PreOCIModel(RegistryDataInterface):
# Save the labels on the manifest. # Save the labels on the manifest.
if newly_created: if newly_created:
with self.batch_create_manifest_labels(manifest) as add_label: with self.batch_create_manifest_labels(manifest) as add_label:
for key, value in manifest.layers[-1].v1_metadata.labels.iteritems(): for key, value in manifest_interface_instance.layers[-1].v1_metadata.labels.iteritems():
media_type = 'application/json' if is_json(value) else 'text/plain' media_type = 'application/json' if is_json(value) else 'text/plain'
add_label(key, value, 'manifest', media_type) add_label(key, value, 'manifest', media_type)
@ -289,7 +304,8 @@ class PreOCIModel(RegistryDataInterface):
else None)) else None))
for tag in tags] for tag in tags]
def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None, active_tags_only=False): def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None,
active_tags_only=False):
""" """
Returns the history of all tags in the repository (unless filtered). This includes tags that Returns the history of all tags in the repository (unless filtered). This includes tags that
have been made in-active due to newer versions of those tags coming into service. have been made in-active due to newer versions of those tags coming into service.
@ -302,6 +318,16 @@ class PreOCIModel(RegistryDataInterface):
legacy_image=LegacyImage.for_image(tag.image)) legacy_image=LegacyImage.for_image(tag.image))
for tag in tags], has_more for tag in tags], has_more
def has_expired_tag(self, repository_ref, tag_name):
"""
Returns true if and only if the repository contains a tag with the given name that is expired.
"""
try:
model.tag.get_expired_tag_in_repo(repository_ref._db_id, tag_name)
return True
except database.RepositoryTag.DoesNotExist:
return False
def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False): def get_repo_tag(self, repository_ref, tag_name, include_legacy_image=False):
""" """
Returns the latest, *active* tag found in the repository, with the matching name Returns the latest, *active* tag found in the repository, with the matching name

View file

@ -8,6 +8,7 @@ import pytest
from mock import patch from mock import patch
from playhouse.test_utils import assert_query_count from playhouse.test_utils import assert_query_count
from app import docker_v2_signing_key
from data import model from data import model
from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest, ManifestBlob, from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest, ManifestBlob,
ManifestLegacyImage, ManifestLabel, TagManifest, RepositoryTag, Image, ManifestLegacyImage, ManifestLabel, TagManifest, RepositoryTag, Image,
@ -16,6 +17,7 @@ from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest,
from data.cache.impl import InMemoryDataModelCache from data.cache.impl import InMemoryDataModelCache
from data.registry_model.registry_pre_oci_model import PreOCIModel from data.registry_model.registry_pre_oci_model import PreOCIModel
from data.registry_model.datatypes import RepositoryReference from data.registry_model.datatypes import RepositoryReference
from image.docker.schema1 import DockerSchema1ManifestBuilder
from test.fixtures import * from test.fixtures import *
@ -235,6 +237,10 @@ def test_repository_tag_history(pre_oci_model):
assert not has_more assert not has_more
assert len(history) == 2 assert len(history) == 2
# Ensure the latest tag is marked expired, since there is an expired one.
with assert_query_count(1):
assert pre_oci_model.has_expired_tag(repository_ref, 'latest')
@pytest.mark.parametrize('repo_namespace, repo_name', [ @pytest.mark.parametrize('repo_namespace, repo_name', [
('devtable', 'simple'), ('devtable', 'simple'),
@ -685,3 +691,27 @@ def test_get_cached_repo_blob(pre_oci_model):
# does not contain the blob. # does not contain the blob.
with pytest.raises(SomeException): with pytest.raises(SomeException):
pre_oci_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', 'some other digest') pre_oci_model.get_cached_repo_blob(model_cache, 'devtable', 'simple', 'some other digest')
def test_create_manifest_and_retarget_tag(pre_oci_model):
  """ Verifies that a newly-built schema1 manifest can be written and retargeted to a new tag. """
  repository_ref = pre_oci_model.lookup_repository('devtable', 'simple')
  latest_tag = pre_oci_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
  manifest = pre_oci_model.get_manifest_for_tag(latest_tag).get_parsed_manifest()

  # Build a one-layer manifest that reuses an existing blob and legacy image ID,
  # so no new storage needs to be uploaded for the write to succeed.
  builder = DockerSchema1ManifestBuilder('devtable', 'simple', 'anothertag')
  builder.add_layer(manifest.blob_digests[0],
                    '{"id": "%s"}' % latest_tag.legacy_image.docker_image_id)
  sample_manifest = builder.build(docker_v2_signing_key)
  assert sample_manifest is not None

  # Write the manifest and point the new tag at it.
  another_manifest, tag = pre_oci_model.create_manifest_and_retarget_tag(repository_ref,
                                                                         sample_manifest,
                                                                         'anothertag')
  assert another_manifest is not None
  assert tag is not None

  # The stored manifest must round-trip to exactly what was written.
  assert tag.name == 'anothertag'
  assert another_manifest.get_parsed_manifest().manifest_dict == sample_manifest.manifest_dict

  layers = pre_oci_model.list_manifest_layers(another_manifest)
  assert len(layers) == 1

View file

@ -1,36 +0,0 @@
import logging
from app import app
from endpoints.v2.models_pre_oci import data_model as model
from util.timedeltastring import convert_to_timedelta
logger = logging.getLogger(__name__)
# Bounds applied to label-requested manifest expirations; values outside this
# window are clamped by _expires_after below.
min_expire_sec = convert_to_timedelta(app.config.get('LABELED_EXPIRATION_MINIMUM', '1h'))
max_expire_sec = convert_to_timedelta(app.config.get('LABELED_EXPIRATION_MAXIMUM', '104w'))
def _expires_after(value, namespace_name, repo_name, digest):
""" Sets the expiration of a manifest based on the quay.expires-in label. """
try:
timedelta = convert_to_timedelta(value)
except ValueError:
logger.exception('Could not convert %s to timedeltastring for %s/%s@%s', value, namespace_name,
repo_name, digest)
return
total_seconds = min(max(timedelta.total_seconds(), min_expire_sec.total_seconds()),
max_expire_sec.total_seconds())
logger.debug('Labeling manifest %s/%s@%s with expiration of %s', namespace_name, repo_name,
digest, total_seconds)
model.set_manifest_expires_after(namespace_name, repo_name, digest, total_seconds)
_LABEL_HANDLES = {
'quay.expires-after': _expires_after,
}
def handle_label(key, value, namespace_name, repo_name, digest):
handler = _LABEL_HANDLES.get(key)
if handler is not None:
handler(value, namespace_name, repo_name, digest)

View file

@ -6,26 +6,22 @@ from flask import request, url_for, Response
import features import features
from app import docker_v2_signing_key, app, metric_queue from app import app, metric_queue
from auth.registry_jwt_auth import process_registry_jwt_auth from auth.registry_jwt_auth import process_registry_jwt_auth
from digest import digest_tools from digest import digest_tools
from data.registry_model import registry_model from data.registry_model import registry_model
from endpoints.decorators import anon_protect, parse_repository_name from endpoints.decorators import anon_protect, parse_repository_name
from endpoints.v2 import v2_bp, require_repo_read, require_repo_write from endpoints.v2 import v2_bp, require_repo_read, require_repo_write
from endpoints.v2.models_interface import Label from endpoints.v2.errors import (ManifestInvalid, ManifestUnknown, TagInvalid,
from endpoints.v2.models_pre_oci import data_model as model NameInvalid, TagExpired, NameUnknown)
from endpoints.v2.errors import (BlobUnknown, ManifestInvalid, ManifestUnknown, TagInvalid,
NameInvalid, TagExpired)
from endpoints.v2.labelhandlers import handle_label
from image.docker import ManifestException from image.docker import ManifestException
from image.docker.schema1 import (DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE, from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE, DockerSchema1Manifest
DockerSchema1Manifest, DockerSchema1ManifestBuilder)
from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES, OCI_CONTENT_TYPES from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES, OCI_CONTENT_TYPES
from notifications import spawn_notification from notifications import spawn_notification
from util.audit import track_and_log from util.audit import track_and_log
from util.names import VALID_TAG_PATTERN from util.names import VALID_TAG_PATTERN
from util.registry.replication import queue_replication_batch from util.registry.replication import queue_replication_batch
from util.validation import is_json
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -40,45 +36,37 @@ MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN)
@require_repo_read @require_repo_read
@anon_protect @anon_protect
def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref): def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
manifest = model.get_manifest_by_tag(namespace_name, repo_name, manifest_ref) repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repository_ref is None:
raise NameUnknown()
tag = registry_model.get_repo_tag(repository_ref, manifest_ref)
if tag is None:
if registry_model.has_expired_tag(repository_ref, manifest_ref):
logger.debug('Found expired tag %s for repository %s/%s', manifest_ref, namespace_name,
repo_name)
msg = 'Tag %s was deleted or has expired. To pull, revive via time machine' % manifest_ref
raise TagExpired(msg)
raise ManifestUnknown()
manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
if manifest is None: if manifest is None:
has_tag = model.has_active_tag(namespace_name, repo_name, manifest_ref) # Something went wrong.
if not has_tag: raise ManifestInvalid()
has_expired_tag = model.has_tag(namespace_name, repo_name, manifest_ref)
if has_expired_tag:
logger.debug('Found expired tag %s for repository %s/%s', manifest_ref, namespace_name,
repo_name)
msg = 'Tag %s was deleted or has expired. To pull, revive via time machine' % manifest_ref
raise TagExpired(msg)
else:
raise ManifestUnknown()
repo_ref = registry_model.lookup_repository(namespace_name, repo_name) track_and_log('pull_repo', repository_ref, analytics_name='pull_repo_100x', analytics_sample=0.01,
if repo_ref is None: tag=manifest_ref)
raise ManifestUnknown() metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
tag = registry_model.get_repo_tag(repo_ref, manifest_ref, include_legacy_image=True)
if tag is None:
raise ManifestUnknown()
if not registry_model.backfill_manifest_for_tag(tag):
raise ManifestUnknown()
manifest = model.get_manifest_by_tag(namespace_name, repo_name, manifest_ref)
if manifest is None:
raise ManifestUnknown()
repo = model.get_repository(namespace_name, repo_name)
if repo is not None:
track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01,
tag=manifest_ref)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
return Response( return Response(
manifest.json, manifest.manifest_bytes,
status=200, status=200,
headers={'Content-Type': manifest.media_type, headers={
'Docker-Content-Digest': manifest.digest},) 'Content-Type': manifest.media_type,
'Docker-Content-Digest': manifest.digest,
},
)
@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['GET']) @v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['GET'])
@ -87,19 +75,21 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
@require_repo_read @require_repo_read
@anon_protect @anon_protect
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref): def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
manifest = model.get_manifest_by_digest(namespace_name, repo_name, manifest_ref) repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repository_ref is None:
raise NameUnknown()
manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
if manifest is None: if manifest is None:
# Without a tag name to reference, we can't make an attempt to generate the manifest
raise ManifestUnknown() raise ManifestUnknown()
repo = model.get_repository(namespace_name, repo_name) track_and_log('pull_repo', repository_ref, manifest_digest=manifest_ref)
if repo is not None: metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
track_and_log('pull_repo', repo, manifest_digest=manifest_ref)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
return Response(manifest.json, status=200, headers={ return Response(manifest.manifest_bytes, status=200, headers={
'Content-Type': manifest.media_type, 'Content-Type': manifest.media_type,
'Docker-Content-Digest': manifest.digest}) 'Docker-Content-Digest': manifest.digest,
})
def _reject_manifest2_schema2(func): def _reject_manifest2_schema2(func):
@ -158,99 +148,6 @@ def write_manifest_by_digest(namespace_name, repo_name, manifest_ref):
return _write_manifest_and_log(namespace_name, repo_name, manifest) return _write_manifest_and_log(namespace_name, repo_name, manifest)
def _write_manifest(namespace_name, repo_name, manifest):
  """ Validates and stores the given schema1 manifest under the repository, synthesizing any
      missing V1 images and writing labels for newly-created manifests.

      Returns a (repo, blob_map) tuple. Raises NameInvalid, BlobUnknown or ManifestInvalid
      on validation failure.
  """
  # Library pushes may omit the namespace when the target is the configured library namespace.
  if (manifest.namespace == '' and features.LIBRARY_SUPPORT and
      namespace_name == app.config['LIBRARY_NAMESPACE']):
    pass
  elif manifest.namespace != namespace_name:
    raise NameInvalid()

  if manifest.repo_name != repo_name:
    raise NameInvalid()

  # Ensure that the repository exists.
  repo = model.get_repository(namespace_name, repo_name)
  if repo is None:
    raise NameInvalid()

  if not manifest.layers:
    raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})

  # Ensure all the blobs in the manifest exist.
  blob_map = model.lookup_blobs_by_digest(repo, manifest.checksums)
  for layer in manifest.layers:
    digest_str = str(layer.digest)
    if digest_str not in blob_map:
      raise BlobUnknown(detail={'digest': digest_str})

  # Lookup all the images and their parent images (if any) inside the manifest.
  # This will let us know which v1 images we need to synthesize and which ones are invalid.
  all_image_ids = list(manifest.parent_image_ids | manifest.image_ids)
  images_map = model.get_docker_v1_metadata_by_image_id(repo, all_image_ids)

  # Rewrite any v1 image IDs that do not match the checksum in the database.
  try:
    # TODO: make this batch and read the parent image from the previous iteration, rather than
    # reloading it.
    rewritten_images = list(manifest.rewrite_invalid_image_ids(images_map))
    for rewritten_image in rewritten_images:
      # Only images absent from the repository need a synthesized V1 record.
      if not rewritten_image.image_id in images_map:
        model.synthesize_v1_image(
          repo,
          blob_map[rewritten_image.content_checksum],
          rewritten_image.image_id,
          rewritten_image.created,
          rewritten_image.comment,
          rewritten_image.command,
          rewritten_image.compat_json,
          rewritten_image.parent_image_id,
        )
  except ManifestException as me:
    logger.exception("exception when rewriting v1 metadata")
    raise ManifestInvalid(detail={'message': 'failed synthesizing v1 metadata: %s' % me.message})

  # Store the manifest pointing to the tag.
  # NOTE(review): assumes rewrite_invalid_image_ids yields layers in order, with the leaf last.
  leaf_layer_id = rewritten_images[-1].image_id
  newly_created = model.save_manifest(repo, manifest.tag, manifest, leaf_layer_id, blob_map)
  if newly_created:
    # TODO: make this batch
    labels = []
    for key, value in manifest.layers[-1].v1_metadata.labels.iteritems():
      media_type = 'application/json' if is_json(value) else 'text/plain'
      labels.append(Label(key=key, value=value, source_type='manifest', media_type=media_type))
      handle_label(key, value, namespace_name, repo_name, manifest.digest)
    model.create_manifest_labels(namespace_name, repo_name, manifest.digest, labels)

  return repo, blob_map
def _write_manifest_and_log(namespace_name, repo_name, manifest):
  """ Writes the manifest, queues its blobs for replication, records the push and returns
      the 202 registry response with the manifest digest headers.
  """
  repo, blob_map = _write_manifest(namespace_name, repo_name, manifest)

  # Queue all blob manifests for replication.
  if features.STORAGE_REPLICATION:
    with queue_replication_batch(namespace_name) as queue_storage_replication:
      for layer in manifest.layers:
        digest_str = str(layer.digest)
        queue_storage_replication(blob_map[digest_str])

  track_and_log('push_repo', repo, tag=manifest.tag)
  spawn_notification(repo, 'repo_push', {'updated_tags': [manifest.tag]})
  metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

  return Response(
    'OK',
    status=202,
    headers={
      'Docker-Content-Digest': manifest.digest,
      'Location':
        url_for('v2.fetch_manifest_by_digest', repository='%s/%s' % (namespace_name, repo_name),
                manifest_ref=manifest.digest),
    },
  )
@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE']) @v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE'])
@parse_repository_name() @parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push']) @process_registry_jwt_auth(scopes=['pull', 'push'])
@ -263,11 +160,75 @@ def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
Note: there is no equivalent method for deleting by tag name because it is Note: there is no equivalent method for deleting by tag name because it is
forbidden by the spec. forbidden by the spec.
""" """
tags = model.delete_manifest_by_digest(namespace_name, repo_name, manifest_ref) repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repository_ref is None:
raise NameUnknown()
manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
if manifest is None:
raise ManifestUnknown()
tags = registry_model.delete_tags_for_manifest(manifest)
if not tags: if not tags:
raise ManifestUnknown() raise ManifestUnknown()
for tag in tags: for tag in tags:
track_and_log('delete_tag', tag.repository, tag=tag.name, digest=manifest_ref) track_and_log('delete_tag', repository_ref, tag=tag.name, digest=manifest_ref)
return Response(status=202) return Response(status=202)
def _write_manifest_and_log(namespace_name, repo_name, manifest_impl):
  """ Writes the manifest via the registry data model, queues its layer blobs for replication,
      records the push and returns the 202 registry response with the manifest digest headers.
  """
  repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name, manifest_impl)

  # Queue all blob manifests for replication.
  if features.STORAGE_REPLICATION:
    layers = registry_model.list_manifest_layers(manifest)
    if layers is None:
      # The just-written manifest could not be re-read; treat it as invalid.
      raise ManifestInvalid()

    with queue_replication_batch(namespace_name) as queue_storage_replication:
      for layer in layers:
        queue_storage_replication(layer.blob)

  track_and_log('push_repo', repository_ref, tag=manifest_impl.tag)
  spawn_notification(repository_ref, 'repo_push', {'updated_tags': [manifest_impl.tag]})
  metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

  return Response(
    'OK',
    status=202,
    headers={
      'Docker-Content-Digest': manifest.digest,
      'Location':
        url_for('v2.fetch_manifest_by_digest',
                repository='%s/%s' % (namespace_name, repo_name),
                manifest_ref=manifest.digest),
    },
  )
def _write_manifest(namespace_name, repo_name, manifest_impl):
  """ Validates the manifest against the request path, writes it via the registry data model
      and retargets its tag.

      Returns a (repository_ref, manifest, tag) tuple. Raises NameInvalid, NameUnknown or
      ManifestInvalid on failure.
  """
  # Library pushes may omit the namespace when the target is the configured library namespace.
  is_library_push = (manifest_impl.namespace == '' and features.LIBRARY_SUPPORT and
                     namespace_name == app.config['LIBRARY_NAMESPACE'])
  if not is_library_push and manifest_impl.namespace != namespace_name:
    raise NameInvalid()

  if manifest_impl.repo_name != repo_name:
    raise NameInvalid()

  if not manifest_impl.layers:
    raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})

  # Ensure that the repository exists.
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest_impl,
                                                                  manifest_impl.tag)
  if manifest is None:
    raise ManifestInvalid()

  return repository_ref, manifest, tag

View file

@ -52,4 +52,4 @@ def test_e2e_query_count_manifest_norewrite(client, app):
conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params, expected_code=202, conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params, expected_code=202,
headers=headers, raw_body=tag_manifest.json_data) headers=headers, raw_body=tag_manifest.json_data)
assert counter.count <= 15 assert counter.count <= 16

View file

@ -51,7 +51,7 @@ class V2Protocol(RegistryProtocol):
Failures.MISSING_TAG: 404, Failures.MISSING_TAG: 404,
Failures.INVALID_TAG: 404, Failures.INVALID_TAG: 404,
Failures.INVALID_IMAGES: 400, Failures.INVALID_IMAGES: 400,
Failures.INVALID_BLOB: 404, Failures.INVALID_BLOB: 400,
Failures.UNSUPPORTED_CONTENT_TYPE: 415, Failures.UNSUPPORTED_CONTENT_TYPE: 415,
}, },
} }

View file

@ -678,9 +678,9 @@ class V2RegistryPushMixin(V2RegistryMixin):
for tag_name in tag_names: for tag_name in tag_names:
manifest = manifests[tag_name] manifest = manifests[tag_name]
# Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we expect # Write the manifest. If we expect it to be invalid, we expect a 400 code. Otherwise, we expect
# a 202 response for success. # a 202 response for success.
put_code = 404 if invalid else 202 put_code = 400 if invalid else 202
self.conduct('PUT', '/v2/%s/manifests/%s' % (repo_name, tag_name), self.conduct('PUT', '/v2/%s/manifests/%s' % (repo_name, tag_name),
data=manifest.bytes, expected_code=put_code, data=manifest.bytes, expected_code=put_code,
headers={'Content-Type': 'application/json'}, auth='jwt') headers={'Content-Type': 'application/json'}, auth='jwt')
@ -1682,9 +1682,9 @@ class V2RegistryTests(V2RegistryPullMixin, V2RegistryPushMixin, RegistryTestsMix
manifest = builder.build(_JWK) manifest = builder.build(_JWK)
response = self.conduct('PUT', '/v2/%s/manifests/%s' % (repo_name, tag_name), response = self.conduct('PUT', '/v2/%s/manifests/%s' % (repo_name, tag_name),
data=manifest.bytes, expected_code=404, data=manifest.bytes, expected_code=400,
headers={'Content-Type': 'application/json'}, auth='jwt') headers={'Content-Type': 'application/json'}, auth='jwt')
self.assertEquals('BLOB_UNKNOWN', response.json()['errors'][0]['code']) self.assertEquals('MANIFEST_INVALID', response.json()['errors'][0]['code'])
def test_delete_manifest(self): def test_delete_manifest(self):