Merge pull request #3257 from quay/joseph.schorr/QUAY-1030/interfacing-part-9-2

Finish changing V1 to use new registry data model
This commit is contained in:
Joseph Schorr 2018-09-30 15:59:50 -04:00 committed by GitHub
commit ce19273c54
15 changed files with 606 additions and 683 deletions

View file

@ -366,6 +366,13 @@ def get_image(repo, docker_image_id):
return None return None
def get_image_by_db_id(id):
try:
return Image.get(id=id)
except Image.DoesNotExist:
return None
def synthesize_v1_image(repo, image_storage_id, storage_image_size, docker_image_id, def synthesize_v1_image(repo, image_storage_id, storage_image_size, docker_image_id,
created_date_str, comment, command, v1_json_metadata, parent_image=None): created_date_str, comment, command, v1_json_metadata, parent_image=None):
""" Find an existing image with this docker image id, and if none exists, write one with the """ Find an existing image with this docker image id, and if none exists, write one with the

View file

@ -50,3 +50,16 @@ def requiresinput(input_name):
return wrapper return wrapper
return inner return inner
def optionalinput(input_name):
""" Marks a property on the data type as having an input be optional when invoked. """
def inner(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
kwargs[input_name] = self._inputs.get(input_name)
result = func(self, *args, **kwargs)
return result
return wrapper
return inner
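
For illustration, a minimal sketch of how a datatype property can consume an optional input; the Sample type and its display_name field are hypothetical, and only datatype, optionalinput, and the inputs convention come from the code above.

from data.registry_model.datatype import datatype, optionalinput

class Sample(datatype('Sample', ['name'])):
  """ Hypothetical datatype with one eagerly-loaded field and one optional input. """

  @property
  @optionalinput('display_name')
  def display_name(self, display_name):
    # Use the precomputed input when the constructor supplied it; otherwise
    # fall back to deriving it on demand.
    return display_name or self.name.title()

# Construction mirrors RepositoryReference below (constructor shape assumed):
#   Sample(db_id=1, inputs=dict(display_name='Readme'), name='readme')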

View file

@ -6,7 +6,7 @@ from enum import Enum, unique
from cachetools import lru_cache from cachetools import lru_cache
from data import model from data import model
from data.registry_model.datatype import datatype, requiresinput from data.registry_model.datatype import datatype, requiresinput, optionalinput
from image.docker.schema1 import DockerSchema1Manifest from image.docker.schema1 import DockerSchema1Manifest
@ -17,17 +17,41 @@ class RepositoryReference(datatype('Repository', [])):
if repo_obj is None: if repo_obj is None:
return None return None
return RepositoryReference(db_id=repo_obj.id) return RepositoryReference(db_id=repo_obj.id,
inputs=dict(
kind=model.repository.get_repo_kind_name(repo_obj),
is_public=model.repository.is_repository_public(repo_obj)
))
@classmethod @classmethod
def for_id(cls, repo_id): def for_id(cls, repo_id):
return RepositoryReference(db_id=repo_id) return RepositoryReference(db_id=repo_id, inputs=dict(kind=None, is_public=None))
@property @property
@lru_cache(maxsize=1) @lru_cache(maxsize=1)
def _repository_obj(self): def _repository_obj(self):
return model.repository.lookup_repository(self._db_id) return model.repository.lookup_repository(self._db_id)
@property
@optionalinput('kind')
def kind(self, kind):
""" Returns the kind of the repository. """
return kind or model.repository.get_repo_kind_name(self._repository_obj)
@property
@optionalinput('is_public')
def is_public(self, is_public):
""" Returns whether the repository is public. """
if is_public is not None:
return is_public
return model.repository.is_repository_public(self._repository_obj)
@property
def id(self):
""" Returns the database ID of the repository. """
return self._db_id
@property @property
def namespace_name(self): def namespace_name(self):
""" Returns the namespace name of this repository. """ Returns the namespace name of this repository.
@ -119,7 +143,8 @@ class Manifest(datatype('Manifest', ['digest', 'manifest_bytes'])):
class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'comment', 'command', class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'comment', 'command',
'image_size', 'aggregate_size', 'uploading'])): 'image_size', 'aggregate_size', 'uploading',
'v1_metadata_string'])):
""" LegacyImage represents a Docker V1-style image found in a repository. """ """ LegacyImage represents a Docker V1-style image found in a repository. """
@classmethod @classmethod
def for_image(cls, image, images_map=None, tags_map=None, blob=None): def for_image(cls, image, images_map=None, tags_map=None, blob=None):
@ -134,6 +159,7 @@ class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'commen
created=image.created, created=image.created,
comment=image.comment, comment=image.comment,
command=image.command, command=image.command,
v1_metadata_string=image.v1_json_metadata,
image_size=image.storage.image_size, image_size=image.storage.image_size,
aggregate_size=image.aggregate_size, aggregate_size=image.aggregate_size,
uploading=image.storage.uploading) uploading=image.storage.uploading)
@ -143,7 +169,8 @@ class LegacyImage(datatype('LegacyImage', ['docker_image_id', 'created', 'commen
@requiresinput('ancestor_id_list') @requiresinput('ancestor_id_list')
def parents(self, images_map, ancestor_id_list): def parents(self, images_map, ancestor_id_list):
""" Returns the parent images for this image. Raises an exception if the parents have """ Returns the parent images for this image. Raises an exception if the parents have
not been loaded before this property is invoked. not been loaded before this property is invoked. Parents are returned starting at the
leaf image.
""" """
return [LegacyImage.for_image(images_map[ancestor_id], images_map=images_map) return [LegacyImage.for_image(images_map[ancestor_id], images_map=images_map)
for ancestor_id in reversed(ancestor_id_list) for ancestor_id in reversed(ancestor_id_list)
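
For illustration (IDs hypothetical), the reversal above means an ancestry stored root-first comes back leaf-first:

# Stored ancestry, root first, as kept in the ancestor_id_list input:
ancestor_id_list = ['root', 'mid', 'direct_parent']
# parents then yields the LegacyImage for 'direct_parent', then 'mid',
# then 'root', because the list is reversed before the images_map lookup.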

View file

@ -0,0 +1,202 @@
import logging
import json
import uuid
from collections import namedtuple
from flask import session
from data import model
from data.database import db_transaction
from data.registry_model import registry_model
logger = logging.getLogger(__name__)
ManifestLayer = namedtuple('ManifestLayer', ['layer_id', 'v1_metadata_string', 'db_id'])
_BuilderState = namedtuple('_BuilderState', ['builder_id', 'images', 'tags', 'checksums'])
_SESSION_KEY = '__manifestbuilder'
def create_manifest_builder(repository_ref):
""" Creates a new manifest builder for populating manifests under the specified repository
and returns it. Returns None if the builder could not be constructed.
"""
builder_id = str(uuid.uuid4())
builder = _ManifestBuilder(repository_ref, _BuilderState(builder_id, {}, {}, {}))
builder._save_to_session()
return builder
def lookup_manifest_builder(repository_ref, builder_id):
""" Looks up the manifest builder with the given ID under the specified repository and returns
it or None if none.
"""
builder_state_tuple = session.get(_SESSION_KEY)
if builder_state_tuple is None:
return None
builder_state = _BuilderState(*builder_state_tuple)
if builder_state.builder_id != builder_id:
return None
return _ManifestBuilder(repository_ref, builder_state)
class _ManifestBuilder(object):
""" Helper class which provides an interface for bookkeeping the layers and configuration of
manifests being constructed.
"""
def __init__(self, repository_ref, builder_state):
self._repository_ref = repository_ref
self._builder_state = builder_state
@property
def builder_id(self):
""" Returns the unique ID for this builder. """
return self._builder_state.builder_id
@property
def committed_tags(self):
""" Returns the tags committed by this builder, if any. """
return [registry_model.get_repo_tag(self._repository_ref, tag_name, include_legacy_image=True)
for tag_name in self._builder_state.tags.keys()]
def start_layer(self, layer_id, v1_metadata_string, location_name, calling_user,
temp_tag_expiration):
""" Starts a new layer with the given ID to be placed into a manifest. Returns the layer
started or None if an error occurred.
"""
# Ensure the repository still exists.
repository = model.repository.lookup_repository(self._repository_ref._db_id)
if repository is None:
return None
namespace_name = repository.namespace_user.username
repo_name = repository.name
try:
v1_metadata = json.loads(v1_metadata_string)
except ValueError:
logger.exception('Exception when trying to parse V1 metadata JSON for layer %s', layer_id)
return None
except TypeError:
logger.exception('Exception when trying to parse V1 metadata JSON for layer %s', layer_id)
return None
# Sanity check that the ID matches the v1 metadata.
if layer_id != v1_metadata['id']:
return None
# Ensure the parent already exists in the repository.
parent_id = v1_metadata.get('parent', None)
parent_image = None
if parent_id is not None:
parent_image = model.image.get_repo_image(namespace_name, repo_name, parent_id)
if parent_image is None:
return None
# Check to see if this layer already exists in the repository. If so, we can skip the creation.
existing_image = registry_model.get_legacy_image(self._repository_ref, layer_id)
if existing_image is not None:
self._builder_state.images[layer_id] = existing_image.id
self._save_to_session()
return ManifestLayer(layer_id, v1_metadata_string, existing_image.id)
with db_transaction():
# Otherwise, create a new legacy image and point a temporary tag at it.
created = model.image.find_create_or_link_image(layer_id, repository, calling_user, {},
location_name)
model.tag.create_temporary_hidden_tag(repository, created, temp_tag_expiration)
# Mark the image as uploading.
created.storage.uploading = True
created.storage.save()
# Save its V1 metadata.
command_list = v1_metadata.get('container_config', {}).get('Cmd', None)
command = json.dumps(command_list) if command_list else None
model.image.set_image_metadata(layer_id, namespace_name, repo_name,
v1_metadata.get('created'),
v1_metadata.get('comment'),
command, json.dumps(v1_metadata),
parent=parent_image)
# Save the changes to the builder.
self._builder_state.images[layer_id] = created.id
self._save_to_session()
return ManifestLayer(layer_id, v1_metadata_string, created.id)
def lookup_layer(self, layer_id):
""" Returns a layer with the given ID under this builder. If none exists, returns None. """
if layer_id not in self._builder_state.images:
return None
image = model.image.get_image_by_db_id(self._builder_state.images[layer_id])
if image is None:
return None
return ManifestLayer(layer_id, image.v1_json_metadata, image.id)
def assign_layer_blob(self, layer, blob, computed_checksums):
""" Assigns a blob to a layer. """
assert blob
repo_image = model.image.get_image_by_db_id(layer.db_id)
if repo_image is None:
return None
with db_transaction():
existing_storage = repo_image.storage
repo_image.storage = blob._db_id
repo_image.save()
existing_storage.delete_instance(recursive=True)
self._builder_state.checksums[layer.layer_id] = computed_checksums
self._save_to_session()
return True
def validate_layer_checksum(self, layer, checksum):
""" Returns whether the checksum for a layer matches that specified.
"""
return checksum in self.get_layer_checksums(layer)
def get_layer_checksums(self, layer):
""" Returns the registered defined for the layer, if any. """
return self._builder_state.checksums.get(layer.layer_id) or []
def save_precomputed_checksum(self, layer, checksum):
""" Saves a precomputed checksum for a layer. """
checksums = self._builder_state.checksums.get(layer.layer_id) or []
checksums.append(checksum)
self._builder_state.checksums[layer.layer_id] = checksums
self._save_to_session()
def commit_tag_and_manifest(self, tag_name, layer):
""" Commits a new tag + manifest for that tag to the repository with the given name,
pointing to the given layer.
"""
legacy_image = registry_model.get_legacy_image(self._repository_ref, layer.layer_id)
if legacy_image is None:
return None
tag = registry_model.retarget_tag(self._repository_ref, tag_name, legacy_image)
if tag is None:
return None
self._builder_state.tags[tag_name] = tag._db_id
self._save_to_session()
return tag
def done(self):
""" Marks the manifest builder as complete and disposes of any state. This call is optional
and it is expected manifest builders will eventually time out if unused for an
extended period of time.
"""
session.pop(_SESSION_KEY, None)
def _save_to_session(self):
session[_SESSION_KEY] = self._builder_state
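
Read together with the endpoint changes later in this commit, the expected call sequence for a V1 push looks roughly like the sketch below. It is only an illustration of the builder API added above; the request plumbing, variable values, and the mapping of calls to endpoints are assumptions.

# create_repository: start a builder and remember it in the session.
builder = create_manifest_builder(repository_ref)
session['manifest_builder'] = builder.builder_id

# put_image_json / put_image_layer / put_image_checksum: re-find the builder
# and populate one layer at a time.
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
layer = builder.start_layer(layer_id, v1_json, 'local_us', calling_user, 3600)
builder.save_precomputed_checksum(layer, old_style_checksum)
builder.assign_layer_blob(layer, uploaded_blob, computed_checksums)
assert builder.validate_layer_checksum(layer, given_checksum)

# Tag commit and final update_images call: point the tag at the leaf layer,
# then dispose of the builder state.
tag = builder.commit_tag_and_manifest(tag_name, layer)
builder.done()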

View file

@ -0,0 +1,95 @@
import hashlib
import json
from io import BytesIO
import pytest
from mock import patch
from data.registry_model.blobuploader import BlobUploadSettings, upload_blob
from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
from data.registry_model.registry_pre_oci_model import PreOCIModel
from storage.distributedstorage import DistributedStorage
from storage.fakestorage import FakeStorage
from test.fixtures import *
@pytest.fixture()
def pre_oci_model(initialized_db):
return PreOCIModel()
@pytest.fixture()
def fake_session():
with patch('data.registry_model.manifestbuilder.session', {}):
yield
@pytest.mark.parametrize('layers', [
pytest.param([('someid', None, 'some data')], id='Single layer'),
pytest.param([('parentid', None, 'some parent data'),
('someid', 'parentid', 'some data')],
id='Multi layer'),
])
def test_build_manifest(layers, fake_session, pre_oci_model):
repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
storage = DistributedStorage({'local_us': FakeStorage(None)}, ['local_us'])
settings = BlobUploadSettings('2M', 512 * 1024, 3600)
app_config = {'TESTING': True}
builder = create_manifest_builder(repository_ref)
assert lookup_manifest_builder(repository_ref, 'anotherid') is None
assert lookup_manifest_builder(repository_ref, builder.builder_id) is not None
blobs_by_layer = {}
for layer_id, parent_id, layer_bytes in layers:
# Start a new layer.
assert builder.start_layer(layer_id, json.dumps({'id': layer_id, 'parent': parent_id}),
'local_us', None, 60)
checksum = hashlib.sha1(layer_bytes).hexdigest()
# Assign it a blob.
with upload_blob(repository_ref, storage, settings) as uploader:
uploader.upload_chunk(app_config, BytesIO(layer_bytes))
blob = uploader.commit_to_blob(app_config)
blobs_by_layer[layer_id] = blob
builder.assign_layer_blob(builder.lookup_layer(layer_id), blob, [checksum])
# Validate the checksum.
assert builder.validate_layer_checksum(builder.lookup_layer(layer_id), checksum)
# Commit the manifest to a tag.
tag = builder.commit_tag_and_manifest('somenewtag', builder.lookup_layer(layers[-1][0]))
assert tag
assert tag in builder.committed_tags
# Verify the legacy image for the tag.
found = pre_oci_model.get_repo_tag(repository_ref, 'somenewtag', include_legacy_image=True)
assert found
assert found.name == 'somenewtag'
assert found.legacy_image.docker_image_id == layers[-1][0]
# Verify the blob and manifest.
manifest = pre_oci_model.get_manifest_for_tag(found)
assert manifest
parsed = manifest.get_parsed_manifest()
assert len(list(parsed.layers)) == len(layers)
for index, (layer_id, parent_id, layer_bytes) in enumerate(layers):
assert list(parsed.blob_digests)[index] == blobs_by_layer[layer_id].digest
assert list(parsed.layers)[index].v1_metadata.image_id == layer_id
assert list(parsed.layers)[index].v1_metadata.parent_image_id == parent_id
assert parsed.leaf_layer_v1_image_id == layers[-1][0]
def test_build_manifest_missing_parent(fake_session, pre_oci_model):
repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
builder = create_manifest_builder(repository_ref)
assert builder.start_layer('somelayer', json.dumps({'id': 'somelayer', 'parent': 'someparent'}),
'local_us', None, 60) is None

View file

@ -14,9 +14,11 @@ from auth.permissions import (
ModifyRepositoryPermission, UserAdminPermission, ReadRepositoryPermission, ModifyRepositoryPermission, UserAdminPermission, ReadRepositoryPermission,
CreateRepositoryPermission, repository_read_grant, repository_write_grant) CreateRepositoryPermission, repository_read_grant, repository_write_grant)
from auth.signedgrant import generate_signed_token from auth.signedgrant import generate_signed_token
from data import model
from data.registry_model import registry_model
from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
from endpoints.decorators import anon_protect, anon_allowed, parse_repository_name from endpoints.decorators import anon_protect, anon_allowed, parse_repository_name
from endpoints.v1 import v1_bp from endpoints.v1 import v1_bp
from endpoints.v1.models_pre_oci import pre_oci_model as model
from notifications import spawn_notification from notifications import spawn_notification
from util.audit import track_and_log from util.audit import track_and_log
from util.http import abort from util.http import abort
@ -33,7 +35,9 @@ class GrantType(object):
def ensure_namespace_enabled(f): def ensure_namespace_enabled(f):
@wraps(f) @wraps(f)
def wrapper(namespace_name, repo_name, *args, **kwargs): def wrapper(namespace_name, repo_name, *args, **kwargs):
if not model.is_namespace_enabled(namespace_name): namespace = model.user.get_namespace_user(namespace_name)
is_namespace_enabled = namespace is not None and namespace.enabled
if not is_namespace_enabled:
abort(400, message='Namespace is disabled. Please contact your system administrator.') abort(400, message='Namespace is disabled. Please contact your system administrator.')
return f(namespace_name, repo_name, *args, **kwargs) return f(namespace_name, repo_name, *args, **kwargs)
@ -148,11 +152,13 @@ def update_user(username):
if 'password' in update_request: if 'password' in update_request:
logger.debug('Updating user password') logger.debug('Updating user password')
model.change_user_password(get_authenticated_user(), update_request['password']) model.user.change_password(get_authenticated_user(), update_request['password'])
return jsonify({ return jsonify({
'username': get_authenticated_user().username, 'username': get_authenticated_user().username,
'email': get_authenticated_user().email}) 'email': get_authenticated_user().email,
})
abort(403) abort(403)
@ -168,25 +174,22 @@ def create_repository(namespace_name, repo_name):
abort(400, message='Invalid repository name. Repository names cannot contain slashes.') abort(400, message='Invalid repository name. Repository names cannot contain slashes.')
logger.debug('Looking up repository %s/%s', namespace_name, repo_name) logger.debug('Looking up repository %s/%s', namespace_name, repo_name)
repo = model.get_repository(namespace_name, repo_name) repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repository_ref is None and get_authenticated_user() is None:
logger.debug('Found repository %s/%s', namespace_name, repo_name)
if not repo and get_authenticated_user() is None:
logger.debug('Attempt to create repository %s/%s without user auth', namespace_name, repo_name) logger.debug('Attempt to create repository %s/%s without user auth', namespace_name, repo_name)
abort(401, abort(401,
message='Cannot create a repository as a guest. Please login via "docker login" first.', message='Cannot create a repository as a guest. Please login via "docker login" first.',
issue='no-login') issue='no-login')
elif repository_ref:
elif repo:
modify_perm = ModifyRepositoryPermission(namespace_name, repo_name) modify_perm = ModifyRepositoryPermission(namespace_name, repo_name)
if not modify_perm.can(): if not modify_perm.can():
abort(403, abort(403,
message='You do not have permission to modify repository %(namespace)s/%(repository)s', message='You do not have permission to modify repository %(namespace)s/%(repository)s',
issue='no-repo-write-permission', namespace=namespace_name, repository=repo_name) issue='no-repo-write-permission', namespace=namespace_name, repository=repo_name)
elif repo.kind != 'image': elif repository_ref.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind msg = ('This repository is for managing %s resources and not container images.' %
repository_ref.kind)
abort(405, message=msg, namespace=namespace_name) abort(405, message=msg, namespace=namespace_name)
else: else:
create_perm = CreateRepositoryPermission(namespace_name) create_perm = CreateRepositoryPermission(namespace_name)
if not create_perm.can(): if not create_perm.can():
@ -199,17 +202,27 @@ def create_repository(namespace_name, repo_name):
logger.debug('Creating repository %s/%s with owner: %s', namespace_name, repo_name, logger.debug('Creating repository %s/%s with owner: %s', namespace_name, repo_name,
get_authenticated_user().username) get_authenticated_user().username)
model.create_repository(namespace_name, repo_name, get_authenticated_user()) repository_ref = model.repository.create_repository(namespace_name, repo_name,
get_authenticated_user())
if get_authenticated_user(): if get_authenticated_user():
user_event_data = { user_event_data = {
'action': 'push_start', 'action': 'push_start',
'repository': repo_name, 'repository': repo_name,
'namespace': namespace_name,} 'namespace': namespace_name,
}
event = userevents.get_event(get_authenticated_user().username) event = userevents.get_event(get_authenticated_user().username)
event.publish_event_data('docker-cli', user_event_data) event.publish_event_data('docker-cli', user_event_data)
# Start a new builder for the repository and save its ID in the session.
assert repository_ref
builder = create_manifest_builder(repository_ref)
logger.debug('Started repo push with manifest builder %s', builder)
if builder is None:
abort(404, message='Unknown repository', issue='unknown-repo')
session['manifest_builder'] = builder.builder_id
return make_response('Created', 201) return make_response('Created', 201)
@ -224,24 +237,26 @@ def update_images(namespace_name, repo_name):
if permission.can(): if permission.can():
logger.debug('Looking up repository') logger.debug('Looking up repository')
repo = model.get_repository(namespace_name, repo_name) repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
if not repo: kind_filter='image')
if repository_ref is None:
# Make sure the repo actually exists. # Make sure the repo actually exists.
abort(404, message='Unknown repository', issue='unknown-repo') abort(404, message='Unknown repository', issue='unknown-repo')
elif repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
abort(405, message=msg, namespace=namespace_name) if builder is None:
abort(400)
# Generate a job for each notification that has been added to this repo # Generate a job for each notification that has been added to this repo
logger.debug('Adding notifications for repository') logger.debug('Adding notifications for repository')
updated_tags = session.get('pushed_tags', {})
event_data = { event_data = {
'updated_tags': updated_tags.keys(), 'updated_tags': [tag.name for tag in builder.committed_tags],
} }
track_and_log('push_repo', repo) builder.done()
spawn_notification(repo, 'repo_push', event_data)
track_and_log('push_repo', repository_ref)
spawn_notification(repository_ref, 'repo_push', event_data)
metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v1', True]) metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
return make_response('Updated', 204) return make_response('Updated', 204)
@ -255,24 +270,22 @@ def update_images(namespace_name, repo_name):
@generate_headers(scope=GrantType.READ_REPOSITORY) @generate_headers(scope=GrantType.READ_REPOSITORY)
@anon_protect @anon_protect
def get_repository_images(namespace_name, repo_name): def get_repository_images(namespace_name, repo_name):
permission = ReadRepositoryPermission(namespace_name, repo_name) repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
kind_filter='image')
# TODO invalidate token? permission = ReadRepositoryPermission(namespace_name, repo_name)
if permission.can() or model.repository_is_public(namespace_name, repo_name): if permission.can() or (repository_ref and repository_ref.is_public):
# We can't rely on permissions to tell us if a repo exists anymore # We can't rely on permissions to tell us if a repo exists anymore
logger.debug('Looking up repository') if repository_ref is None:
repo = model.get_repository(namespace_name, repo_name)
if not repo:
abort(404, message='Unknown repository', issue='unknown-repo') abort(404, message='Unknown repository', issue='unknown-repo')
elif repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
logger.debug('Building repository image response') logger.debug('Building repository image response')
resp = make_response(json.dumps([]), 200) resp = make_response(json.dumps([]), 200)
resp.mimetype = 'application/json' resp.mimetype = 'application/json'
track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01) track_and_log('pull_repo', repository_ref,
analytics_name='pull_repo_100x',
analytics_sample=0.01)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v1', True]) metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
return resp return resp
@ -332,18 +345,21 @@ def _conduct_repo_search(username, query, limit=25, page=1):
offset = (page - 1) * limit offset = (page - 1) * limit
if query: if query:
matching_repos = model.get_sorted_matching_repositories(query, username, limit=limit + 1, matching_repos = model.repository.get_filtered_matching_repositories(query,
offset=offset) filter_username=username,
offset=offset,
limit=limit + 1)
else: else:
matching_repos = [] matching_repos = []
results = [] results = []
for repo in matching_repos[0:limit]: for repo in matching_repos[0:limit]:
results.append({ results.append({
'name': repo.namespace_name + '/' + repo.name, 'name': repo.namespace_user.username + '/' + repo.name,
'description': repo.description, 'description': repo.description,
'is_public': repo.is_public, 'is_public': model.repository.is_repository_public(repo),
'href': '/repository/' + repo.namespace_name + '/' + repo.name}) 'href': '/repository/' + repo.namespace_user.username + '/' + repo.name
})
# Defined: https://docs.docker.com/v1.6/reference/api/registry_api/ # Defined: https://docs.docker.com/v1.6/reference/api/registry_api/
return { return {
@ -352,4 +368,5 @@ def _conduct_repo_search(username, query, limit=25, page=1):
'num_pages': page + 1 if len(matching_repos) > limit else page, 'num_pages': page + 1 if len(matching_repos) > limit else page,
'page': page, 'page': page,
'page_size': limit, 'page_size': limit,
'results': results,} 'results': results,
}
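
The num_pages computation above relies on the fetch-one-extra idiom: asking the model for limit + 1 rows reveals whether a further page exists without a separate count query. A standalone sketch of the same idea (helper name and data are illustrative):

def paginate(fetch, page, limit=25):
  """ Fetch one extra row so we can tell whether another page exists. """
  offset = (page - 1) * limit
  rows = fetch(offset=offset, limit=limit + 1)
  num_pages = page + 1 if len(rows) > limit else page
  return rows[:limit], num_pages

# Example: 60 fake rows, 25 per page -> page 2 reports a third page.
rows, num_pages = paginate(lambda offset, limit: list(range(60))[offset:offset + limit], page=2)
assert len(rows) == 25 and num_pages == 3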

View file

@ -1,210 +0,0 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class Repository(
namedtuple('Repository', ['id', 'name', 'namespace_name', 'description', 'is_public',
'kind'])):
"""
Repository represents a namespaced collection of tags.
:type id: int
:type name: string
:type namespace_name: string
:type description: string
:type is_public: bool
:type kind: string
"""
@add_metaclass(ABCMeta)
class DockerRegistryV1DataInterface(object):
"""
Interface that represents all data store interactions required by a Docker Registry v1.
"""
@abstractmethod
def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id):
"""
Returns all the placements for the image with the given V1 Docker ID, found under the given
repository or None if no image was found.
"""
pass
@abstractmethod
def docker_v1_metadata(self, namespace_name, repo_name, image_id):
"""
Returns various pieces of metadata associated with an image with the given V1 Docker ID,
including the checksum and its V1 JSON metadata.
"""
pass
@abstractmethod
def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str,
comment, command, compat_json, parent_image_id=None):
"""
Updates various pieces of V1 metadata associated with a particular image.
"""
pass
@abstractmethod
def storage_exists(self, namespace_name, repo_name, image_id):
"""
Returns whether storage already exists for the image with the V1 Docker ID under the given
repository.
"""
pass
@abstractmethod
def store_docker_v1_checksums(self, namespace_name, repo_name, image_id, checksum,
content_checksum):
"""
Stores the various V1 checksums for the image with the V1 Docker ID.
"""
pass
@abstractmethod
def is_image_uploading(self, namespace_name, repo_name, image_id):
"""
Returns whether the image with the V1 Docker ID is currently marked as uploading.
"""
pass
@abstractmethod
def update_image_uploading(self, namespace_name, repo_name, image_id, is_uploading):
"""
Marks the image with the V1 Docker ID with the given uploading status.
"""
pass
@abstractmethod
def update_image_blob(self, namespace_name, repo_name, image_id, blob):
"""
Updates the blob for the image with the given V1 Docker ID.
"""
pass
@abstractmethod
def get_image_size(self, namespace_name, repo_name, image_id):
"""
Returns the wire size of the image with the given Docker V1 ID.
"""
pass
@abstractmethod
def create_bittorrent_pieces(self, namespace_name, repo_name, image_id, pieces_bytes):
"""
Saves the BitTorrent piece hashes for the image with the given Docker V1 ID.
"""
pass
@abstractmethod
def image_ancestry(self, namespace_name, repo_name, image_id):
"""
Returns a list containing the full ancestry of Docker V1 IDs, in order, for the image with the
given Docker V1 ID.
"""
pass
@abstractmethod
def repository_exists(self, namespace_name, repo_name):
"""
Returns whether the repository with the given name and namespace exists.
"""
pass
@abstractmethod
def create_or_link_image(self, username, namespace_name, repo_name, image_id, storage_location):
"""
Adds the given image to the given repository, by either linking to an existing image visible to
the user with the given username, or creating a new one if no existing image matches.
"""
pass
@abstractmethod
def create_temp_hidden_tag(self, namespace_name, repo_name, image_id, expiration):
"""
Creates a hidden tag under the matching namespace pointing to the image with the given V1 Docker
ID.
"""
pass
@abstractmethod
def list_tags(self, namespace_name, repo_name):
"""
Returns all the tags defined in the repository with the given namespace and name.
"""
pass
@abstractmethod
def create_or_update_tag(self, namespace_name, repo_name, image_id, tag_name):
"""
Creates or updates a tag under the matching repository to point to the image with the given
Docker V1 ID.
"""
pass
@abstractmethod
def find_image_id_by_tag(self, namespace_name, repo_name, tag_name):
"""
Returns the Docker V1 image ID for the HEAD image for the tag with the given name under the
matching repository, or None if none.
"""
pass
@abstractmethod
def delete_tag(self, namespace_name, repo_name, tag_name):
"""
Deletes the given tag from the given repository.
"""
pass
@abstractmethod
def change_user_password(self, user, new_password):
"""
Changes the password associated with the given user.
"""
pass
@abstractmethod
def get_repository(self, namespace_name, repo_name):
"""
Returns the repository with the given name under the given namespace or None
if none.
"""
pass
@abstractmethod
def create_repository(self, namespace_name, repo_name, user=None):
"""
Creates a new repository under the given namespace with the given name, for
the given user.
"""
pass
@abstractmethod
def repository_is_public(self, namespace_name, repo_name):
"""
Returns whether the repository with the given name under the given namespace
is public. If no matching repository was found, returns False.
"""
pass
@abstractmethod
def validate_oauth_token(self, token):
""" Returns whether the given OAuth token validates. """
pass
@abstractmethod
def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0,
limit=25):
"""
Returns a sorted list of repositories matching the given search term.
"""
pass
@abstractmethod
def is_namespace_enabled(self, namespace_name):
""" Returns whether the given namespace exists and is enabled. """
pass

View file

@ -1,193 +0,0 @@
from app import app, storage as store
from data import model
from data.database import db_transaction
from endpoints.v1.models_interface import DockerRegistryV1DataInterface, Repository
from util.morecollections import AttrDict
class PreOCIModel(DockerRegistryV1DataInterface):
"""
PreOCIModel implements the data model for the v1 Docker Registry protocol using a database schema
before it was changed to support the OCI specification.
"""
def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id):
image, placements = model.image.get_image_and_placements(namespace_name, repo_name, image_id)
if image is None:
return None, None
locations = {placement.location.name for placement in placements}
return locations, model.storage.get_layer_path(image.storage)
def docker_v1_metadata(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image(namespace_name, repo_name, image_id)
if repo_image is None:
return None
return AttrDict({
'namespace_name': namespace_name,
'repo_name': repo_name,
'image_id': image_id,
'checksum': repo_image.v1_checksum,
'compat_json': repo_image.v1_json_metadata,})
def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str,
comment, command, compat_json, parent_image_id=None):
parent_image = None
if parent_image_id is not None:
parent_image = model.image.get_repo_image(namespace_name, repo_name, parent_image_id)
model.image.set_image_metadata(image_id, namespace_name, repo_name, created_date_str, comment,
command, compat_json, parent=parent_image)
def storage_exists(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return False
if repo_image.storage.uploading:
return False
layer_path = model.storage.get_layer_path(repo_image.storage)
return store.exists(repo_image.storage.locations, layer_path)
def store_docker_v1_checksums(self, namespace_name, repo_name, image_id, checksum,
content_checksum):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return
assert repo_image.storage.content_checksum == content_checksum
with model.db_transaction():
repo_image.v1_checksum = checksum
repo_image.storage.save()
repo_image.save()
def is_image_uploading(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return False
return repo_image.storage.uploading
def update_image_uploading(self, namespace_name, repo_name, image_id, is_uploading):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return
repo_image.storage.uploading = is_uploading
repo_image.storage.save()
return repo_image.storage
def update_image_blob(self, namespace_name, repo_name, image_id, blob):
# Retrieve the existing image storage record and replace it with that given by the blob.
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None or not repo_image.storage.uploading:
return False
with db_transaction():
existing_storage = repo_image.storage
repo_image.storage = blob._db_id
repo_image.save()
existing_storage.delete_instance(recursive=True)
def get_image_size(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return None
return repo_image.storage.image_size
def create_bittorrent_pieces(self, namespace_name, repo_name, image_id, pieces_bytes):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return
model.storage.save_torrent_info(repo_image.storage, app.config['BITTORRENT_PIECE_SIZE'],
pieces_bytes)
def image_ancestry(self, namespace_name, repo_name, image_id):
try:
image = model.image.get_image_by_id(namespace_name, repo_name, image_id)
except model.InvalidImageException:
return None
parents = model.image.get_parent_images(namespace_name, repo_name, image)
ancestry_docker_ids = [image.docker_image_id]
ancestry_docker_ids.extend([parent.docker_image_id for parent in parents])
return ancestry_docker_ids
def repository_exists(self, namespace_name, repo_name):
repo = model.repository.get_repository(namespace_name, repo_name)
return repo is not None
def create_or_link_image(self, username, namespace_name, repo_name, image_id, storage_location):
repo = model.repository.get_repository(namespace_name, repo_name)
model.image.find_create_or_link_image(image_id, repo, username, {}, storage_location)
def create_temp_hidden_tag(self, namespace_name, repo_name, image_id, expiration):
repo_image = model.image.get_repo_image(namespace_name, repo_name, image_id)
if repo_image is None:
return
repo = repo_image.repository
model.tag.create_temporary_hidden_tag(repo, repo_image, expiration)
def list_tags(self, namespace_name, repo_name):
return model.tag.list_repository_tags(namespace_name, repo_name)
def create_or_update_tag(self, namespace_name, repo_name, image_id, tag_name):
model.tag.create_or_update_tag(namespace_name, repo_name, tag_name, image_id)
def find_image_id_by_tag(self, namespace_name, repo_name, tag_name):
try:
tag_image = model.tag.get_tag_image(namespace_name, repo_name, tag_name)
except model.DataModelException:
return None
return tag_image.docker_image_id
def delete_tag(self, namespace_name, repo_name, tag_name):
model.tag.delete_tag(namespace_name, repo_name, tag_name)
def change_user_password(self, user, new_password):
model.user.change_password(user, new_password)
def get_repository(self, namespace_name, repo_name):
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return None
return _repository_for_repo(repo)
def create_repository(self, namespace_name, repo_name, user=None):
model.repository.create_repository(namespace_name, repo_name, user)
def repository_is_public(self, namespace_name, repo_name):
return model.repository.repository_is_public(namespace_name, repo_name)
def validate_oauth_token(self, token):
return bool(model.oauth.validate_access_token(token))
def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0,
limit=25):
repos = model.repository.get_filtered_matching_repositories(
search_term, filter_username=filter_username, offset=offset, limit=limit)
return [_repository_for_repo(repo) for repo in repos]
def is_namespace_enabled(self, namespace_name):
namespace = model.user.get_namespace_user(namespace_name)
return namespace is not None and namespace.enabled
def _repository_for_repo(repo):
""" Returns a Repository object representing the Pre-OCI data model instance of a repository. """
return Repository(
id=repo.id,
name=repo.name,
namespace_name=repo.namespace_user.username,
description=repo.description,
is_public=model.repository.is_repository_public(repo),
kind=model.repository.get_repo_kind_name(repo),)
pre_oci_model = PreOCIModel()

View file

@ -7,38 +7,37 @@ from time import time
from flask import make_response, request, session, Response, redirect, abort as flask_abort from flask import make_response, request, session, Response, redirect, abort as flask_abort
from app import storage as store, app, metric_queue from app import storage as store, app
from auth.auth_context import get_authenticated_user from auth.auth_context import get_authenticated_user
from auth.decorators import extract_namespace_repo_from_session, process_auth from auth.decorators import extract_namespace_repo_from_session, process_auth
from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission) from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
from data import model, database from data import database
from data.registry_model import registry_model from data.registry_model import registry_model
from data.registry_model.blobuploader import upload_blob, BlobUploadSettings, BlobUploadException from data.registry_model.blobuploader import upload_blob, BlobUploadSettings, BlobUploadException
from data.registry_model.manifestbuilder import lookup_manifest_builder
from digest import checksums from digest import checksums
from endpoints.v1 import v1_bp from endpoints.v1 import v1_bp
from endpoints.v1.models_pre_oci import pre_oci_model as model
from endpoints.v1.index import ensure_namespace_enabled from endpoints.v1.index import ensure_namespace_enabled
from endpoints.decorators import anon_protect from endpoints.decorators import anon_protect
from util.http import abort, exact_abort from util.http import abort, exact_abort
from util.registry.filelike import SocketReader
from util.registry import gzipstream
from util.registry.replication import queue_storage_replication from util.registry.replication import queue_storage_replication
from util.registry.torrent import PieceHasher
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def require_completion(f): def require_completion(f):
"""This make sure that the image push correctly finished.""" """ This make sure that the image push correctly finished. """
@wraps(f) @wraps(f)
def wrapper(namespace, repository, *args, **kwargs): def wrapper(namespace, repository, *args, **kwargs):
image_id = kwargs['image_id'] image_id = kwargs['image_id']
if model.is_image_uploading(namespace, repository, image_id): repository_ref = registry_model.lookup_repository(namespace, repository)
if repository_ref is not None:
legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
if legacy_image is not None and legacy_image.uploading:
abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress', abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress',
image_id=image_id) image_id=image_id)
return f(namespace, repository, *args, **kwargs)
return f(namespace, repository, *args, **kwargs)
return wrapper return wrapper
@ -76,24 +75,23 @@ def set_cache_headers(f):
@anon_protect @anon_protect
def head_image_layer(namespace, repository, image_id, headers): def head_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository) permission = ReadRepositoryPermission(namespace, repository)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
logger.debug('Checking repo permissions') logger.debug('Checking repo permissions')
if permission.can() or model.repository_is_public(namespace, repository): if permission.can() or (repository_ref is not None and repository_ref.is_public):
repo = model.get_repository(namespace, repository) if repository_ref is None:
if repo.kind != 'image': abort(404)
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
logger.debug('Looking up placement locations') logger.debug('Looking up placement locations')
locations, _ = model.placement_locations_and_path_docker_v1(namespace, repository, image_id) legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
if locations is None: if legacy_image is None:
logger.debug('Could not find any blob placement locations') logger.debug('Could not find any blob placement locations')
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
# Add the Accept-Ranges header if the storage engine supports resumable # Add the Accept-Ranges header if the storage engine supports resumable
# downloads. # downloads.
extra_headers = {} extra_headers = {}
if store.get_supports_resumable_downloads(locations): if store.get_supports_resumable_downloads(legacy_image.blob.placements):
logger.debug('Storage supports resumable downloads') logger.debug('Storage supports resumable downloads')
extra_headers['Accept-Ranges'] = 'bytes' extra_headers['Accept-Ranges'] = 'bytes'
@ -114,21 +112,23 @@ def head_image_layer(namespace, repository, image_id, headers):
@anon_protect @anon_protect
def get_image_layer(namespace, repository, image_id, headers): def get_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository) permission = ReadRepositoryPermission(namespace, repository)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
logger.debug('Checking repo permissions') logger.debug('Checking repo permissions')
if permission.can() or model.repository_is_public(namespace, repository): if permission.can() or (repository_ref is not None and repository_ref.is_public):
repo = model.get_repository(namespace, repository) if repository_ref is None:
if repo.kind != 'image': abort(404)
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
logger.debug('Looking up placement locations and path') legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id) if legacy_image is None:
if not locations or not path:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
path = legacy_image.blob.storage_path
try: try:
logger.debug('Looking up the direct download URL for path: %s', path) logger.debug('Looking up the direct download URL for path: %s', path)
direct_download_url = store.get_direct_download_url(locations, path, request.remote_addr) direct_download_url = store.get_direct_download_url(legacy_image.blob.placements, path,
request.remote_addr)
if direct_download_url: if direct_download_url:
logger.debug('Returning direct download URL') logger.debug('Returning direct download URL')
resp = redirect(direct_download_url) resp = redirect(direct_download_url)
@ -137,7 +137,7 @@ def get_image_layer(namespace, repository, image_id, headers):
# Close the database handle here for this process before we send the long download. # Close the database handle here for this process before we send the long download.
database.close_db_filter(None) database.close_db_filter(None)
logger.debug('Streaming layer data') logger.debug('Streaming layer data')
return Response(store.stream_read(locations, path), headers=headers) return Response(store.stream_read(legacy_image.blob.placements, path), headers=headers)
except (IOError, AttributeError): except (IOError, AttributeError):
logger.exception('Image layer data not found') logger.exception('Image layer data not found')
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
@ -156,31 +156,31 @@ def put_image_layer(namespace, repository, image_id):
if not permission.can(): if not permission.can():
abort(403) abort(403)
repo = model.get_repository(namespace, repository) repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if repo.kind != 'image': if repository_ref is None:
msg = 'This repository is for managing %s resources and not container images.' % repo.kind abort(403)
abort(405, message=msg, image_id=image_id)
logger.debug('Retrieving image') logger.debug('Retrieving image')
if model.storage_exists(namespace, repository, image_id): legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
if legacy_image is not None and not legacy_image.uploading:
exact_abort(409, 'Image already exists') exact_abort(409, 'Image already exists')
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id) logger.debug('Checking for image in manifest builder')
if v1_metadata is None: builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
if builder is None:
abort(400)
layer = builder.lookup_layer(image_id)
if layer is None:
abort(404) abort(404)
logger.debug('Storing layer data') logger.debug('Storing layer data')
input_stream = request.stream input_stream = request.stream
if request.headers.get('transfer-encoding') == 'chunked': if request.headers.get('transfer-encoding') == 'chunked':
# Careful, might work only with WSGI servers supporting chunked # Careful, might work only with WSGI servers supporting chunked
# encoding (Gunicorn) # encoding (Gunicorn)
input_stream = request.environ['wsgi.input'] input_stream = request.environ['wsgi.input']
repository_ref = registry_model.lookup_repository(namespace, repository)
if repository_ref is None:
abort(404)
expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'] expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'], settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'],
bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'], bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'],
@ -190,13 +190,13 @@ def put_image_layer(namespace, repository, image_id):
# Add a handler that copies the data into a temp file. This is used to calculate the tarsum, # Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
# which is only needed for older versions of Docker. # which is only needed for older versions of Docker.
requires_tarsum = session.get('checksum_format') == 'tarsum' requires_tarsum = bool(builder.get_layer_checksums(layer))
if requires_tarsum: if requires_tarsum:
tmp, tmp_hndlr = store.temp_store_handler() tmp, tmp_hndlr = store.temp_store_handler()
extra_handlers.append(tmp_hndlr) extra_handlers.append(tmp_hndlr)
# Add a handler which computes the simple Docker V1 checksum. # Add a handler which computes the simple Docker V1 checksum.
h, sum_hndlr = checksums.simple_checksum_handler(v1_metadata.compat_json) h, sum_hndlr = checksums.simple_checksum_handler(layer.v1_metadata_string)
extra_handlers.append(sum_hndlr) extra_handlers.append(sum_hndlr)
uploaded_blob = None uploaded_blob = None
@ -209,37 +209,34 @@ def put_image_layer(namespace, repository, image_id):
logger.exception('Exception when writing image data') logger.exception('Exception when writing image data')
abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id) abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)
# Save the blob for the image. # Compute the final checksum
model.update_image_blob(namespace, repository, image_id, uploaded_blob)
# Send a job to the work queue to replicate the image layer.
# TODO: move this into a better place.
queue_storage_replication(namespace, uploaded_blob)
# Append the computed checksum.
csums = [] csums = []
csums.append('sha256:{0}'.format(h.hexdigest())) csums.append('sha256:{0}'.format(h.hexdigest()))
try: try:
if requires_tarsum: if requires_tarsum:
tmp.seek(0) tmp.seek(0)
csums.append(checksums.compute_tarsum(tmp, v1_metadata.compat_json)) csums.append(checksums.compute_tarsum(tmp, layer.v1_metadata_string))
tmp.close() tmp.close()
except (IOError, checksums.TarError) as exc: except (IOError, checksums.TarError) as exc:
logger.debug('put_image_layer: Error when computing tarsum %s', exc) logger.debug('put_image_layer: Error when computing tarsum %s', exc)
if v1_metadata.checksum is None: # If there was already a precomputed checksum, validate against it now.
# We don't have a checksum stored yet, that's fine skipping the check. if builder.get_layer_checksums(layer):
# Not removing the mark though, image is not downloadable yet. checksum = builder.get_layer_checksums(layer)[0]
session['checksum'] = csums if not builder.validate_layer_checksum(layer, checksum):
session['content_checksum'] = uploaded_blob.digest logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
return make_response('true', 200) builder.get_layer_checksums(layer))
abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
image_id=image_id)
# We check if the checksums provided matches one the one we computed # Assign the blob to the layer in the manifest.
if v1_metadata.checksum not in csums: if not builder.assign_layer_blob(layer, uploaded_blob, csums):
logger.warning('put_image_layer: Wrong checksum') abort(500, 'Something went wrong')
abort(400, 'Checksum mismatch; ignoring the layer for image %(image_id)s',
issue='checksum-mismatch', image_id=image_id) # Send a job to the work queue to replicate the image layer.
# TODO: move this into a better place.
queue_storage_replication(namespace, uploaded_blob)
return make_response('true', 200) return make_response('true', 200)
@ -255,10 +252,9 @@ def put_image_checksum(namespace, repository, image_id):
if not permission.can(): if not permission.can():
abort(403) abort(403)
repo = model.get_repository(namespace, repository) repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if repo.kind != 'image': if repository_ref is None:
msg = 'This repository is for managing %s resources and not container images.' % repo.kind abort(403)
abort(405, message=msg, image_id=image_id)
# Docker Version < 0.10 (tarsum+sha): # Docker Version < 0.10 (tarsum+sha):
old_checksum = request.headers.get('X-Docker-Checksum') old_checksum = request.headers.get('X-Docker-Checksum')
@ -266,40 +262,27 @@ def put_image_checksum(namespace, repository, image_id):
# Docker Version >= 0.10 (sha): # Docker Version >= 0.10 (sha):
new_checksum = request.headers.get('X-Docker-Checksum-Payload') new_checksum = request.headers.get('X-Docker-Checksum-Payload')
# Store whether we need to calculate the tarsum.
if new_checksum:
session['checksum_format'] = 'sha256'
else:
session['checksum_format'] = 'tarsum'
checksum = new_checksum or old_checksum checksum = new_checksum or old_checksum
if not checksum: if not checksum:
abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum', abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum',
image_id=image_id) image_id=image_id)
if not session.get('checksum'): logger.debug('Checking for image in manifest builder')
abort(400, 'Checksum not found in Cookie for image %(image_id)s', builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
issue='missing-checksum-cookie', image_id=image_id) if builder is None:
abort(400)
logger.debug('Looking up repo image') layer = builder.lookup_layer(image_id)
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id) if layer is None:
if not v1_metadata: abort(404)
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
logger.debug('Looking up repo layer data') if old_checksum:
if not v1_metadata.compat_json: builder.save_precomputed_checksum(layer, checksum)
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id) return make_response('true', 200)
logger.debug('Storing image and checksum') if not builder.validate_layer_checksum(layer, checksum):
content_checksum = session.get('content_checksum', None) logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
checksum_parts = checksum.split(':') builder.get_layer_checksums(layer))
if len(checksum_parts) != 2:
abort(400, 'Invalid checksum format')
if checksum not in session.get('checksum', []):
logger.debug('session checksums: %s', session.get('checksum', []))
logger.debug('client supplied checksum: %s', checksum)
logger.debug('put_image_checksum: Wrong checksum')
abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch', abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
image_id=image_id) image_id=image_id)
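
Condensed, the new checksum handling in this endpoint (together with put_image_layer above) works roughly as follows; this is a sketch of the logic in the diff, with builder, layer, and the checksum variables assumed to come from the surrounding handler:

if old_checksum:
  # Legacy (pre-0.10) clients: record the tarsum-style checksum so that
  # put_image_layer knows to compute a tarsum and validate against it.
  builder.save_precomputed_checksum(layer, checksum)
  return make_response('true', 200)

# Newer clients: validate the sha256 payload checksum against the checksums
# recorded when the uploaded blob was assigned to this layer.
if not builder.validate_layer_checksum(layer, checksum):
  abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
        image_id=image_id)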
@@ -316,27 +299,22 @@ def put_image_checksum(namespace, repository, image_id):
 def get_image_json(namespace, repository, image_id, headers):
   logger.debug('Checking repo permissions')
   permission = ReadRepositoryPermission(namespace, repository)
-  if not permission.can() and not model.repository_is_public(namespace, repository):
+  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
+  if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
     abort(403)

-  repo = model.get_repository(namespace, repository)
-  if repo.kind != 'image':
-    msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-    abort(405, message=msg, image_id=image_id)
-
   logger.debug('Looking up repo image')
-  v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
-  if v1_metadata is None:
+  legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
+  if legacy_image is None:
     flask_abort(404)

-  logger.debug('Looking up repo layer size')
-  size = model.get_image_size(namespace, repository, image_id)
+  size = legacy_image.blob.compressed_size
   if size is not None:
     # Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
     # so handle this case rather than failing.
     headers['X-Docker-Size'] = str(size)

-  response = make_response(v1_metadata.compat_json, 200)
+  response = make_response(legacy_image.v1_metadata_string, 200)
   response.headers.extend(headers)
   return response
@@ -351,20 +329,19 @@ def get_image_json(namespace, repository, image_id, headers):
 def get_image_ancestry(namespace, repository, image_id, headers):
   logger.debug('Checking repo permissions')
   permission = ReadRepositoryPermission(namespace, repository)
-  if not permission.can() and not model.repository_is_public(namespace, repository):
+  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
+  if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
     abort(403)

-  repo = model.get_repository(namespace, repository)
-  if repo.kind != 'image':
-    msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-    abort(405, message=msg, image_id=image_id)
-
-  ancestry_docker_ids = model.image_ancestry(namespace, repository, image_id)
-  if ancestry_docker_ids is None:
+  logger.debug('Looking up repo image')
+  legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_parents=True)
+  if legacy_image is None:
     abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)

-  # We can not use jsonify here because we are returning a list not an object
-  response = make_response(json.dumps(ancestry_docker_ids), 200)
+  # NOTE: We can not use jsonify here because we are returning a list not an object.
+  ancestor_ids = ([legacy_image.docker_image_id] +
+                  [a.docker_image_id for a in legacy_image.parents])
+  response = make_response(json.dumps(ancestor_ids), 200)
   response.headers.extend(headers)
   return response
@@ -380,10 +357,13 @@ def put_image_json(namespace, repository, image_id):
   if not permission.can():
     abort(403)

-  repo = model.get_repository(namespace, repository)
-  if repo.kind != 'image':
-    msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-    abort(405, message=msg, image_id=image_id)
+  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
+  if repository_ref is None:
+    abort(403)
+
+  builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
+  if builder is None:
+    abort(400)

   logger.debug('Parsing image JSON')
   try:
@@ -405,46 +385,12 @@ def put_image_json(namespace, repository, image_id):
           image_id=image_id)

   logger.debug('Looking up repo image')
-  if not model.repository_exists(namespace, repository):
-    abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
-          namespace=namespace, repository=repository)
-
-  parent_id = data.get('parent', None)
-  if parent_id:
-    logger.debug('Looking up parent image')
-    if model.docker_v1_metadata(namespace, repository, parent_id) is None:
-      abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
-            issue='invalid-request', image_id=image_id, parent_id=parent_id)
-
-  v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
-  if v1_metadata is None:
-    username = get_authenticated_user() and get_authenticated_user().username
-    logger.debug('Image not found, creating or linking image with initiating user context: %s',
-                 username)
-    location_pref = store.preferred_locations[0]
-    model.create_or_link_image(username, namespace, repository, image_id, location_pref)
-    v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
-
-  # Create a temporary tag to prevent this image from getting garbage collected while the push
-  # is in progress.
-  model.create_temp_hidden_tag(namespace, repository, image_id,
-                               app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
-
-  logger.debug('Checking if image already exists')
-  if v1_metadata and not model.is_image_uploading(namespace, repository, image_id):
-    exact_abort(409, 'Image already exists')
-
-  model.update_image_uploading(namespace, repository, image_id, True)
-
-  # If we reach that point, it means that this is a new image or a retry
-  # on a failed push, save the metadata
-  command_list = data.get('container_config', {}).get('Cmd', None)
-  command = json.dumps(command_list) if command_list else None
-
-  logger.debug('Setting image metadata')
-  model.update_docker_v1_metadata(namespace, repository, image_id,
-                                  data.get('created'),
-                                  data.get('comment'), command, uploaded_metadata, parent_id)
+  location_pref = store.preferred_locations[0]
+  username = get_authenticated_user() and get_authenticated_user().username
+  layer = builder.start_layer(image_id, uploaded_metadata, location_pref, username,
+                              app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
+  if layer is None:
+    abort(400, 'Image %(image_id)s has invalid metadata',
+          issue='invalid-request', image_id=image_id)

   return make_response('true', 200)
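For reference, put_image_json is the step that now feeds builder.start_layer(); its request body is the plain V1 image JSON that the removed code used to unpack field by field. A hedged sketch of the kind of payload a client submits (hypothetical IDs and host; the field names are the ones the old code read via data.get(), and the URL follows the standard Docker Registry V1 layout):

    import json
    import requests

    # Hypothetical V1 image JSON; 'parent', 'created', 'comment' and
    # 'container_config' are the fields the pre-OCI code path extracted.
    image_json = {
        'id': '0123456789abcdef',
        'parent': 'fedcba9876543210',
        'created': '2018-09-30T00:00:00Z',
        'comment': 'example layer',
        'container_config': {'Cmd': ['/bin/sh']},
    }

    requests.put('http://localhost:5000/v1/images/%s/json' % image_json['id'],
                 data=json.dumps(image_json))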

View file

@@ -5,10 +5,10 @@ from flask import abort, request, jsonify, make_response, session
 from auth.decorators import process_auth
 from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
-from data import model
+from data.registry_model import registry_model
+from data.registry_model.manifestbuilder import lookup_manifest_builder
 from endpoints.decorators import anon_protect, parse_repository_name
 from endpoints.v1 import v1_bp
-from endpoints.v1.models_pre_oci import pre_oci_model as model
 from util.audit import track_and_log
 from util.names import TAG_ERROR, TAG_REGEX
@@ -21,15 +21,13 @@ logger = logging.getLogger(__name__)
 @parse_repository_name()
 def get_tags(namespace_name, repo_name):
   permission = ReadRepositoryPermission(namespace_name, repo_name)
-  if permission.can() or model.repository_is_public(namespace_name, repo_name):
-    repo = model.get_repository(namespace_name, repo_name)
-    if repo.kind != 'image':
-      msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-      abort(405, message=msg, namespace=namespace_name)
-
-    tags = model.list_tags(namespace_name, repo_name)
-    tag_map = {tag.name: tag.image.docker_image_id for tag in tags}
+  repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
+  if permission.can() or (repository_ref is not None and repository_ref.is_public):
+    if repository_ref is None:
+      abort(404)
+
+    tags = registry_model.list_repository_tags(repository_ref, include_legacy_images=True)
+    tag_map = {tag.name: tag.legacy_image.docker_image_id for tag in tags}
     return jsonify(tag_map)

   abort(403)
@@ -41,18 +39,16 @@ def get_tags(namespace_name, repo_name):
 @parse_repository_name()
 def get_tag(namespace_name, repo_name, tag):
   permission = ReadRepositoryPermission(namespace_name, repo_name)
-  if permission.can() or model.repository_is_public(namespace_name, repo_name):
-    repo = model.get_repository(namespace_name, repo_name)
-    if repo.kind != 'image':
-      msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-      abort(405, message=msg, namespace=namespace_name)
-
-    image_id = model.find_image_id_by_tag(namespace_name, repo_name, tag)
-    if image_id is None:
+  repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
+  if permission.can() or (repository_ref is not None and repository_ref.is_public):
+    if repository_ref is None:
       abort(404)

-    resp = make_response('"%s"' % image_id)
+    tag = registry_model.get_repo_tag(repository_ref, tag, include_legacy_image=True)
+    if tag is None:
+      abort(404)
+
+    resp = make_response('"%s"' % tag.legacy_image.docker_image_id)
     resp.headers['Content-Type'] = 'application/json'
     return resp
@@ -65,24 +61,33 @@ def get_tag(namespace_name, repo_name, tag):
 @parse_repository_name()
 def put_tag(namespace_name, repo_name, tag):
   permission = ModifyRepositoryPermission(namespace_name, repo_name)
-  if permission.can():
+  repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
+  if permission.can() and repository_ref is not None:
     if not TAG_REGEX.match(tag):
       abort(400, TAG_ERROR)

-    repo = model.get_repository(namespace_name, repo_name)
-    if repo.kind != 'image':
-      msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-      abort(405, message=msg, namespace=namespace_name)
-
     image_id = json.loads(request.data)
-    model.create_or_update_tag(namespace_name, repo_name, image_id, tag)

-    # Store the updated tag.
-    if 'pushed_tags' not in session:
-      session['pushed_tags'] = {}
-
-    session['pushed_tags'][tag] = image_id
+    # Check for the image ID first in a builder (for an in-progress push).
+    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
+    if builder is not None:
+      layer = builder.lookup_layer(image_id)
+      if layer is not None:
+        commited_tag = builder.commit_tag_and_manifest(tag, layer)
+        if commited_tag is None:
+          abort(400)
+
+        return make_response('Created', 200)
+
+    # Check if there is an existing image we should use (for PUT calls outside of a normal push
+    # operation).
+    legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
+    if legacy_image is None:
+      abort(400)
+
+    if registry_model.retarget_tag(repository_ref, tag, legacy_image) is None:
+      abort(400)

     return make_response('Created', 200)
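The rewritten put_tag has two paths: during an in-progress push it commits the tag through the session's manifest builder (commit_tag_and_manifest), while a standalone PUT simply retargets the tag at an existing legacy image via registry_model.retarget_tag. In both cases the request body is the JSON-quoted docker image ID, which is also what the test protocol's new tag() helper later in this commit sends. A minimal sketch of the standalone case (hypothetical host, repository, and image ID; the URL shape is the one used by the protocol helper):

    import requests

    # Hypothetical values for illustration only.
    BASE_URL = 'http://localhost:5000'
    REPO = 'devtable/newrepo'
    IMAGE_ID = '0123456789abcdef'

    # The body is a JSON string literal -- '"<image id>"' -- because the
    # handler recovers it with json.loads(request.data).
    requests.put('%s/v1/repositories/%s/tags/%s' % (BASE_URL, REPO, 'anothertag'),
                 data='"%s"' % IMAGE_ID)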
@@ -95,18 +100,13 @@ def put_tag(namespace_name, repo_name, tag):
 @parse_repository_name()
 def delete_tag(namespace_name, repo_name, tag):
   permission = ModifyRepositoryPermission(namespace_name, repo_name)
-  if permission.can():
-    repo = model.get_repository(namespace_name, repo_name)
-    if repo is None:
-      abort(403)
-
-    if repo.kind != 'image':
-      msg = 'This repository is for managing %s resources and not container images.' % repo.kind
-      abort(405, message=msg, namespace=namespace_name)
-
-    model.delete_tag(namespace_name, repo_name, tag)
-    track_and_log('delete_tag', model.get_repository(namespace_name, repo_name), tag=tag)
+  repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
+  if permission.can() and repository_ref is not None:
+    if not registry_model.delete_tag(repository_ref, tag):
+      abort(404)
+
+    track_and_log('delete_tag', repository_ref, tag=tag)
     return make_response('Deleted', 200)

   abort(403)

View file

@@ -3,7 +3,7 @@ import json
 from cStringIO import StringIO
 from enum import Enum, unique

-from digest.checksums import compute_simple
+from digest.checksums import compute_simple, compute_tarsum
 from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
                                      PullResult)
@@ -31,7 +31,7 @@ class V1Protocol(RegistryProtocol):
     V1ProtocolSteps.GET_IMAGES: {
       Failures.UNAUTHENTICATED: 403,
       Failures.UNAUTHORIZED: 403,
-      Failures.APP_REPOSITORY: 405,
+      Failures.APP_REPOSITORY: 404,
       Failures.ANONYMOUS_NOT_ALLOWED: 401,
       Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
       Failures.NAMESPACE_DISABLED: 400,
@@ -93,7 +93,7 @@ class V1Protocol(RegistryProtocol):
     # GET /v1/repositories/{namespace}/{repository}/tags
     image_ids = self.conduct(session, 'GET', prefix + 'tags', headers=headers).json()
-    assert len(image_ids.values()) == len(tag_names)
+    assert len(image_ids.values()) >= len(tag_names)

     for tag_name in tag_names:
       if tag_name not in image_ids:
@@ -165,13 +165,21 @@ class V1Protocol(RegistryProtocol):
                               expected_status=(200, expected_failure,
                                                V1ProtocolSteps.PUT_IMAGE_JSON))
       if response.status_code != 200:
-        break
+        return
+
+      # PUT /v1/images/{imageID}/checksum (old style)
+      old_checksum = compute_tarsum(StringIO(image.bytes), json.dumps(image_json_data))
+      checksum_headers = {'X-Docker-Checksum': old_checksum}
+      checksum_headers.update(headers)
+
+      self.conduct(session, 'PUT', '/v1/images/%s/checksum' % image.id,
+                   headers=checksum_headers)

       # PUT /v1/images/{imageID}/layer
       self.conduct(session, 'PUT', '/v1/images/%s/layer' % image.id,
                    data=StringIO(image.bytes), headers=headers)

-      # PUT /v1/images/{imageID}/checksum
+      # PUT /v1/images/{imageID}/checksum (new style)
       checksum = compute_simple(StringIO(image.bytes), json.dumps(image_json_data))
       checksum_headers = {'X-Docker-Checksum-Payload': checksum}
       checksum_headers.update(headers)
@@ -208,3 +216,12 @@ class V1Protocol(RegistryProtocol):
                  '/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name),
                  auth=auth,
                  expected_status=(200, expected_failure, V1ProtocolSteps.DELETE_TAG))
+
+  def tag(self, session, namespace, repo_name, tag_name, image, credentials=None,
+          expected_failure=None, options=None):
+    auth = self._auth_for_credentials(credentials)
+    self.conduct(session, 'PUT',
+                 '/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name),
+                 data='"%s"' % image.id,
+                 auth=auth,
+                 expected_status=(200, expected_failure, V1ProtocolSteps.PUT_TAG))

View file

@@ -97,6 +97,11 @@ class RegistryProtocol(object):
         the given credentials.
     """

+  @abstractmethod
+  def delete(self, session, namespace, repo_name, tag_names, credentials=None,
+             expected_failure=None, options=None):
+    """ Deletes some tags. """
+
   def repo_name(self, namespace, repo_name):
     if namespace:
       return '%s/%s' % (namespace, repo_name)

View file

@@ -226,7 +226,7 @@ def test_push_pull_logging(credentials, namespace, expected_performer, pusher, p
   credentials = credentials(api_caller, registry_server_executor.on(liveserver))

   # Push to the repository with the specified credentials.
-  pusher.push(liveserver_session, namespace, 'newrepo', 'latest', basic_images,
+  pusher.push(liveserver_session, namespace, 'newrepo', 'anothertag', basic_images,
               credentials=credentials)

   # Check the logs for the push.
@@ -243,7 +243,7 @@ def test_push_pull_logging(credentials, namespace, expected_performer, pusher, p
   assert logs[0]['performer']['name'] == expected_performer

   # Pull the repository to verify.
-  puller.pull(liveserver_session, namespace, 'newrepo', 'latest', basic_images,
+  puller.pull(liveserver_session, namespace, 'newrepo', 'anothertag', basic_images,
               credentials=credentials)

   # Check the logs for the pull.
@@ -1299,3 +1299,20 @@ def test_push_pull_same_blobs(pusher, puller, liveserver_session, app_reloader):
   # Pull the repository to verify.
   puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', images,
               credentials=credentials, options=options)
+
+
+def test_push_tag_existing_image(v1_protocol, puller, basic_images, liveserver_session, app_reloader):
+  """ Test: Push a new tag on an existing manifest/image. """
+  credentials = ('devtable', 'password')
+
+  # Push a new repository.
+  result = v1_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
+                            credentials=credentials)
+
+  # Push the same image/manifest to another tag in the repository.
+  v1_protocol.tag(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images[-1],
+                  credentials=credentials)
+
+  # Pull the repository to verify.
+  puller.pull(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images,
+              credentials=credentials)

View file

@@ -793,26 +793,6 @@ class RegistryTestsMixin(object):
     # Pull the repository to verify.
     self.do_pull('public', 'foo.bar', 'public', 'password')

-  def test_application_repo(self):
-    # Create an application repository via the API.
-    self.conduct_api_login('devtable', 'password')
-    data = {
-      'repository': 'someapprepo',
-      'visibility': 'private',
-      'repo_kind': 'application',
-      'description': 'test app repo',
-    }
-    self.conduct('POST', '/api/v1/repository', json_data=data, expected_code=201)
-
-    # Try to push to the repo, which should fail with a 405.
-    self.do_push('devtable', 'someapprepo', 'devtable', 'password',
-                 expect_failure=FailureCodes.APP_REPOSITORY)
-
-    # Try to pull from the repo, which should fail with a 405.
-    self.do_pull('devtable', 'someapprepo', 'devtable', 'password',
-                 expect_failure=FailureCodes.APP_REPOSITORY)
-
   def test_middle_layer_different_sha(self):
     if self.push_version == 'v1':
       # No SHAs to munge in V1.

View file

@@ -137,11 +137,11 @@ def build_v1_index_specs():
     IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
                     PUBLIC_REPO, 403, 403, 403, 403, 403).set_method('PUT'),
     IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
-                    PRIVATE_REPO, 403, 403, 403, 403, 404).set_method('PUT'),
+                    PRIVATE_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
     IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
-                    ORG_REPO, 403, 403, 403, 403, 404).set_method('PUT'),
+                    ORG_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
     IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
-                    ANOTHER_ORG_REPO, 403, 403, 403, 403, 404).set_method('PUT'),
+                    ANOTHER_ORG_REPO, 403, 403, 403, 403, 400).set_method('PUT'),

     IndexV1TestSpec(url_for('v1.put_image_checksum',
                             image_id=FAKE_IMAGE_ID),
@@ -205,11 +205,11 @@ def build_v1_index_specs():
     IndexV1TestSpec(url_for('v1.update_images', repository=PUBLIC_REPO),
                     NO_REPO, 403, 403, 403, 403, 403).set_method('PUT'),
     IndexV1TestSpec(url_for('v1.update_images', repository=PRIVATE_REPO),
-                    NO_REPO, 403, 403, 403, 403, 204).set_method('PUT'),
+                    NO_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
     IndexV1TestSpec(url_for('v1.update_images', repository=ORG_REPO), NO_REPO,
-                    403, 403, 403, 403, 204).set_method('PUT'),
+                    403, 403, 403, 403, 400).set_method('PUT'),
     IndexV1TestSpec(url_for('v1.update_images', repository=ANOTHER_ORG_REPO), NO_REPO,
-                    403, 403, 403, 403, 204).set_method('PUT'),
+                    403, 403, 403, 403, 400).set_method('PUT'),

     IndexV1TestSpec(url_for('v1.get_repository_images',
                             repository=PUBLIC_REPO),