Change V1 to use the manifest builder and new registry data model

Joseph Schorr 2018-09-24 18:57:27 -04:00
parent 65d5be23c7
commit 4520f9e842
12 changed files with 291 additions and 689 deletions


@ -1,3 +1,4 @@
import logging
import json
import uuid
@ -9,11 +10,11 @@ from data import model
from data.database import db_transaction
from data.registry_model import registry_model
logger = logging.getLogger(__name__)
ManifestLayer = namedtuple('ManifestLayer', ['layer_id', 'v1_metadata', 'db_id'])
ManifestLayer = namedtuple('ManifestLayer', ['layer_id', 'v1_metadata_string', 'db_id'])
_BuilderState = namedtuple('_BuilderState', ['builder_id', 'images', 'tags', 'checksums'])
_SESSION_KEY = '__manifestbuilder'
@ -61,7 +62,8 @@ class _ManifestBuilder(object):
return [registry_model.get_repo_tag(self._repository_ref, tag_name, include_legacy_image=True)
for tag_name in self._builder_state.tags.keys()]
def start_layer(self, layer_id, v1_metadata, location_name, calling_user, temp_tag_expiration):
def start_layer(self, layer_id, v1_metadata_string, location_name, calling_user,
temp_tag_expiration):
""" Starts a new layer with the given ID to be placed into a manifest. Returns the layer
started or None if an error occurred.
"""
@ -73,6 +75,15 @@ class _ManifestBuilder(object):
namespace_name = repository.namespace_user.username
repo_name = repository.name
try:
v1_metadata = json.loads(v1_metadata_string)
except (ValueError, TypeError):
logger.exception('Exception when trying to parse V1 metadata JSON for layer %s', layer_id)
return None
# Sanity check that the ID matches the v1 metadata.
if layer_id != v1_metadata['id']:
return None
@ -91,7 +102,7 @@ class _ManifestBuilder(object):
if existing_image is not None:
self._builder_state.images[layer_id] = existing_image.id
self._save_to_session()
return ManifestLayer(layer_id, v1_metadata, existing_image.id)
return ManifestLayer(layer_id, v1_metadata_string, existing_image.id)
with db_transaction():
# Otherwise, create a new legacy image and point a temporary tag at it.
@ -117,7 +128,7 @@ class _ManifestBuilder(object):
self._builder_state.images[layer_id] = created.id
self._save_to_session()
return ManifestLayer(layer_id, v1_metadata, created.id)
return ManifestLayer(layer_id, v1_metadata_string, created.id)
def lookup_layer(self, layer_id):
""" Returns a layer with the given ID under this builder. If none exists, returns None. """
@ -128,7 +139,7 @@ class _ManifestBuilder(object):
if image is None:
return None
return ManifestLayer(layer_id, json.loads(image.v1_json_metadata), image.id)
return ManifestLayer(layer_id, image.v1_json_metadata, image.id)
def assign_layer_blob(self, layer, blob, computed_checksums):
""" Assigns a blob to a layer. """
@ -151,7 +162,18 @@ class _ManifestBuilder(object):
def validate_layer_checksum(self, layer, checksum):
""" Returns whether the checksum for a layer matches that specified.
"""
return checksum in self._builder_state.checksums.get(layer.layer_id)
return checksum in self.get_layer_checksums(layer)
def get_layer_checksums(self, layer):
""" Returns the registered defined for the layer, if any. """
return self._builder_state.checksums.get(layer.layer_id) or []
def save_precomputed_checksum(self, layer, checksum):
""" Saves a precomputed checksum for a layer. """
checksums = self._builder_state.checksums.get(layer.layer_id) or []
checksums.append(checksum)
self._builder_state.checksums[layer.layer_id] = checksums
self._save_to_session()
def commit_tag_and_manifest(self, tag_name, layer):
""" Commits a new tag + manifest for that tag to the repository with the given name,
@ -171,7 +193,7 @@ class _ManifestBuilder(object):
def done(self):
""" Marks the manifest builder as complete and disposes of any state. This call is optional
and it is expected manifest builders will eventually time out if unusued for an
and it is expected manifest builders will eventually time out if unused for an
extended period of time.
"""
session.pop(_SESSION_KEY, None)
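To make the new builder API concrete, the sketch below shows how the V1 endpoints drive it end to end. It is a minimal illustration of the methods above, not code from this commit: it assumes a Flask request context, the repository and location names are taken from the test fixtures, and uploaded_blob, precomputed_checksum, computed_checksums, and client_checksum are placeholders for values the endpoints actually obtain from the blob uploader and the request headers.

import json

from data.registry_model import registry_model
from data.registry_model.manifestbuilder import create_manifest_builder

# Resolve the repository through the new registry data model.
repository_ref = registry_model.lookup_repository('devtable', 'complex')

# Start a builder for the push; index.py stores builder.builder_id in the session.
builder = create_manifest_builder(repository_ref)

# Layers are now started with the raw V1 JSON string rather than a parsed dict.
v1_metadata_string = json.dumps({'id': 'somelayer', 'parent': None})
layer = builder.start_layer('somelayer', v1_metadata_string, 'local_us',
                            calling_user=None, temp_tag_expiration=60)

# An old-style checksum PUT can arrive before the layer data; save it for later.
builder.save_precomputed_checksum(layer, precomputed_checksum)

# Once the blob has been uploaded, bind it to the layer with the computed checksums.
builder.assign_layer_blob(layer, uploaded_blob, computed_checksums)
if not builder.validate_layer_checksum(layer, client_checksum):
    raise Exception('Checksum mismatch')

# Finally, commit a tag (and its backing manifest) and dispose of the session state.
builder.commit_tag_and_manifest('latest', layer)
builder.done()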


@ -1,4 +1,5 @@
import hashlib
import json
from io import BytesIO
@ -45,8 +46,8 @@ def test_build_manifest(layers, fake_session, pre_oci_model):
blobs_by_layer = {}
for layer_id, parent_id, layer_bytes in layers:
# Start a new layer.
assert builder.start_layer(layer_id, {'id': layer_id, 'parent': parent_id}, 'local_us', None,
60)
assert builder.start_layer(layer_id, json.dumps({'id': layer_id, 'parent': parent_id}),
'local_us', None, 60)
checksum = hashlib.sha1(layer_bytes).hexdigest()
@ -90,5 +91,5 @@ def test_build_manifest_missing_parent(fake_session, pre_oci_model):
repository_ref = pre_oci_model.lookup_repository('devtable', 'complex')
builder = create_manifest_builder(repository_ref)
assert builder.start_layer('somelayer', {'id': 'somelayer', 'parent': 'someparent'},
assert builder.start_layer('somelayer', json.dumps({'id': 'somelayer', 'parent': 'someparent'}),
'local_us', None, 60) is None


@ -14,9 +14,11 @@ from auth.permissions import (
ModifyRepositoryPermission, UserAdminPermission, ReadRepositoryPermission,
CreateRepositoryPermission, repository_read_grant, repository_write_grant)
from auth.signedgrant import generate_signed_token
from data import model
from data.registry_model import registry_model
from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
from endpoints.decorators import anon_protect, anon_allowed, parse_repository_name
from endpoints.v1 import v1_bp
from endpoints.v1.models_pre_oci import pre_oci_model as model
from notifications import spawn_notification
from util.audit import track_and_log
from util.http import abort
@ -33,7 +35,9 @@ class GrantType(object):
def ensure_namespace_enabled(f):
@wraps(f)
def wrapper(namespace_name, repo_name, *args, **kwargs):
if not model.is_namespace_enabled(namespace_name):
namespace = model.user.get_namespace_user(namespace_name)
is_namespace_enabled = namespace is not None and namespace.enabled
if not is_namespace_enabled:
abort(400, message='Namespace is disabled. Please contact your system administrator.')
return f(namespace_name, repo_name, *args, **kwargs)
@ -148,11 +152,13 @@ def update_user(username):
if 'password' in update_request:
logger.debug('Updating user password')
model.change_user_password(get_authenticated_user(), update_request['password'])
model.user.change_password(get_authenticated_user(), update_request['password'])
return jsonify({
'username': get_authenticated_user().username,
'email': get_authenticated_user().email})
'email': get_authenticated_user().email,
})
abort(403)
@ -168,25 +174,22 @@ def create_repository(namespace_name, repo_name):
abort(400, message='Invalid repository name. Repository names cannot contain slashes.')
logger.debug('Looking up repository %s/%s', namespace_name, repo_name)
repo = model.get_repository(namespace_name, repo_name)
logger.debug('Found repository %s/%s', namespace_name, repo_name)
if not repo and get_authenticated_user() is None:
repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
if repository_ref is None and get_authenticated_user() is None:
logger.debug('Attempt to create repository %s/%s without user auth', namespace_name, repo_name)
abort(401,
message='Cannot create a repository as a guest. Please login via "docker login" first.',
issue='no-login')
elif repo:
elif repository_ref:
modify_perm = ModifyRepositoryPermission(namespace_name, repo_name)
if not modify_perm.can():
abort(403,
message='You do not have permission to modify repository %(namespace)s/%(repository)s',
issue='no-repo-write-permission', namespace=namespace_name, repository=repo_name)
elif repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
elif repository_ref.kind != 'image':
msg = ('This repository is for managing %s resources and not container images.' %
repository_ref.kind)
abort(405, message=msg, namespace=namespace_name)
else:
create_perm = CreateRepositoryPermission(namespace_name)
if not create_perm.can():
@ -199,17 +202,27 @@ def create_repository(namespace_name, repo_name):
logger.debug('Creating repository %s/%s with owner: %s', namespace_name, repo_name,
get_authenticated_user().username)
model.create_repository(namespace_name, repo_name, get_authenticated_user())
repository_ref = model.repository.create_repository(namespace_name, repo_name,
get_authenticated_user())
if get_authenticated_user():
user_event_data = {
'action': 'push_start',
'repository': repo_name,
'namespace': namespace_name,}
'namespace': namespace_name,
}
event = userevents.get_event(get_authenticated_user().username)
event.publish_event_data('docker-cli', user_event_data)
# Start a new builder for the repository and save its ID in the session.
assert repository_ref
builder = create_manifest_builder(repository_ref)
logger.debug('Started repo push with manifest builder %s', builder)
if builder is None:
abort(404, message='Unknown repository', issue='unknown-repo')
session['manifest_builder'] = builder.builder_id
return make_response('Created', 201)
@ -224,24 +237,26 @@ def update_images(namespace_name, repo_name):
if permission.can():
logger.debug('Looking up repository')
repo = model.get_repository(namespace_name, repo_name)
if not repo:
repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
kind_filter='image')
if repository_ref is None:
# Make sure the repo actually exists.
abort(404, message='Unknown repository', issue='unknown-repo')
elif repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
if builder is None:
abort(400)
# Generate a job for each notification that has been added to this repo
logger.debug('Adding notifications for repository')
updated_tags = session.get('pushed_tags', {})
event_data = {
'updated_tags': updated_tags.keys(),
'updated_tags': [tag.name for tag in builder.committed_tags],
}
track_and_log('push_repo', repo)
spawn_notification(repo, 'repo_push', event_data)
builder.done()
track_and_log('push_repo', repository_ref)
spawn_notification(repository_ref, 'repo_push', event_data)
metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
return make_response('Updated', 204)
@ -255,24 +270,22 @@ def update_images(namespace_name, repo_name):
@generate_headers(scope=GrantType.READ_REPOSITORY)
@anon_protect
def get_repository_images(namespace_name, repo_name):
permission = ReadRepositoryPermission(namespace_name, repo_name)
repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
kind_filter='image')
# TODO invalidate token?
if permission.can() or model.repository_is_public(namespace_name, repo_name):
permission = ReadRepositoryPermission(namespace_name, repo_name)
if permission.can() or (repository_ref and repository_ref.is_public):
# We can't rely on permissions to tell us if a repo exists anymore
logger.debug('Looking up repository')
repo = model.get_repository(namespace_name, repo_name)
if not repo:
if repository_ref is None:
abort(404, message='Unknown repository', issue='unknown-repo')
elif repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
logger.debug('Building repository image response')
resp = make_response(json.dumps([]), 200)
resp.mimetype = 'application/json'
track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01)
track_and_log('pull_repo', repository_ref,
analytics_name='pull_repo_100x',
analytics_sample=0.01)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
return resp
@ -332,18 +345,21 @@ def _conduct_repo_search(username, query, limit=25, page=1):
offset = (page - 1) * limit
if query:
matching_repos = model.get_sorted_matching_repositories(query, username, limit=limit + 1,
offset=offset)
matching_repos = model.repository.get_filtered_matching_repositories(query,
filter_username=username,
offset=offset,
limit=limit + 1)
else:
matching_repos = []
results = []
for repo in matching_repos[0:limit]:
results.append({
'name': repo.namespace_name + '/' + repo.name,
'name': repo.namespace_user.username + '/' + repo.name,
'description': repo.description,
'is_public': repo.is_public,
'href': '/repository/' + repo.namespace_name + '/' + repo.name})
'is_public': model.repository.is_repository_public(repo),
'href': '/repository/' + repo.namespace_user.username + '/' + repo.name
})
# Defined: https://docs.docker.com/v1.6/reference/api/registry_api/
return {
@ -352,4 +368,5 @@ def _conduct_repo_search(username, query, limit=25, page=1):
'num_pages': page + 1 if len(matching_repos) > limit else page,
'page': page,
'page_size': limit,
'results': results,}
'results': results,
}
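For reference, each entry appended to results by the loop above has roughly the following shape; the values are illustrative only.

results_entry = {
    'name': 'devtable/complex',
    'description': 'An example repository',
    'is_public': False,
    'href': '/repository/devtable/complex',
}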


@ -1,210 +0,0 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class Repository(
namedtuple('Repository', ['id', 'name', 'namespace_name', 'description', 'is_public',
'kind'])):
"""
Repository represents a namespaced collection of tags.
:type id: int
:type name: string
:type namespace_name: string
:type description: string
:type is_public: bool
:type kind: string
"""
@add_metaclass(ABCMeta)
class DockerRegistryV1DataInterface(object):
"""
Interface that represents all data store interactions required by a Docker Registry v1.
"""
@abstractmethod
def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id):
"""
Returns all the placements for the image with the given V1 Docker ID, found under the given
repository or None if no image was found.
"""
pass
@abstractmethod
def docker_v1_metadata(self, namespace_name, repo_name, image_id):
"""
Returns various pieces of metadata associated with an image with the given V1 Docker ID,
including the checksum and its V1 JSON metadata.
"""
pass
@abstractmethod
def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str,
comment, command, compat_json, parent_image_id=None):
"""
Updates various pieces of V1 metadata associated with a particular image.
"""
pass
@abstractmethod
def storage_exists(self, namespace_name, repo_name, image_id):
"""
Returns whether storage already exists for the image with the V1 Docker ID under the given
repository.
"""
pass
@abstractmethod
def store_docker_v1_checksums(self, namespace_name, repo_name, image_id, checksum,
content_checksum):
"""
Stores the various V1 checksums for the image with the V1 Docker ID.
"""
pass
@abstractmethod
def is_image_uploading(self, namespace_name, repo_name, image_id):
"""
Returns whether the image with the V1 Docker ID is currently marked as uploading.
"""
pass
@abstractmethod
def update_image_uploading(self, namespace_name, repo_name, image_id, is_uploading):
"""
Marks the image with the V1 Docker ID with the given uploading status.
"""
pass
@abstractmethod
def update_image_blob(self, namespace_name, repo_name, image_id, blob):
"""
Updates the blob for the image with the given V1 Docker ID.
"""
pass
@abstractmethod
def get_image_size(self, namespace_name, repo_name, image_id):
"""
Returns the wire size of the image with the given Docker V1 ID.
"""
pass
@abstractmethod
def create_bittorrent_pieces(self, namespace_name, repo_name, image_id, pieces_bytes):
"""
Saves the BitTorrent piece hashes for the image with the given Docker V1 ID.
"""
pass
@abstractmethod
def image_ancestry(self, namespace_name, repo_name, image_id):
"""
Returns a list containing the full ancestry of Docker V1 IDs, in order, for the image with the
given Docker V1 ID.
"""
pass
@abstractmethod
def repository_exists(self, namespace_name, repo_name):
"""
Returns whether the repository with the given name and namespace exists.
"""
pass
@abstractmethod
def create_or_link_image(self, username, namespace_name, repo_name, image_id, storage_location):
"""
Adds the given image to the given repository, by either linking to an existing image visible to
the user with the given username, or creating a new one if no existing image matches.
"""
pass
@abstractmethod
def create_temp_hidden_tag(self, namespace_name, repo_name, image_id, expiration):
"""
Creates a hidden tag under the matching namespace pointing to the image with the given V1 Docker
ID.
"""
pass
@abstractmethod
def list_tags(self, namespace_name, repo_name):
"""
Returns all the tags defined in the repository with the given namespace and name.
"""
pass
@abstractmethod
def create_or_update_tag(self, namespace_name, repo_name, image_id, tag_name):
"""
Creates or updates a tag under the matching repository to point to the image with the given
Docker V1 ID.
"""
pass
@abstractmethod
def find_image_id_by_tag(self, namespace_name, repo_name, tag_name):
"""
Returns the Docker V1 image ID for the HEAD image for the tag with the given name under the
matching repository, or None if none.
"""
pass
@abstractmethod
def delete_tag(self, namespace_name, repo_name, tag_name):
"""
Deletes the given tag from the given repository.
"""
pass
@abstractmethod
def change_user_password(self, user, new_password):
"""
Changes the password associated with the given user.
"""
pass
@abstractmethod
def get_repository(self, namespace_name, repo_name):
"""
Returns the repository with the given name under the given namespace or None
if none.
"""
pass
@abstractmethod
def create_repository(self, namespace_name, repo_name, user=None):
"""
Creates a new repository under the given namespace with the given name, for
the given user.
"""
pass
@abstractmethod
def repository_is_public(self, namespace_name, repo_name):
"""
Returns whether the repository with the given name under the given namespace
is public. If no matching repository was found, returns False.
"""
pass
@abstractmethod
def validate_oauth_token(self, token):
""" Returns whether the given OAuth token validates. """
pass
@abstractmethod
def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0,
limit=25):
"""
Returns a sorted list of repositories matching the given search term.
"""
pass
@abstractmethod
def is_namespace_enabled(self, namespace_name):
""" Returns whether the given namespace exists and is enabled. """
pass


@ -1,193 +0,0 @@
from app import app, storage as store
from data import model
from data.database import db_transaction
from endpoints.v1.models_interface import DockerRegistryV1DataInterface, Repository
from util.morecollections import AttrDict
class PreOCIModel(DockerRegistryV1DataInterface):
"""
PreOCIModel implements the data model for the v1 Docker Registry protocol using a database schema
before it was changed to support the OCI specification.
"""
def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id):
image, placements = model.image.get_image_and_placements(namespace_name, repo_name, image_id)
if image is None:
return None, None
locations = {placement.location.name for placement in placements}
return locations, model.storage.get_layer_path(image.storage)
def docker_v1_metadata(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image(namespace_name, repo_name, image_id)
if repo_image is None:
return None
return AttrDict({
'namespace_name': namespace_name,
'repo_name': repo_name,
'image_id': image_id,
'checksum': repo_image.v1_checksum,
'compat_json': repo_image.v1_json_metadata,})
def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str,
comment, command, compat_json, parent_image_id=None):
parent_image = None
if parent_image_id is not None:
parent_image = model.image.get_repo_image(namespace_name, repo_name, parent_image_id)
model.image.set_image_metadata(image_id, namespace_name, repo_name, created_date_str, comment,
command, compat_json, parent=parent_image)
def storage_exists(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return False
if repo_image.storage.uploading:
return False
layer_path = model.storage.get_layer_path(repo_image.storage)
return store.exists(repo_image.storage.locations, layer_path)
def store_docker_v1_checksums(self, namespace_name, repo_name, image_id, checksum,
content_checksum):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return
assert repo_image.storage.content_checksum == content_checksum
with model.db_transaction():
repo_image.v1_checksum = checksum
repo_image.storage.save()
repo_image.save()
def is_image_uploading(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return False
return repo_image.storage.uploading
def update_image_uploading(self, namespace_name, repo_name, image_id, is_uploading):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return
repo_image.storage.uploading = is_uploading
repo_image.storage.save()
return repo_image.storage
def update_image_blob(self, namespace_name, repo_name, image_id, blob):
# Retrieve the existing image storage record and replace it with that given by the blob.
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None or not repo_image.storage.uploading:
return False
with db_transaction():
existing_storage = repo_image.storage
repo_image.storage = blob._db_id
repo_image.save()
existing_storage.delete_instance(recursive=True)
def get_image_size(self, namespace_name, repo_name, image_id):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return None
return repo_image.storage.image_size
def create_bittorrent_pieces(self, namespace_name, repo_name, image_id, pieces_bytes):
repo_image = model.image.get_repo_image_and_storage(namespace_name, repo_name, image_id)
if repo_image is None or repo_image.storage is None:
return
model.storage.save_torrent_info(repo_image.storage, app.config['BITTORRENT_PIECE_SIZE'],
pieces_bytes)
def image_ancestry(self, namespace_name, repo_name, image_id):
try:
image = model.image.get_image_by_id(namespace_name, repo_name, image_id)
except model.InvalidImageException:
return None
parents = model.image.get_parent_images(namespace_name, repo_name, image)
ancestry_docker_ids = [image.docker_image_id]
ancestry_docker_ids.extend([parent.docker_image_id for parent in parents])
return ancestry_docker_ids
def repository_exists(self, namespace_name, repo_name):
repo = model.repository.get_repository(namespace_name, repo_name)
return repo is not None
def create_or_link_image(self, username, namespace_name, repo_name, image_id, storage_location):
repo = model.repository.get_repository(namespace_name, repo_name)
model.image.find_create_or_link_image(image_id, repo, username, {}, storage_location)
def create_temp_hidden_tag(self, namespace_name, repo_name, image_id, expiration):
repo_image = model.image.get_repo_image(namespace_name, repo_name, image_id)
if repo_image is None:
return
repo = repo_image.repository
model.tag.create_temporary_hidden_tag(repo, repo_image, expiration)
def list_tags(self, namespace_name, repo_name):
return model.tag.list_repository_tags(namespace_name, repo_name)
def create_or_update_tag(self, namespace_name, repo_name, image_id, tag_name):
model.tag.create_or_update_tag(namespace_name, repo_name, tag_name, image_id)
def find_image_id_by_tag(self, namespace_name, repo_name, tag_name):
try:
tag_image = model.tag.get_tag_image(namespace_name, repo_name, tag_name)
except model.DataModelException:
return None
return tag_image.docker_image_id
def delete_tag(self, namespace_name, repo_name, tag_name):
model.tag.delete_tag(namespace_name, repo_name, tag_name)
def change_user_password(self, user, new_password):
model.user.change_password(user, new_password)
def get_repository(self, namespace_name, repo_name):
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return None
return _repository_for_repo(repo)
def create_repository(self, namespace_name, repo_name, user=None):
model.repository.create_repository(namespace_name, repo_name, user)
def repository_is_public(self, namespace_name, repo_name):
return model.repository.repository_is_public(namespace_name, repo_name)
def validate_oauth_token(self, token):
return bool(model.oauth.validate_access_token(token))
def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0,
limit=25):
repos = model.repository.get_filtered_matching_repositories(
search_term, filter_username=filter_username, offset=offset, limit=limit)
return [_repository_for_repo(repo) for repo in repos]
def is_namespace_enabled(self, namespace_name):
namespace = model.user.get_namespace_user(namespace_name)
return namespace is not None and namespace.enabled
def _repository_for_repo(repo):
""" Returns a Repository object representing the Pre-OCI data model instance of a repository. """
return Repository(
id=repo.id,
name=repo.name,
namespace_name=repo.namespace_user.username,
description=repo.description,
is_public=model.repository.is_repository_public(repo),
kind=model.repository.get_repo_kind_name(repo),)
pre_oci_model = PreOCIModel()


@ -7,38 +7,37 @@ from time import time
from flask import make_response, request, session, Response, redirect, abort as flask_abort
from app import storage as store, app, metric_queue
from app import storage as store, app
from auth.auth_context import get_authenticated_user
from auth.decorators import extract_namespace_repo_from_session, process_auth
from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
from data import model, database
from data import database
from data.registry_model import registry_model
from data.registry_model.blobuploader import upload_blob, BlobUploadSettings, BlobUploadException
from data.registry_model.manifestbuilder import lookup_manifest_builder
from digest import checksums
from endpoints.v1 import v1_bp
from endpoints.v1.models_pre_oci import pre_oci_model as model
from endpoints.v1.index import ensure_namespace_enabled
from endpoints.decorators import anon_protect
from util.http import abort, exact_abort
from util.registry.filelike import SocketReader
from util.registry import gzipstream
from util.registry.replication import queue_storage_replication
from util.registry.torrent import PieceHasher
logger = logging.getLogger(__name__)
def require_completion(f):
""" This make sure that the image push correctly finished. """
@wraps(f)
def wrapper(namespace, repository, *args, **kwargs):
image_id = kwargs['image_id']
if model.is_image_uploading(namespace, repository, image_id):
repository_ref = registry_model.lookup_repository(namespace, repository)
if repository_ref is not None:
legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
if legacy_image is not None and legacy_image.uploading:
abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress',
image_id=image_id)
return f(namespace, repository, *args, **kwargs)
return f(namespace, repository, *args, **kwargs)
return wrapper
@ -76,24 +75,23 @@ def set_cache_headers(f):
@anon_protect
def head_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
logger.debug('Checking repo permissions')
if permission.can() or model.repository_is_public(namespace, repository):
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
if permission.can() or (repository_ref is not None and repository_ref.is_public):
if repository_ref is None:
abort(404)
logger.debug('Looking up placement locations')
locations, _ = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
if locations is None:
legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
if legacy_image is None:
logger.debug('Could not find any blob placement locations')
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
# Add the Accept-Ranges header if the storage engine supports resumable
# downloads.
extra_headers = {}
if store.get_supports_resumable_downloads(locations):
if store.get_supports_resumable_downloads(legacy_image.blob.placements):
logger.debug('Storage supports resumable downloads')
extra_headers['Accept-Ranges'] = 'bytes'
@ -114,21 +112,23 @@ def head_image_layer(namespace, repository, image_id, headers):
@anon_protect
def get_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
logger.debug('Checking repo permissions')
if permission.can() or model.repository_is_public(namespace, repository):
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
if permission.can() or (repository_ref is not None and repository_ref.is_public):
if repository_ref is None:
abort(404)
logger.debug('Looking up placement locations and path')
locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
if not locations or not path:
legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
if legacy_image is None:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
path = legacy_image.blob.storage_path
try:
logger.debug('Looking up the direct download URL for path: %s', path)
direct_download_url = store.get_direct_download_url(locations, path, request.remote_addr)
direct_download_url = store.get_direct_download_url(legacy_image.blob.placements, path,
request.remote_addr)
if direct_download_url:
logger.debug('Returning direct download URL')
resp = redirect(direct_download_url)
@ -137,7 +137,7 @@ def get_image_layer(namespace, repository, image_id, headers):
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
logger.debug('Streaming layer data')
return Response(store.stream_read(locations, path), headers=headers)
return Response(store.stream_read(legacy_image.blob.placements, path), headers=headers)
except (IOError, AttributeError):
logger.exception('Image layer data not found')
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
@ -156,31 +156,31 @@ def put_image_layer(namespace, repository, image_id):
if not permission.can():
abort(403)
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if repository_ref is None:
abort(403)
logger.debug('Retrieving image')
if model.storage_exists(namespace, repository, image_id):
legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
if legacy_image is not None and not legacy_image.uploading:
exact_abort(409, 'Image already exists')
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
if v1_metadata is None:
logger.debug('Checking for image in manifest builder')
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
if builder is None:
abort(400)
layer = builder.lookup_layer(image_id)
if layer is None:
abort(404)
logger.debug('Storing layer data')
input_stream = request.stream
if request.headers.get('transfer-encoding') == 'chunked':
# Careful, might work only with WSGI servers supporting chunked
# encoding (Gunicorn)
input_stream = request.environ['wsgi.input']
repository_ref = registry_model.lookup_repository(namespace, repository)
if repository_ref is None:
abort(404)
expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'],
bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'],
@ -190,13 +190,13 @@ def put_image_layer(namespace, repository, image_id):
# Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
# which is only needed for older versions of Docker.
requires_tarsum = session.get('checksum_format') == 'tarsum'
requires_tarsum = bool(builder.get_layer_checksums(layer))
if requires_tarsum:
tmp, tmp_hndlr = store.temp_store_handler()
extra_handlers.append(tmp_hndlr)
# Add a handler which computes the simple Docker V1 checksum.
h, sum_hndlr = checksums.simple_checksum_handler(v1_metadata.compat_json)
h, sum_hndlr = checksums.simple_checksum_handler(layer.v1_metadata_string)
extra_handlers.append(sum_hndlr)
uploaded_blob = None
@ -209,37 +209,34 @@ def put_image_layer(namespace, repository, image_id):
logger.exception('Exception when writing image data')
abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)
# Save the blob for the image.
model.update_image_blob(namespace, repository, image_id, uploaded_blob)
# Send a job to the work queue to replicate the image layer.
# TODO: move this into a better place.
queue_storage_replication(namespace, uploaded_blob)
# Append the computed checksum.
# Compute the final checksum
csums = []
csums.append('sha256:{0}'.format(h.hexdigest()))
try:
if requires_tarsum:
tmp.seek(0)
csums.append(checksums.compute_tarsum(tmp, v1_metadata.compat_json))
csums.append(checksums.compute_tarsum(tmp, layer.v1_metadata_string))
tmp.close()
except (IOError, checksums.TarError) as exc:
logger.debug('put_image_layer: Error when computing tarsum %s', exc)
if v1_metadata.checksum is None:
# We don't have a checksum stored yet, that's fine skipping the check.
# Not removing the mark though, image is not downloadable yet.
session['checksum'] = csums
session['content_checksum'] = uploaded_blob.digest
return make_response('true', 200)
# If there was already a precomputed checksum, validate against it now.
if builder.get_layer_checksums(layer):
checksum = builder.get_layer_checksums(layer)[0]
if not builder.validate_layer_checksum(layer, checksum):
logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
builder.get_layer_checksums(layer))
abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
image_id=image_id)
# We check if the checksums provided matches one the one we computed
if v1_metadata.checksum not in csums:
logger.warning('put_image_layer: Wrong checksum')
abort(400, 'Checksum mismatch; ignoring the layer for image %(image_id)s',
issue='checksum-mismatch', image_id=image_id)
# Assign the blob to the layer in the manifest.
if not builder.assign_layer_blob(layer, uploaded_blob, csums):
abort(500, 'Something went wrong')
# Send a job to the work queue to replicate the image layer.
# TODO: move this into a better place.
queue_storage_replication(namespace, uploaded_blob)
return make_response('true', 200)
@ -255,10 +252,9 @@ def put_image_checksum(namespace, repository, image_id):
if not permission.can():
abort(403)
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if repository_ref is None:
abort(403)
# Docker Version < 0.10 (tarsum+sha):
old_checksum = request.headers.get('X-Docker-Checksum')
@ -266,40 +262,27 @@ def put_image_checksum(namespace, repository, image_id):
# Docker Version >= 0.10 (sha):
new_checksum = request.headers.get('X-Docker-Checksum-Payload')
# Store whether we need to calculate the tarsum.
if new_checksum:
session['checksum_format'] = 'sha256'
else:
session['checksum_format'] = 'tarsum'
checksum = new_checksum or old_checksum
if not checksum:
abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum',
image_id=image_id)
if not session.get('checksum'):
abort(400, 'Checksum not found in Cookie for image %(image_id)s',
issue='missing-checksum-cookie', image_id=image_id)
logger.debug('Checking for image in manifest builder')
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
if builder is None:
abort(400)
logger.debug('Looking up repo image')
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
if not v1_metadata:
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
layer = builder.lookup_layer(image_id)
if layer is None:
abort(404)
logger.debug('Looking up repo layer data')
if not v1_metadata.compat_json:
abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
if old_checksum:
builder.save_precomputed_checksum(layer, checksum)
return make_response('true', 200)
logger.debug('Storing image and checksum')
content_checksum = session.get('content_checksum', None)
checksum_parts = checksum.split(':')
if len(checksum_parts) != 2:
abort(400, 'Invalid checksum format')
if checksum not in session.get('checksum', []):
logger.debug('session checksums: %s', session.get('checksum', []))
logger.debug('client supplied checksum: %s', checksum)
logger.debug('put_image_checksum: Wrong checksum')
if not builder.validate_layer_checksum(layer, checksum):
logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
builder.get_layer_checksums(layer))
abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
image_id=image_id)
@ -316,27 +299,22 @@ def put_image_checksum(namespace, repository, image_id):
def get_image_json(namespace, repository, image_id, headers):
logger.debug('Checking repo permissions')
permission = ReadRepositoryPermission(namespace, repository)
if not permission.can() and not model.repository_is_public(namespace, repository):
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
abort(403)
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
logger.debug('Looking up repo image')
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
if v1_metadata is None:
legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
if legacy_image is None:
flask_abort(404)
logger.debug('Looking up repo layer size')
size = model.get_image_size(namespace, repository, image_id)
size = legacy_image.blob.compressed_size
if size is not None:
# Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
# so handle this case rather than failing.
headers['X-Docker-Size'] = str(size)
response = make_response(v1_metadata.compat_json, 200)
response = make_response(legacy_image.v1_metadata_string, 200)
response.headers.extend(headers)
return response
@ -351,20 +329,19 @@ def get_image_json(namespace, repository, image_id, headers):
def get_image_ancestry(namespace, repository, image_id, headers):
logger.debug('Checking repo permissions')
permission = ReadRepositoryPermission(namespace, repository)
if not permission.can() and not model.repository_is_public(namespace, repository):
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
abort(403)
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
ancestry_docker_ids = model.image_ancestry(namespace, repository, image_id)
if ancestry_docker_ids is None:
logger.debug('Looking up repo image')
legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_parents=True)
if legacy_image is None:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
# We can not use jsonify here because we are returning a list not an object
response = make_response(json.dumps(ancestry_docker_ids), 200)
# NOTE: We can not use jsonify here because we are returning a list not an object.
ancestor_ids = ([legacy_image.docker_image_id] +
[a.docker_image_id for a in legacy_image.parents])
response = make_response(json.dumps(ancestor_ids), 200)
response.headers.extend(headers)
return response
@ -380,10 +357,13 @@ def put_image_json(namespace, repository, image_id):
if not permission.can():
abort(403)
repo = model.get_repository(namespace, repository)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, image_id=image_id)
repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
if repository_ref is None:
abort(403)
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
if builder is None:
abort(400)
logger.debug('Parsing image JSON')
try:
@ -405,46 +385,12 @@ def put_image_json(namespace, repository, image_id):
image_id=image_id)
logger.debug('Looking up repo image')
if not model.repository_exists(namespace, repository):
abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
namespace=namespace, repository=repository)
parent_id = data.get('parent', None)
if parent_id:
logger.debug('Looking up parent image')
if model.docker_v1_metadata(namespace, repository, parent_id) is None:
abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s',
issue='invalid-request', image_id=image_id, parent_id=parent_id)
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
if v1_metadata is None:
username = get_authenticated_user() and get_authenticated_user().username
logger.debug('Image not found, creating or linking image with initiating user context: %s',
username)
location_pref = store.preferred_locations[0]
model.create_or_link_image(username, namespace, repository, image_id, location_pref)
v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
# Create a temporary tag to prevent this image from getting garbage collected while the push
# is in progress.
model.create_temp_hidden_tag(namespace, repository, image_id,
username = get_authenticated_user() and get_authenticated_user().username
layer = builder.start_layer(image_id, uploaded_metadata, location_pref, username,
app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
logger.debug('Checking if image already exists')
if v1_metadata and not model.is_image_uploading(namespace, repository, image_id):
exact_abort(409, 'Image already exists')
model.update_image_uploading(namespace, repository, image_id, True)
# If we reach that point, it means that this is a new image or a retry
# on a failed push, save the metadata
command_list = data.get('container_config', {}).get('Cmd', None)
command = json.dumps(command_list) if command_list else None
logger.debug('Setting image metadata')
model.update_docker_v1_metadata(namespace, repository, image_id,
data.get('created'),
data.get('comment'), command, uploaded_metadata, parent_id)
if layer is None:
abort(400, 'Image %(image_id)s has invalid metadata',
issue='invalid-request', image_id=image_id)
return make_response('true', 200)
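A note on the checksum handling above: put_image_layer seeds a hash with the layer's V1 JSON metadata and streams the uploaded bytes through it, and put_image_checksum compares the client-supplied value against the checksums recorded on the builder. Assuming digest.checksums.compute_simple follows the original docker-registry algorithm (sha256 over the V1 JSON string, a newline, then the raw layer bytes), which is an assumption since that module is not shown in this diff, the 'simple' checksum amounts to the sketch below; the function name is illustrative, not part of the codebase.

import hashlib


def docker_v1_simple_checksum(v1_metadata_string, layer_bytes):
    # sha256 over the V1 JSON metadata, a newline separator, then the layer tarball.
    digest = hashlib.sha256(v1_metadata_string + '\n')
    digest.update(layer_bytes)
    return 'sha256:%s' % digest.hexdigest()

# This is the value appended to csums in put_image_layer and later matched against
# the checksum the client sends via the X-Docker-Checksum-Payload header.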


@ -5,10 +5,10 @@ from flask import abort, request, jsonify, make_response, session
from auth.decorators import process_auth
from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
from data import model
from data.registry_model import registry_model
from data.registry_model.manifestbuilder import lookup_manifest_builder
from endpoints.decorators import anon_protect, parse_repository_name
from endpoints.v1 import v1_bp
from endpoints.v1.models_pre_oci import pre_oci_model as model
from util.audit import track_and_log
from util.names import TAG_ERROR, TAG_REGEX
@ -21,15 +21,13 @@ logger = logging.getLogger(__name__)
@parse_repository_name()
def get_tags(namespace_name, repo_name):
permission = ReadRepositoryPermission(namespace_name, repo_name)
repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
if permission.can() or (repository_ref is not None and repository_ref.is_public):
if repository_ref is None:
abort(404)
if permission.can() or model.repository_is_public(namespace_name, repo_name):
repo = model.get_repository(namespace_name, repo_name)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
tags = model.list_tags(namespace_name, repo_name)
tag_map = {tag.name: tag.image.docker_image_id for tag in tags}
tags = registry_model.list_repository_tags(repository_ref, include_legacy_images=True)
tag_map = {tag.name: tag.legacy_image.docker_image_id for tag in tags}
return jsonify(tag_map)
abort(403)
@ -41,18 +39,16 @@ def get_tags(namespace_name, repo_name):
@parse_repository_name()
def get_tag(namespace_name, repo_name, tag):
permission = ReadRepositoryPermission(namespace_name, repo_name)
if permission.can() or model.repository_is_public(namespace_name, repo_name):
repo = model.get_repository(namespace_name, repo_name)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
image_id = model.find_image_id_by_tag(namespace_name, repo_name, tag)
if image_id is None:
repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
if permission.can() or (repository_ref is not None and repository_ref.is_public):
if repository_ref is None:
abort(404)
resp = make_response('"%s"' % image_id)
tag = registry_model.get_repo_tag(repository_ref, tag, include_legacy_image=True)
if tag is None:
abort(404)
resp = make_response('"%s"' % tag.legacy_image.docker_image_id)
resp.headers['Content-Type'] = 'application/json'
return resp
@ -65,24 +61,33 @@ def get_tag(namespace_name, repo_name, tag):
@parse_repository_name()
def put_tag(namespace_name, repo_name, tag):
permission = ModifyRepositoryPermission(namespace_name, repo_name)
repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
if permission.can():
if permission.can() and repository_ref is not None:
if not TAG_REGEX.match(tag):
abort(400, TAG_ERROR)
repo = model.get_repository(namespace_name, repo_name)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
image_id = json.loads(request.data)
model.create_or_update_tag(namespace_name, repo_name, image_id, tag)
# Store the updated tag.
if 'pushed_tags' not in session:
session['pushed_tags'] = {}
# Check for the image ID first in a builder (for an in-progress push).
builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'))
if builder is not None:
layer = builder.lookup_layer(image_id)
if layer is not None:
committed_tag = builder.commit_tag_and_manifest(tag, layer)
if committed_tag is None:
abort(400)
session['pushed_tags'][tag] = image_id
return make_response('Created', 200)
# Check if there is an existing image we should use (for PUT calls outside of a normal push
# operation).
legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
if legacy_image is None:
abort(400)
if registry_model.retarget_tag(repository_ref, tag, legacy_image) is None:
abort(400)
return make_response('Created', 200)
@ -95,18 +100,13 @@ def put_tag(namespace_name, repo_name, tag):
@parse_repository_name()
def delete_tag(namespace_name, repo_name, tag):
permission = ModifyRepositoryPermission(namespace_name, repo_name)
repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
if permission.can():
repo = model.get_repository(namespace_name, repo_name)
if repo is None:
abort(403)
if permission.can() and repository_ref is not None:
if not registry_model.delete_tag(repository_ref, tag):
abort(404)
if repo.kind != 'image':
msg = 'This repository is for managing %s resources and not container images.' % repo.kind
abort(405, message=msg, namespace=namespace_name)
model.delete_tag(namespace_name, repo_name, tag)
track_and_log('delete_tag', model.get_repository(namespace_name, repo_name), tag=tag)
track_and_log('delete_tag', repository_ref, tag=tag)
return make_response('Deleted', 200)
abort(403)
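At the HTTP level, the tag endpoint above still speaks the Docker Registry V1 protocol: the request body is the JSON-quoted Docker image ID, exactly as the tag() helper added to protocol_v1.py later in this diff sends it. A minimal sketch of a direct call follows; the host, port, credentials, and image ID are placeholder assumptions, and the real tests go through the protocol fixture instead.

import requests

# Hypothetical direct call against a local instance; the values below are placeholders.
# The image ID must already exist in the repository (the out-of-push retarget path),
# since no manifest builder is present in the session for this request.
resp = requests.put(
    'http://localhost:5000/v1/repositories/devtable/newrepo/tags/anothertag',
    data='"someimageid"',  # body is the JSON-quoted Docker V1 image ID
    auth=('devtable', 'password'),
    headers={'Content-Type': 'application/json'})
assert resp.status_code == 200  # the endpoint responds with 'Created'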


@ -3,7 +3,7 @@ import json
from cStringIO import StringIO
from enum import Enum, unique
from digest.checksums import compute_simple
from digest.checksums import compute_simple, compute_tarsum
from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
PullResult)
@ -31,7 +31,7 @@ class V1Protocol(RegistryProtocol):
V1ProtocolSteps.GET_IMAGES: {
Failures.UNAUTHENTICATED: 403,
Failures.UNAUTHORIZED: 403,
Failures.APP_REPOSITORY: 405,
Failures.APP_REPOSITORY: 404,
Failures.ANONYMOUS_NOT_ALLOWED: 401,
Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
Failures.NAMESPACE_DISABLED: 400,
@ -93,7 +93,7 @@ class V1Protocol(RegistryProtocol):
# GET /v1/repositories/{namespace}/{repository}/tags
image_ids = self.conduct(session, 'GET', prefix + 'tags', headers=headers).json()
assert len(image_ids.values()) == len(tag_names)
assert len(image_ids.values()) >= len(tag_names)
for tag_name in tag_names:
if tag_name not in image_ids:
@ -165,13 +165,21 @@ class V1Protocol(RegistryProtocol):
expected_status=(200, expected_failure,
V1ProtocolSteps.PUT_IMAGE_JSON))
if response.status_code != 200:
break
return
# PUT /v1/images/{imageID}/checksum (old style)
old_checksum = compute_tarsum(StringIO(image.bytes), json.dumps(image_json_data))
checksum_headers = {'X-Docker-Checksum': old_checksum}
checksum_headers.update(headers)
self.conduct(session, 'PUT', '/v1/images/%s/checksum' % image.id,
headers=checksum_headers)
# PUT /v1/images/{imageID}/layer
self.conduct(session, 'PUT', '/v1/images/%s/layer' % image.id,
data=StringIO(image.bytes), headers=headers)
# PUT /v1/images/{imageID}/checksum
# PUT /v1/images/{imageID}/checksum (new style)
checksum = compute_simple(StringIO(image.bytes), json.dumps(image_json_data))
checksum_headers = {'X-Docker-Checksum-Payload': checksum}
checksum_headers.update(headers)
@ -208,3 +216,12 @@ class V1Protocol(RegistryProtocol):
'/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name),
auth=auth,
expected_status=(200, expected_failure, V1ProtocolSteps.DELETE_TAG))
def tag(self, session, namespace, repo_name, tag_name, image, credentials=None,
expected_failure=None, options=None):
auth = self._auth_for_credentials(credentials)
self.conduct(session, 'PUT',
'/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name),
data='"%s"' % image.id,
auth=auth,
expected_status=(200, expected_failure, V1ProtocolSteps.PUT_TAG))


@ -97,6 +97,11 @@ class RegistryProtocol(object):
the given credentials.
"""
@abstractmethod
def delete(self, session, namespace, repo_name, tag_names, credentials=None,
expected_failure=None, options=None):
""" Deletes some tags. """
def repo_name(self, namespace, repo_name):
if namespace:
return '%s/%s' % (namespace, repo_name)


@ -226,7 +226,7 @@ def test_push_pull_logging(credentials, namespace, expected_performer, pusher, p
credentials = credentials(api_caller, registry_server_executor.on(liveserver))
# Push to the repository with the specified credentials.
pusher.push(liveserver_session, namespace, 'newrepo', 'latest', basic_images,
pusher.push(liveserver_session, namespace, 'newrepo', 'anothertag', basic_images,
credentials=credentials)
# Check the logs for the push.
@ -243,7 +243,7 @@ def test_push_pull_logging(credentials, namespace, expected_performer, pusher, p
assert logs[0]['performer']['name'] == expected_performer
# Pull the repository to verify.
puller.pull(liveserver_session, namespace, 'newrepo', 'latest', basic_images,
puller.pull(liveserver_session, namespace, 'newrepo', 'anothertag', basic_images,
credentials=credentials)
# Check the logs for the pull.
@ -1299,3 +1299,20 @@ def test_push_pull_same_blobs(pusher, puller, liveserver_session, app_reloader):
# Pull the repository to verify.
puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', images,
credentials=credentials, options=options)
def test_push_tag_existing_image(v1_protocol, puller, basic_images, liveserver_session, app_reloader):
""" Test: Push a new tag on an existing manifest/image. """
credentials = ('devtable', 'password')
# Push a new repository.
result = v1_protocol.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
credentials=credentials)
# Push the same image/manifest to another tag in the repository.
v1_protocol.tag(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images[-1],
credentials=credentials)
# Pull the repository to verify.
puller.pull(liveserver_session, 'devtable', 'newrepo', 'anothertag', basic_images,
credentials=credentials)


@ -793,26 +793,6 @@ class RegistryTestsMixin(object):
# Pull the repository to verify.
self.do_pull('public', 'foo.bar', 'public', 'password')
def test_application_repo(self):
# Create an application repository via the API.
self.conduct_api_login('devtable', 'password')
data = {
'repository': 'someapprepo',
'visibility': 'private',
'repo_kind': 'application',
'description': 'test app repo',
}
self.conduct('POST', '/api/v1/repository', json_data=data, expected_code=201)
# Try to push to the repo, which should fail with a 405.
self.do_push('devtable', 'someapprepo', 'devtable', 'password',
expect_failure=FailureCodes.APP_REPOSITORY)
# Try to pull from the repo, which should fail with a 405.
self.do_pull('devtable', 'someapprepo', 'devtable', 'password',
expect_failure=FailureCodes.APP_REPOSITORY)
def test_middle_layer_different_sha(self):
if self.push_version == 'v1':
# No SHAs to munge in V1.


@ -137,11 +137,11 @@ def build_v1_index_specs():
IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
PUBLIC_REPO, 403, 403, 403, 403, 403).set_method('PUT'),
IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
PRIVATE_REPO, 403, 403, 403, 403, 404).set_method('PUT'),
PRIVATE_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
ORG_REPO, 403, 403, 403, 403, 404).set_method('PUT'),
ORG_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
IndexV1TestSpec(url_for('v1.put_image_layer', image_id=FAKE_IMAGE_ID),
ANOTHER_ORG_REPO, 403, 403, 403, 403, 404).set_method('PUT'),
ANOTHER_ORG_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
IndexV1TestSpec(url_for('v1.put_image_checksum',
image_id=FAKE_IMAGE_ID),
@ -205,11 +205,11 @@ def build_v1_index_specs():
IndexV1TestSpec(url_for('v1.update_images', repository=PUBLIC_REPO),
NO_REPO, 403, 403, 403, 403, 403).set_method('PUT'),
IndexV1TestSpec(url_for('v1.update_images', repository=PRIVATE_REPO),
NO_REPO, 403, 403, 403, 403, 204).set_method('PUT'),
NO_REPO, 403, 403, 403, 403, 400).set_method('PUT'),
IndexV1TestSpec(url_for('v1.update_images', repository=ORG_REPO), NO_REPO,
403, 403, 403, 403, 204).set_method('PUT'),
403, 403, 403, 403, 400).set_method('PUT'),
IndexV1TestSpec(url_for('v1.update_images', repository=ANOTHER_ORG_REPO), NO_REPO,
403, 403, 403, 403, 204).set_method('PUT'),
403, 403, 403, 403, 400).set_method('PUT'),
IndexV1TestSpec(url_for('v1.get_repository_images',
repository=PUBLIC_REPO),