Implement V2 interfaces and remaining V1 interfaces

Also adds some V1 coverage to the registry tests.
Note: all *registry* tests currently pass, but because the verbs have not yet been converted, the verb tests in registry_tests.py still fail.
Joseph Schorr 2016-08-16 15:23:00 -04:00 committed by Jimmy Zelinskie
parent d67991987b
commit db60df827d
21 changed files with 588 additions and 338 deletions
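The unifying change across these files is that endpoints stop reaching into `data.model` directly and instead call `data.interfaces.v1` / `data.interfaces.v2`, which traffic in plain data objects rather than ORM rows. The interface modules themselves are not shown in this diff, so the following is only a minimal sketch of the shape implied by the call sites below; every signature here is inferred, not copied from Quay:

```python
# Hypothetical sketch of the data.interfaces.v2 surface implied by this diff.
# Signatures are inferred from call sites; the real module may differ.
from collections import namedtuple

# Plain value objects cross the interface boundary instead of ORM rows.
BlobUpload = namedtuple('BlobUpload', ['uuid', 'byte_count', 'chunk_count',
                                       'location_name', 'storage_metadata'])
ManifestJSON = namedtuple('ManifestJSON', ['digest', 'json', 'media_type'])

def blob_upload_by_uuid(namespace_name, repo_name, upload_uuid):
  """Find an in-progress upload, now scoped to its repository."""
  raise NotImplementedError

def delete_blob_upload(namespace_name, repo_name, upload_uuid):
  """Delete the upload record; callers clean up storage afterwards."""
  raise NotImplementedError

def get_manifest_by_digest(namespace_name, repo_name, digest):
  """Return a ManifestJSON or None (replaces model.tag.load_manifest_by_digest)."""
  raise NotImplementedError
```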


@@ -5,7 +5,7 @@ from auth.auth_context import get_authenticated_user, get_validated_oauth_token
 import json

 def build_event_data(repo, extra_data={}, subpage=None):
-  repo_string = '%s/%s' % (repo.namespace_user.username, repo.name)
+  repo_string = '%s/%s' % (repo.namespace_name, repo.name)
   homepage = '%s://%s/repository/%s' % (app.config['PREFERRED_URL_SCHEME'],
                                         app.config['SERVER_HOSTNAME'],
                                         repo_string)
@@ -18,11 +18,10 @@ def build_event_data(repo, extra_data={}, subpage=None):
   event_data = {
     'repository': repo_string,
-    'namespace': repo.namespace_user.username,
+    'namespace': repo.namespace_name,
     'name': repo.name,
     'docker_url': '%s/%s' % (app.config['SERVER_HOSTNAME'], repo_string),
     'homepage': homepage,
     'visibility': repo.visibility.name
   }

   event_data.update(extra_data)
@@ -54,10 +53,10 @@ def spawn_notification(repo, event_name, extra_data={}, subpage=None, pathargs=[
                        performer_data=None):
   event_data = build_event_data(repo, extra_data=extra_data, subpage=subpage)

-  notifications = model.notification.list_repo_notifications(repo.namespace_user.username,
+  notifications = model.notification.list_repo_notifications(repo.namespace_name,
                                                               repo.name,
                                                               event_name=event_name)
   for notification in list(notifications):
     notification_data = build_notification_data(notification, event_data, performer_data)
-    path = [repo.namespace_user.username, repo.name, event_name] + pathargs
+    path = [repo.namespace_name, repo.name, event_name] + pathargs
     notification_queue.put(path, json.dumps(notification_data))
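Worth noting on the `repo.namespace_name` substitutions: the notification helper now assumes the namespace name is available directly on whatever repository object it receives, instead of traversing a joined `namespace_user` row. A toy illustration of that shape (the `Repo`/`Visibility` stand-ins are mine, not Quay's types):

```python
from collections import namedtuple

# Stand-in types; Quay's interfaces return richer objects.
Visibility = namedtuple('Visibility', ['name'])
Repo = namedtuple('Repo', ['namespace_name', 'name', 'visibility'])

def repo_string(repo):
  # No ORM join needed: namespace_name is already on the object.
  return '%s/%s' % (repo.namespace_name, repo.name)

print(repo_string(Repo('devtable', 'simple', Visibility('public'))))  # devtable/simple
```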


@@ -6,8 +6,9 @@ from functools import wraps
 from flask import request, make_response, jsonify, session

-from data.model import v1
-from app import authentication, userevents, metric_queue
+from data.interfaces import v1
+from app import authentication, userevents
 from auth.auth import process_auth, generate_signed_token
 from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token
 from auth.permissions import (ModifyRepositoryPermission, UserAdminPermission,
@@ -148,10 +149,6 @@ def update_user(username):
       logger.debug('Updating user password')
       v1.change_user_password(get_authenticated_user(), update_request['password'])

-    if 'email' in update_request:
-      logger.debug('Updating user email')
-      v1.change_user_email(get_authenticated_user(), update_request['email'])
-
   return jsonify({
     'username': get_authenticated_user().username,
     'email': get_authenticated_user().email


@@ -14,7 +14,7 @@ from auth.permissions import (ReadRepositoryPermission,
                               ModifyRepositoryPermission)
 from auth.registry_jwt_auth import get_granted_username
 from data import model, database
-from data.model import v1
+from data.interfaces import v1
 from digest import checksums
 from endpoints.v1 import v1_bp
 from endpoints.decorators import anon_protect


@@ -9,7 +9,7 @@ from auth.auth import process_auth
 from auth.permissions import (ReadRepositoryPermission,
                               ModifyRepositoryPermission)
 from data import model
-from data.model import v1
+from data.interfaces import v1
 from endpoints.common import parse_repository_name
 from endpoints.decorators import anon_protect
 from endpoints.v1 import v1_bp


@@ -65,7 +65,7 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
       kwargs[limit_kwarg_name] = limit
       kwargs[offset_kwarg_name] = offset
       kwargs[callback_kwarg_name] = callback
-      func(*args, **kwargs)
+      return func(*args, **kwargs)
     return wrapped
   return wrapper
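The `paginate` fix is a one-liner but a real bug: the decorator invoked the view and threw away its return value, so Flask would have seen `None` instead of the response. A stripped-down sketch of the decorator shape (request parsing elided, values and kwarg names illustrative):

```python
from functools import wraps

def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
             callback_kwarg_name='pagination_callback'):
  def wrapper(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
      # In the real decorator these come from the request's query args.
      limit, offset = 50, 0
      kwargs[limit_kwarg_name] = limit
      kwargs[offset_kwarg_name] = offset
      kwargs[callback_kwarg_name] = lambda num_results, response: None
      # The fix: pass the view's response through instead of dropping it.
      return func(*args, **kwargs)
    return wrapped
  return wrapper

@paginate()
def list_things(limit, offset, pagination_callback):
  return ['thing'] * limit

print(len(list_things()))  # 50, not None
```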


@@ -8,6 +8,7 @@ import resumablehashlib
 from app import storage, app
 from auth.registry_jwt_auth import process_registry_jwt_auth
 from data import database
+from data.interfaces import v2
 from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
@@ -134,7 +135,7 @@ def start_blob_upload(namespace_name, repo_name):
     # The user plans to send us the entire body right now.
     # Find the upload.
-    blob_upload = v2.blob_upload_by_uuid(new_upload_uuid)
+    blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
     if blob_upload is None:
       raise BlobUploadUnknown()
@@ -142,7 +143,7 @@ def start_blob_upload(namespace_name, repo_name):
     # the upload state.
     updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
     if updated_blob_upload is None:
-      _abort_range_not_satisfiable(updated_blob_upload.byte_count, new_upload_uuid)
+      _abort_range_not_satisfiable(blob_upload.byte_count, new_upload_uuid)

     # Save the upload state to the database.
     v2.update_blob_upload(updated_blob_upload)
@@ -195,7 +196,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
   # the upload state.
   updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
   if updated_blob_upload is None:
-    _abort_range_not_satisfiable(updated_blob_upload.byte_count, upload_uuid)
+    _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)

   # Save the upload state to the database.
   v2.update_blob_upload(updated_blob_upload)
@@ -231,7 +232,7 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
   # the upload state.
   updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
   if updated_blob_upload is None:
-    _abort_range_not_satisfiable(updated_blob_upload.byte_count, upload_uuid)
+    _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)

   # Finalize the upload process in the database and storage.
   _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
@@ -253,14 +254,15 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
 @require_repo_write
 @anon_protect
 def cancel_upload(namespace_name, repo_name, upload_uuid):
-  upload = v2.blob_upload_by_uuid(upload_uuid)
-  if upload is None:
+  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  if blob_upload is None:
     raise BlobUploadUnknown()

   # We delete the record for the upload first, since if the partial upload in
-  # storage fails to delete, it doesn't break anything
-  v2.delete_blob_upload(upload_uuid)
-  storage.cancel_chunked_upload({upload.location_name}, upload.uuid, upload.storage_metadata)
+  # storage fails to delete, it doesn't break anything.
+  v2.delete_blob_upload(namespace_name, repo_name, upload_uuid)
+  storage.cancel_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
+                                blob_upload.storage_metadata)

   return Response(status=204)
@@ -342,7 +344,7 @@ def _upload_chunk(blob_upload, range_header):
   """
   # Get the offset and length of the current chunk.
   start_offset, length = _start_offset_and_length(range_header)
-  if None in {blob_upload, start_offset, length}:
+  if blob_upload is None or None in {start_offset, length}:
     logger.error('Invalid arguments provided to _upload_chunk')
     return None
@@ -393,7 +395,7 @@ def _upload_chunk(blob_upload, range_header):
     size_info, fn = calculate_size_handler()
     input_fp = wrap_with_handler(input_fp, fn)

-  length_written, new_metadata, error = storage.stream_upload_chunk(
+  length_written, new_metadata, upload_error = storage.stream_upload_chunk(
     location_set,
     blob_upload.uuid,
     start_offset,
@@ -402,8 +404,9 @@ def _upload_chunk(blob_upload, range_header):
     blob_upload.storage_metadata,
     content_type=BLOB_CONTENT_TYPE,
   )
-  if error is not None:
-    logger.error('storage.stream_upload_chunk returned error %s', error)
+  if upload_error is not None:
+    logger.error('storage.stream_upload_chunk returned error %s', upload_error)
     return None

   # If we determined an uncompressed size and this is the first chunk, add it to the blob.
@@ -418,6 +421,7 @@ def _upload_chunk(blob_upload, range_header):
   if piece_hasher is not None:
     blob_upload.piece_hashes = piece_hasher.piece_hashes
     blob_upload.piece_sha_state = piece_hasher.hash_fragment

+  blob_upload.storage_metadata = new_metadata
   blob_upload.byte_count += length_written
   blob_upload.chunk_count += 1
@@ -471,19 +475,17 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
     namespace_name,
     repo_name,
     digest,
-    blob_upload.location_name,
-    blob_upload.byte_count,
-    blob_upload.uncompressed_byte_count,
+    blob_upload,
     app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],
   )

   # If it doesn't already exist, create the BitTorrent pieces for the blob.
   if blob_upload.piece_sha_state is not None and not already_existed:
     piece_bytes = blob_upload.piece_hashes + blob_upload.piece_sha_state.digest()
-    v2.create_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
+    v2.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)

   # Delete the blob upload.
-  v2.delete_upload(blob_upload.uuid)
+  v2.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)

 def _finish_upload(namespace_name, repo_name, blob_upload, digest):
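The three `_abort_range_not_satisfiable` changes in this file are all the same fix: `_upload_chunk` returns `None` on failure, and the old code then read `.byte_count` off that `None`, turning a clean 416 into an `AttributeError`. A self-contained sketch of the corrected guard (the abort helper here is a stand-in for the real one, which aborts the request with an HTTP 416):

```python
class RangeNotSatisfiable(Exception):
  pass

def _abort_range_not_satisfiable(valid_end, upload_uuid):
  # Stand-in: the real helper aborts the request with a 416 and a Range header.
  raise RangeNotSatisfiable('valid range 0-%d for upload %s' % (valid_end, upload_uuid))

def save_chunk_state(blob_upload, updated_blob_upload, upload_uuid):
  if updated_blob_upload is None:
    # Report the offset from the *pre-chunk* state; the updated object
    # is None here, which is exactly what the old code dereferenced.
    _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)
  return updated_blob_upload
```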


@@ -3,6 +3,7 @@ from flask import jsonify
 from auth.registry_jwt_auth import process_registry_jwt_auth, get_granted_entity
 from endpoints.decorators import anon_protect
 from endpoints.v2 import v2_bp, paginate
+from data.interfaces import v2

 @v2_bp.route('/_catalog', methods=['GET'])
 @process_registry_jwt_auth()
@@ -14,10 +15,10 @@ def catalog_search(limit, offset, pagination_callback):
   if entity:
     username = entity.user.username

-  visible_repositories = v2.get_visible_repositories(username, limit, offset)
+  visible_repositories = v2.get_visible_repositories(username, limit+1, offset)
   response = jsonify({
     'repositories': ['%s/%s' % (repo.namespace_name, repo.name)
-                     for repo in visible_repositories],
+                     for repo in visible_repositories][0:limit],
   })

   pagination_callback(len(visible_repositories), response)
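The `limit+1` change is the usual over-fetch trick for pagination: requesting one row more than the page size tells you whether a next page exists without a second COUNT query, and the slice `[0:limit]` keeps the sentinel row out of the response body. A minimal sketch:

```python
def fetch_page(fetch, limit, offset):
  """Over-fetch by one row; return (page, has_more) with no COUNT query."""
  rows = fetch(limit + 1, offset)
  return rows[0:limit], len(rows) > limit

# In-memory stand-in for v2.get_visible_repositories.
repos = ['repo%d' % i for i in range(7)]
fetch = lambda limit, offset: repos[offset:offset + limit]

print(fetch_page(fetch, limit=5, offset=0))  # (['repo0'..'repo4'], True)
print(fetch_page(fetch, limit=5, offset=5))  # (['repo5', 'repo6'], False)
```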


@@ -9,6 +9,7 @@ import features
 from app import docker_v2_signing_key, app, metric_queue
 from auth.registry_jwt_auth import process_registry_jwt_auth
 from data import model
+from data.interfaces import v2
 from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.decorators import anon_protect
@@ -35,14 +36,14 @@ MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN)
 @process_registry_jwt_auth(scopes=['pull'])
 @require_repo_read
 @anon_protect
-def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
-  manifest = v2.get_manifest_by_tag(namespace_name, repo_name, tag_name)
+def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
+  manifest = v2.get_manifest_by_tag(namespace_name, repo_name, manifest_ref)
   if manifest is None:
-    tag = v2.get_active_tag(namespace_name, repo_name, tag_name)
-    if tag is None:
+    has_tag = v2.has_active_tag(namespace_name, repo_name, manifest_ref)
+    if not has_tag:
       raise ManifestUnknown()

-    manifest = _generate_and_store_manifest(namespace_name, repo_name, tag_name)
+    manifest = _generate_and_store_manifest(namespace_name, repo_name, manifest_ref)
     if manifest is None:
       raise ManifestUnknown()
@@ -52,9 +53,9 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
   metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])

   return Response(
-    manifest.bytes,
+    manifest.json,
     status=200,
-    headers={'Content-Type': manifest.content_type, 'Docker-Content-Digest': manifest.digest},
+    headers={'Content-Type': manifest.media_type, 'Docker-Content-Digest': manifest.digest},
   )
@@ -64,7 +65,7 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
 @require_repo_read
 @anon_protect
 def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
-  manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, manifest_ref)
+  manifest = v2.get_manifest_by_digest(namespace_name, repo_name, manifest_ref)
   if manifest is None:
     # Without a tag name to reference, we can't make an attempt to generate the manifest
     raise ManifestUnknown()
@@ -74,7 +75,7 @@ def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
   track_and_log('pull_repo', repo)
   metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])

-  return Response(manifest.json, status=200, headers={'Content-Type': manifest.content_type,
+  return Response(manifest.json, status=200, headers={'Content-Type': manifest.media_type,
                                                       'Docker-Content-Digest': manifest.digest})
@@ -94,13 +95,13 @@ def _reject_manifest2_schema2(func):
 @process_registry_jwt_auth(scopes=['pull', 'push'])
 @require_repo_write
 @anon_protect
-def write_manifest_by_tagname(namespace_name, repo_name, tag_name):
+def write_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
   try:
     manifest = DockerSchema1Manifest(request.data)
   except ManifestException as me:
     raise ManifestInvalid(detail={'message': me.message})

-  if manifest.tag != tag_name:
+  if manifest.tag != manifest_ref:
     raise TagInvalid()

   return _write_manifest(namespace_name, repo_name, manifest)
@@ -144,8 +145,7 @@ def _write_manifest(namespace_name, repo_name, manifest):
     raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})

   # Ensure all the blobs in the manifest exist.
-  storage_query = model.storage.lookup_repo_storages_by_content_checksum(repo, manifest.checksums)
-  storage_map = {storage.content_checksum: storage for storage in storage_query}
+  storage_map = v2.lookup_blobs_by_digest(namespace_name, repo_name, manifest.checksums)
   for layer in manifest.layers:
     digest_str = str(layer.digest)
     if digest_str not in storage_map:
@@ -153,14 +153,14 @@ def _write_manifest(namespace_name, repo_name, manifest):

   # Lookup all the images and their parent images (if any) inside the manifest.
   # This will let us know which v1 images we need to synthesize and which ones are invalid.
-  all_image_ids = list(manifest.docker_image_ids | manifest.parent_image_ids)
+  all_image_ids = list(manifest.parent_image_ids | manifest.image_ids)
   images_map = v2.get_docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)

   # Rewrite any v1 image IDs that do not match the checksum in the database.
   try:
-    rewritten_images = manifest.rewrite_invalid_image_ids(images_map)
+    rewritten_images = list(manifest.rewrite_invalid_image_ids(images_map))
     for rewritten_image in rewritten_images:
-      image = v2.synthesize_v1_image(
+      v1_metadata = v2.synthesize_v1_image(
         repo,
         storage_map[rewritten_image.content_checksum],
         rewritten_image.image_id,
@@ -170,13 +170,13 @@ def _write_manifest(namespace_name, repo_name, manifest):
         rewritten_image.compat_json,
         rewritten_image.parent_image_id,
       )
-      images_map[image.image_id] = image
   except ManifestException as me:
     raise ManifestInvalid(detail={'message': me.message})

   # Store the manifest pointing to the tag.
-  leaf_layer_id = images_map[manifest.leaf_layer.v1_metadata.image_id].image_id
-  v2.save_manifest(namespace_name, repo_name, tag_name, leaf_layer_id, manifest.digest, manifest.bytes)
+  leaf_layer_id = rewritten_images[-1].image_id
+  v2.save_manifest(namespace_name, repo_name, manifest.tag, leaf_layer_id, manifest.digest,
+                   manifest.bytes)

   # Queue all blob manifests for replication.
   # TODO(jschorr): Find a way to optimize this insertion.
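The new `leaf_layer_id = rewritten_images[-1].image_id` only works if `rewrite_invalid_image_ids` yields images in base-to-leaf order, which is also why the generator is now materialized with `list(...)` before being indexed. A toy model of that ordering assumption (the types and the ordering contract are my inference, not guaranteed by the diff):

```python
from collections import namedtuple

RewrittenImage = namedtuple('RewrittenImage', ['image_id', 'parent_image_id'])

def rewrite_chain(image_ids):
  # Assumed contract: yields base first, leaf last.
  parent = None
  for image_id in image_ids:
    yield RewrittenImage(image_id, parent)
    parent = image_id

# list() materializes the generator so it can be both iterated and indexed.
rewritten_images = list(rewrite_chain(['base', 'middle', 'leaf']))
leaf_layer_id = rewritten_images[-1].image_id
print(leaf_layer_id)  # leaf
```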
@@ -206,25 +206,19 @@ def _write_manifest(namespace_name, repo_name, manifest):
 @process_registry_jwt_auth(scopes=['pull', 'push'])
 @require_repo_write
 @anon_protect
-def delete_manifest_by_digest(namespace_name, repo_name, digest):
+def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
   """
   Delete the manifest specified by the digest.

   Note: there is no equivalent method for deleting by tag name because it is
   forbidden by the spec.
   """
-  tag = v2.get_tag_by_manifest_digest(namespace_name, repo_name, digest)
-  if tag is None:
+  # TODO(jzelinskie): disambiguate between no manifest and no tag
+  tags = v2.delete_manifest_by_digest(namespace_name, repo_name, manifest_ref)
+  if not tags:
     raise ManifestUnknown()

-  # Mark the tag as no longer alive.
-  deleted = v2.delete_tag(namespace_name, repo_name, tag.name)
-  if not deleted:
-    # Tag was not alive.
-    raise ManifestUnknown()
-
-  track_and_log('delete_tag', tag.repository, tag=tag.name, digest=digest)
+  for tag in tags:
+    track_and_log('delete_tag', tag.repository, tag=tag.name, digest=manifest_ref)

   return Response(status=202)
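`delete_manifest_by_digest` also shows the interface style: the lookup, delete, and log dance collapses into one call that returns every tag it deleted, so the endpoint 404s on an empty list and logs one event per tag otherwise. A sketch of that contract against an in-memory index (a stand-in data structure, not Quay's schema):

```python
def delete_manifest_by_digest(tags_by_digest, digest):
  """Delete all tags pointing at the digest; return the deleted tags (possibly [])."""
  return tags_by_digest.pop(digest, [])

index = {'sha256:abc': ['latest', 'v1.0']}
tags = delete_manifest_by_digest(index, 'sha256:abc')
for tag in tags:
  print('delete_tag %s' % tag)                           # logged per deleted tag
print(delete_manifest_by_digest(index, 'sha256:abc'))    # [] -> ManifestUnknown
```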


@@ -5,6 +5,7 @@ from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, paginate
 from endpoints.v2.errors import NameUnknown
 from endpoints.decorators import anon_protect
+from data.interfaces import v2

 @v2_bp.route('/<repopath:repository>/tags/list', methods=['GET'])
 @parse_repository_name()


@@ -11,6 +11,7 @@ from auth.permissions import (ModifyRepositoryPermission, ReadRepositoryPermissi
                               CreateRepositoryPermission)
 from endpoints.v2 import v2_bp
 from endpoints.decorators import anon_protect
+from data.interfaces import v2
 from util.cache import no_cache
 from util.names import parse_namespace_repository, REPOSITORY_NAME_REGEX
 from util.security.registry_jwt import generate_bearer_token, build_context_and_subject