diff --git a/endpoints/api/image.py b/endpoints/api/image.py
index 014bed412..0814e75e1 100644
--- a/endpoints/api/image.py
+++ b/endpoints/api/image.py
@@ -12,11 +12,10 @@ from util.cache import cache_control_flask_restful
 
 
 def image_view(image, image_map, include_ancestors=True):
-  extended_props = image
+  # TODO: Remove once we've migrated all storage data to the image records.
+  storage_props = image
   if image.storage and image.storage.id:
-    extended_props = image.storage
-
-  command = extended_props.command
+    storage_props = image.storage
 
   def docker_id(aid):
     if not aid or not aid in image_map:
@@ -24,12 +23,13 @@ def image_view(image, image_map, include_ancestors=True):
 
     return image_map[aid].docker_image_id
 
+  command = image.command or storage_props.command
   image_data = {
     'id': image.docker_image_id,
-    'created': format_date(extended_props.created),
-    'comment': extended_props.comment,
+    'created': format_date(image.created or storage_props.created),
+    'comment': image.comment or storage_props.comment,
     'command': json.loads(command) if command else None,
-    'size': extended_props.image_size,
+    'size': storage_props.image_size,
     'uploading': image.storage.uploading,
     'sort_index': len(image.ancestors),
   }
diff --git a/endpoints/v2/__init__.py b/endpoints/v2/__init__.py
index e0d49b70c..66e4a6cbd 100644
--- a/endpoints/v2/__init__.py
+++ b/endpoints/v2/__init__.py
@@ -15,6 +15,7 @@ from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermissi
                               AdministerRepositoryPermission)
 from data import model
 from util.http import abort
+from app import app
 
 
 
@@ -68,7 +69,10 @@ def v2_support_enabled():
   response = make_response('true', 401)
 
   realm_hostname = urlparse(request.url).netloc
   realm_auth_path = url_for('v2.generate_registry_jwt')
-  authenticate = 'Bearer realm="{0}{1}",service="quay"'.format(realm_hostname, realm_auth_path)
+  scheme = app.config['PREFERRED_URL_SCHEME']
+
+  authenticate = 'Bearer realm="{0}://{1}{2}",service="quay"'.format(scheme, realm_hostname,
+                                                                    realm_auth_path)
   response.headers['WWW-Authenticate'] = authenticate
   response.headers['Docker-Distribution-API-Version'] = 'registry/2.0'
diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py
index 78d71ec58..013627d81 100644
--- a/endpoints/v2/blob.py
+++ b/endpoints/v2/blob.py
@@ -109,8 +109,8 @@ def download_blob(namespace, repo_name, digest):
   return Response(storage.stream_read(found.locations, path), headers=headers)
 
 
-def _render_range(end_byte):
-  return 'bytes=0-{0}'.format(end_byte)
+def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
+  return '{0}0-{1}'.format('bytes=' if with_bytes_prefix else '', num_uploaded_bytes - 1)
 
 
 @v2_bp.route('/<namespace>/<repo_name>/blobs/uploads/', methods=['POST'])
@@ -133,7 +133,7 @@ def start_blob_upload(namespace, repo_name):
     return accepted
   else:
     # The user plans to send us the entire body right now
-    uploaded = _upload_chunk(namespace, repo_name, new_upload_uuid, range_required=False)
+    uploaded = _upload_chunk(namespace, repo_name, new_upload_uuid)
     uploaded.save()
 
     return _finish_upload(namespace, repo_name, uploaded, digest)
@@ -184,7 +184,7 @@ def _parse_range_header(range_header_text, valid_start):
 
   return (start, length)
 
-def _upload_chunk(namespace, repo_name, upload_uuid, range_required):
+def _upload_chunk(namespace, repo_name, upload_uuid):
   """ Common code among the various uploading paths for appending data to blobs.
       Callers MUST call .save() or .delete_instance() on the returned database object.
   """
@@ -195,10 +195,6 @@ def _upload_chunk(namespace, repo_name, upload_uuid):
 
   start_offset, length = 0, -1
   range_header = request.headers.get('range', None)
-
-  if range_required and range_header is None:
-    _range_not_satisfiable(found.byte_count)
-
   if range_header is not None:
     try:
       start_offset, length = _parse_range_header(range_header, found.byte_count)
@@ -208,11 +204,12 @@ def _upload_chunk(namespace, repo_name, upload_uuid):
 
   input_fp = wrap_with_hash(get_input_stream(request), found.sha_state)
 
   try:
-    storage.stream_upload_chunk({found.location.name}, upload_uuid, start_offset, length, input_fp)
+    length_written = storage.stream_upload_chunk({found.location.name}, upload_uuid, start_offset,
+                                                 length, input_fp)
   except InvalidChunkException:
     _range_not_satisfiable(found.byte_count)
 
-  found.byte_count += length
+  found.byte_count += length_written
 
   return found
@@ -240,12 +237,12 @@ def _finish_upload(namespace, repo_name, upload_obj, expected_digest):
 @require_repo_write
 @anon_protect
 def upload_chunk(namespace, repo_name, upload_uuid):
-  upload = _upload_chunk(namespace, repo_name, upload_uuid, range_required=True)
+  upload = _upload_chunk(namespace, repo_name, upload_uuid)
   upload.save()
 
-  accepted = make_response('', 202)
+  accepted = make_response('', 204)
   accepted.headers['Location'] = _current_request_path()
-  accepted.headers['Range'] = _render_range(upload.byte_count)
+  accepted.headers['Range'] = _render_range(upload.byte_count, with_bytes_prefix=False)
   accepted.headers['Docker-Upload-UUID'] = upload_uuid
 
   return accepted
@@ -259,7 +256,7 @@ def monolithic_upload_or_last_chunk(namespace, repo_name, upload_uuid):
 def monolithic_upload_or_last_chunk(namespace, repo_name, upload_uuid):
   digest = request.args.get('digest', None)
   if digest is None:
     raise BlobUploadInvalid()
 
-  found = _upload_chunk(namespace, repo_name, upload_uuid, range_required=False)
+  found = _upload_chunk(namespace, repo_name, upload_uuid)
   return _finish_upload(namespace, repo_name, found, digest)