Merge pull request #360 from coreos-inc/python-registry-v2-smallfixes
Python registry v2 fixes
commit 0dc0f23c5e

3 changed files with 23 additions and 22 deletions
@@ -12,11 +12,10 @@ from util.cache import cache_control_flask_restful
 
 
 def image_view(image, image_map, include_ancestors=True):
-  extended_props = image
+  # TODO: Remove once we've migrated all storage data to the image records.
+  storage_props = image
   if image.storage and image.storage.id:
-    extended_props = image.storage
+    storage_props = image.storage
 
-  command = extended_props.command
-
   def docker_id(aid):
     if not aid or not aid in image_map:
@@ -24,12 +23,13 @@ def image_view(image, image_map, include_ancestors=True):
     return image_map[aid].docker_image_id
 
+  command = image.command or storage_props.command
   image_data = {
     'id': image.docker_image_id,
-    'created': format_date(extended_props.created),
-    'comment': extended_props.comment,
+    'created': format_date(image.created or storage_props.created),
+    'comment': image.comment or storage_props.comment,
     'command': json.loads(command) if command else None,
-    'size': extended_props.image_size,
+    'size': storage_props.image_size,
     'uploading': image.storage.uploading,
     'sort_index': len(image.ancestors),
   }
 
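Note on the hunk above: each metadata field now prefers the value migrated onto the image record and falls back to the legacy storage record. A minimal, self-contained sketch of that fallback pattern (the Record class and values are hypothetical stand-ins, not Quay's actual models):

    # Hypothetical stand-in for the image/storage database records.
    class Record(object):
      def __init__(self, created=None, comment=None):
        self.created = created
        self.comment = comment

    image = Record(comment='migrated comment')        # comment already migrated
    storage_props = Record(created='2015-09-01',      # created still only on storage
                           comment='legacy comment')

    # Prefer the image record; fall back to the legacy storage data.
    print(image.comment or storage_props.comment)     # -> 'migrated comment'
    print(image.created or storage_props.created)     # -> '2015-09-01'

One caveat of the `a or b` idiom: it also falls through on falsy-but-present values such as an empty string, which is presumably acceptable while the migration is in flight.
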
@@ -15,6 +15,7 @@ from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermissi
                               AdministerRepositoryPermission)
 from data import model
 from util.http import abort
+from app import app
 
 
 
@@ -68,7 +69,10 @@ def v2_support_enabled():
   response = make_response('true', 401)
   realm_hostname = urlparse(request.url).netloc
   realm_auth_path = url_for('v2.generate_registry_jwt')
-  authenticate = 'Bearer realm="{0}{1}",service="quay"'.format(realm_hostname, realm_auth_path)
+  scheme = app.config['PREFERRED_URL_SCHEME']
+
+  authenticate = 'Bearer realm="{0}://{1}{2}",service="quay"'.format(scheme, realm_hostname,
+                                                                     realm_auth_path)
   response.headers['WWW-Authenticate'] = authenticate
 
   response.headers['Docker-Distribution-API-Version'] = 'registry/2.0'
 
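The fix prepends the configured URL scheme so the realm in the auth challenge is a fully qualified URL rather than a bare host and path. Roughly what the corrected header looks like, reproducing the formatting logic with hypothetical values:

    scheme = 'https'                     # app.config['PREFERRED_URL_SCHEME']
    realm_hostname = 'quay.example.com'  # urlparse(request.url).netloc
    realm_auth_path = '/v2/auth'         # url_for('v2.generate_registry_jwt')

    authenticate = 'Bearer realm="{0}://{1}{2}",service="quay"'.format(scheme, realm_hostname,
                                                                       realm_auth_path)
    print(authenticate)
    # Bearer realm="https://quay.example.com/v2/auth",service="quay"
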
@@ -109,8 +109,8 @@ def download_blob(namespace, repo_name, digest):
   return Response(storage.stream_read(found.locations, path), headers=headers)
 
 
-def _render_range(end_byte):
-  return 'bytes=0-{0}'.format(end_byte)
+def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
+  return '{0}0-{1}'.format('bytes=' if with_bytes_prefix else '', num_uploaded_bytes - 1)
 
 
 @v2_bp.route('/<namespace>/<repo_name>/blobs/uploads/', methods=['POST'])
 
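The reworked helper takes the number of bytes uploaded (rather than an end byte) and renders an inclusive, zero-based range, with the `bytes=` prefix now optional. Its behavior, copied from the diff with sample values:

    def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
      return '{0}0-{1}'.format('bytes=' if with_bytes_prefix else '', num_uploaded_bytes - 1)

    print(_render_range(1024))                            # -> bytes=0-1023
    print(_render_range(1024, with_bytes_prefix=False))   # -> 0-1023

Subtracting one fixes an off-by-one: after 1024 uploaded bytes, the inclusive range ends at byte 1023, not 1024.
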
@@ -133,7 +133,7 @@ def start_blob_upload(namespace, repo_name):
     return accepted
   else:
     # The user plans to send us the entire body right now
-    uploaded = _upload_chunk(namespace, repo_name, new_upload_uuid, range_required=False)
+    uploaded = _upload_chunk(namespace, repo_name, new_upload_uuid)
     uploaded.save()
 
     return _finish_upload(namespace, repo_name, uploaded, digest)
 
@@ -184,7 +184,7 @@ def _parse_range_header(range_header_text, valid_start):
   return (start, length)
 
 
-def _upload_chunk(namespace, repo_name, upload_uuid, range_required):
+def _upload_chunk(namespace, repo_name, upload_uuid):
   """ Common code among the various uploading paths for appending data to blobs.
       Callers MUST call .save() or .delete_instance() on the returned database object.
   """
@@ -195,10 +195,6 @@ def _upload_chunk(namespace, repo_name, upload_uuid, range_required):
 
   start_offset, length = 0, -1
   range_header = request.headers.get('range', None)
-
-  if range_required and range_header is None:
-    _range_not_satisfiable(found.byte_count)
-
   if range_header is not None:
     try:
       start_offset, length = _parse_range_header(range_header, found.byte_count)
 
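With `range_required` removed, a missing Range header is no longer rejected as unsatisfiable; the defaults simply stand. A condensed, self-contained sketch of the flow as it now reads (the inline parsing is a hypothetical stand-in for _parse_range_header):

    def chunk_bounds(range_header):
      start_offset, length = 0, -1             # defaults: offset 0, length unknown
      if range_header is not None:
        start, end = range_header.split('-')   # stand-in for _parse_range_header
        start_offset, length = int(start), int(end) - int(start) + 1
      return start_offset, length

    print(chunk_bounds(None))        # -> (0, -1): no header is now acceptable
    print(chunk_bounds('0-1023'))    # -> (0, 1024)
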
@@ -208,11 +204,12 @@ def _upload_chunk(namespace, repo_name, upload_uuid, range_required):
   input_fp = wrap_with_hash(get_input_stream(request), found.sha_state)
 
   try:
-    storage.stream_upload_chunk({found.location.name}, upload_uuid, start_offset, length, input_fp)
+    length_written = storage.stream_upload_chunk({found.location.name}, upload_uuid, start_offset,
+                                                 length, input_fp)
   except InvalidChunkException:
     _range_not_satisfiable(found.byte_count)
 
-  found.byte_count += length
+  found.byte_count += length_written
   return found
 
 
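Accumulating the bytes actually written matters because `length` can legitimately be -1 (no Range header supplied), in which case adding it would corrupt the upload's byte count. A toy illustration of the difference (the 512 is a hypothetical figure for what the storage engine reports):

    length = -1                   # unknown chunk length: no Range header supplied
    byte_count = 0
    byte_count += length          # old behavior
    print(byte_count)             # -> -1 (wrong)

    byte_count = 0
    length_written = 512          # bytes the storage engine actually wrote
    byte_count += length_written  # new behavior
    print(byte_count)             # -> 512 (correct)
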
@@ -240,12 +237,12 @@ def _finish_upload(namespace, repo_name, upload_obj, expected_digest):
 @require_repo_write
 @anon_protect
 def upload_chunk(namespace, repo_name, upload_uuid):
-  upload = _upload_chunk(namespace, repo_name, upload_uuid, range_required=True)
+  upload = _upload_chunk(namespace, repo_name, upload_uuid)
   upload.save()
 
-  accepted = make_response('', 202)
+  accepted = make_response('', 204)
   accepted.headers['Location'] = _current_request_path()
-  accepted.headers['Range'] = _render_range(upload.byte_count)
+  accepted.headers['Range'] = _render_range(upload.byte_count, with_bytes_prefix=False)
   accepted.headers['Docker-Upload-UUID'] = upload_uuid
   return accepted
 
 
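Per the Docker registry v2 protocol, a chunk PATCH is acknowledged with 204 No Content and a bare (unprefixed) Range header, which is what the two response changes above produce together. The response now looks roughly like this (path, UUID, and offset are hypothetical):

    HTTP/1.1 204 No Content
    Location: /v2/devtable/simple/blobs/uploads/a1b2c3d4
    Range: 0-1023
    Docker-Upload-UUID: a1b2c3d4
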
@@ -259,7 +256,7 @@ def monolithic_upload_or_last_chunk(namespace, repo_name, upload_uuid):
   if digest is None:
     raise BlobUploadInvalid()
 
-  found = _upload_chunk(namespace, repo_name, upload_uuid, range_required=False)
+  found = _upload_chunk(namespace, repo_name, upload_uuid)
   return _finish_upload(namespace, repo_name, found, digest)
 