parent 4d6f96cd6c
commit cbf7c2bf44
4 changed files with 58 additions and 55 deletions
@@ -18,7 +18,6 @@ from util.cache import cache_control
 from util.registry.filelike import wrap_with_handler, StreamSlice
 from util.registry.gzipstream import calculate_size_handler
 from util.registry.torrent import PieceHasher
-from storage.basestorage import InvalidChunkException
 
 
 logger = logging.getLogger(__name__)
@@ -134,12 +133,15 @@ def start_blob_upload(namespace_name, repo_name):
     return accepted
   else:
     # The user plans to send us the entire body right now
-    uploaded, error = _upload_chunk(namespace_name, repo_name, new_upload_uuid)
-    uploaded.save()
-    if error:
-      _range_not_satisfiable(uploaded.byte_count)
+    blob_upload, upload_error = _upload_chunk(namespace_name, repo_name, new_upload_uuid)
+    blob_upload.save()
 
-    return _finish_upload(namespace_name, repo_name, uploaded, digest)
+    if upload_error:
+      logger.error('Got error when uploading chunk for blob %s under repository %s/%s: %s',
+                   namespace_name, repo_name, new_upload_uuid, upload_error)
+      _range_not_satisfiable(blob_upload.byte_count)
+
+    return _finish_upload(namespace_name, repo_name, blob_upload, digest)
 
 
 @v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['GET'])
@@ -192,6 +194,7 @@ def _parse_range_header(range_header_text):
 def _upload_chunk(namespace_name, repo_name, upload_uuid):
   """ Common code among the various uploading paths for appending data to blobs.
+      Callers MUST call .save() or .delete_instance() on the returned database object.
       Returns the BlobUpload object and the error that occurred, if any (or None if none).
   """
   try:
     found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
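
(Note on the docstring contract added above: every caller is now responsible for persisting or discarding the returned BlobUpload row itself. A minimal sketch of a conforming caller, assuming the Peewee-style .save()/.delete_instance() methods the docstring names; this snippet is illustrative only and is not part of the commit:)

# Hypothetical caller honoring the documented contract (illustration only):
# either persist the row, or delete it to abandon the upload.
blob_upload, upload_error = _upload_chunk(namespace_name, repo_name, upload_uuid)
if upload_error:
  blob_upload.delete_instance()  # Peewee: drop the row, cancelling the upload
  raise BlobUploadInvalid()
blob_upload.save()               # Peewee: persist the updated upload state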
@@ -211,6 +214,7 @@ def _upload_chunk(namespace_name, repo_name, upload_uuid):
 
   location_set = {found.location.name}
 
+  upload_error = None
   with database.CloseForLongOperation(app.config):
     input_fp = get_input_stream(request)
 
@@ -227,7 +231,7 @@ def _upload_chunk(namespace_name, repo_name, upload_uuid):
     # We use this to escape early in case we have already processed all of the bytes the user
     # wants to upload
     if length == 0:
-      return found
+      return found, None
 
     input_fp = wrap_with_handler(input_fp, found.sha_state.update)
 
@@ -252,14 +256,10 @@ def _upload_chunk(namespace_name, repo_name, upload_uuid):
       size_info, fn = calculate_size_handler()
       input_fp = wrap_with_handler(input_fp, fn)
 
-    try:
-      length_written, new_metadata, error = storage.stream_upload_chunk(location_set, upload_uuid,
-                                                                        start_offset, length,
-                                                                        input_fp,
-                                                                        found.storage_metadata,
-                                                                        content_type=BLOB_CONTENT_TYPE)
-    except InvalidChunkException:
-      _range_not_satisfiable(found.byte_count)
+    chunk_result = storage.stream_upload_chunk(location_set, upload_uuid, start_offset, length,
+                                               input_fp, found.storage_metadata,
+                                               content_type=BLOB_CONTENT_TYPE)
+    length_written, new_metadata, upload_error = chunk_result
 
     # If we determined an uncompressed size and this is the first chunk, add it to the blob.
     # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks.
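
(Note: this hunk swaps exception-based error handling for an error returned as the third element of the result tuple, which is also why the InvalidChunkException import and the try/except disappear. A sketch of the driver-side convention this implies; everything beyond the (length_written, new_metadata, upload_error) unpack visible here is an assumption:)

# Assumed storage-driver shape (illustration only): report failures through
# the return value instead of raising, so callers can unpack a single tuple.
def stream_upload_chunk(self, location_set, upload_uuid, offset, length, in_fp,
                        storage_metadata, content_type=None):
  try:
    bytes_written, updated_metadata = self._stream_write(upload_uuid, offset, length, in_fp,
                                                         storage_metadata, content_type)
    return bytes_written, updated_metadata, None
  except InvalidChunkException as ice:
    # No bytes are counted for a rejected chunk; hand the error to the caller.
    return 0, storage_metadata, ice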
@@ -277,7 +277,7 @@ def _upload_chunk(namespace_name, repo_name, upload_uuid):
   found.storage_metadata = new_metadata
   found.byte_count += length_written
   found.chunk_count += 1
-  return found, error
+  return found, upload_error
 
 
 def _finish_upload(namespace_name, repo_name, upload_obj, expected_digest):
@@ -330,15 +330,17 @@ def _finish_upload(namespace_name, repo_name, upload_obj, expected_digest):
 @require_repo_write
 @anon_protect
 def upload_chunk(namespace_name, repo_name, upload_uuid):
-  upload, error = _upload_chunk(namespace_name, repo_name, upload_uuid)
-  upload.save()
+  blob_upload, upload_error = _upload_chunk(namespace_name, repo_name, upload_uuid)
+  blob_upload.save()
 
-  if error:
-    _range_not_satisfiable(upload.byte_count)
+  if upload_error:
+    logger.error('Got error when uploading chunk for blob %s under repository %s/%s: %s',
+                 namespace_name, repo_name, upload_uuid, upload_error)
+    _range_not_satisfiable(blob_upload.byte_count)
 
   accepted = make_response('', 204)
   accepted.headers['Location'] = _current_request_path()
-  accepted.headers['Range'] = _render_range(upload.byte_count, with_bytes_prefix=False)
+  accepted.headers['Range'] = _render_range(blob_upload.byte_count, with_bytes_prefix=False)
   accepted.headers['Docker-Upload-UUID'] = upload_uuid
   return accepted
 
@@ -353,13 +355,15 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
   if digest is None:
     raise BlobUploadInvalid()
 
-  found, error = _upload_chunk(namespace_name, repo_name, upload_uuid)
+  blob_upload, upload_error = _upload_chunk(namespace_name, repo_name, upload_uuid)
+  blob_upload.save()
 
-  if error:
-    found.save()
-    _range_not_satisfiable(found.byte_count)
+  if upload_error:
+    logger.error('Got error when uploading chunk for blob %s under repository %s/%s: %s',
+                 namespace_name, repo_name, upload_uuid, upload_error)
+    _range_not_satisfiable(blob_upload.byte_count)
 
-  return _finish_upload(namespace_name, repo_name, found, digest)
+  return _finish_upload(namespace_name, repo_name, blob_upload, digest)
 
 
 @v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['DELETE'])
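
(Note: _range_not_satisfiable itself is not shown in this diff, but since control never continues past it in any caller, it presumably aborts the request with 416 Range Not Satisfiable. A hedged sketch of that assumed behavior; the exact response headers are a guess, not taken from this commit:)

from flask import abort, make_response

def _range_not_satisfiable(valid_end):
  # Assumed behavior (illustration only): abort with 416 and advertise the
  # bytes that were committed so the client can resume from that offset.
  response = make_response('', 416)
  response.headers['Range'] = '0-%s' % valid_end
  abort(response)  # flask.abort accepts a Response and raises it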