Implement some new methods on the storage engines.

This commit is contained in:
Jake Moshenko 2015-08-26 17:08:42 -04:00
parent 4fa37a46d1
commit 398202e6fc
10 changed files with 211 additions and 98 deletions

View file

@@ -119,8 +119,8 @@ def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
@anon_protect
def start_blob_upload(namespace, repo_name):
location_name = storage.preferred_locations[0]
new_upload_uuid = storage.initiate_chunked_upload(location_name)
model.blob.initiate_upload(namespace, repo_name, new_upload_uuid, location_name)
new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name)
model.blob.initiate_upload(namespace, repo_name, new_upload_uuid, location_name, upload_metadata)
digest = request.args.get('digest', None)
if digest is None:
@@ -205,11 +205,13 @@ def _upload_chunk(namespace, repo_name, upload_uuid):
input_fp = wrap_with_handler(input_fp, found.sha_state.update)
try:
length_written = storage.stream_upload_chunk({found.location.name}, upload_uuid, start_offset,
length, input_fp)
length_written, new_metadata = storage.stream_upload_chunk({found.location.name}, upload_uuid,
start_offset, length, input_fp,
found.storage_metadata)
except InvalidChunkException:
_range_not_satisfiable(found.byte_count)
found.storage_metadata = new_metadata
found.byte_count += length_written
return found
@@ -222,7 +224,8 @@ def _finish_upload(namespace, repo_name, upload_obj, expected_digest):
# Mark the blob as uploaded.
final_blob_location = digest_tools.content_path(expected_digest)
storage.complete_chunked_upload({upload_obj.location.name}, upload_obj.uuid, final_blob_location)
storage.complete_chunked_upload({upload_obj.location.name}, upload_obj.uuid, final_blob_location,
upload_obj.storage_metadata)
model.blob.store_blob_record_and_temp_link(namespace, repo_name, expected_digest,
upload_obj.location, upload_obj.byte_count,
app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
@@ -278,6 +281,6 @@ def cancel_upload(namespace, repo_name, upload_uuid):
# We delete the record for the upload first, since if the partial upload in
# storage fails to delete, it doesn't break anything
found.delete_instance()
storage.cancel_chunked_upload({found.location.name}, found.uuid)
storage.cancel_chunked_upload({found.location.name}, found.uuid, found.storage_metadata)
return make_response('', 204)