add client side chunk join method

Jimmy Zelinskie 2015-09-30 17:46:22 -04:00
parent ffeb99d4ee
commit 6ed5087a3c


@@ -318,6 +318,34 @@ class _CloudStorage(BaseStorageV2):
def _chunk_list_from_metadata(storage_metadata):
return [_PartUploadMetadata(*chunk_args) for chunk_args in storage_metadata[_CHUNKS_KEY]]
def _client_side_chunk_join(self, final_path, chunk_list):
# If there's only one chunk, just "move" (copy and delete) the key and call it a day.
if len(chunk_list) == 1:
chunk_path = chunk_list[0].path
# Let the copy raise an exception if it fails.
self._cloud_bucket.copy_key(final_path, self._bucket_name, chunk_path)
# Attempt to clean up the old chunk.
try:
self._cloud_bucket.delete_key(chunk_path)
except IOError:
# We failed to delete a chunk. This sucks, but we shouldn't fail the push.
msg = 'Failed to clean up chunk %s for move of %s'
logger.exception(msg, chunk_path, final_path)
else:
# Concatenate and write all the chunks as one key.
concatenated = filelike.FilelikeStreamConcat(self._chunk_generator(chunk_list))
self.stream_write(final_path, concatenated)
# Attempt to clean up all the chunks.
for chunk in chunk_list:
try:
self._cloud_bucket.delete_key(chunk.path)
except IOError:
# We failed to delete a chunk. This sucks, but we shouldn't fail the push.
msg = 'Failed to clean up chunk %s for reupload of %s'
logger.exception(msg, chunk.path, final_path)
def complete_chunked_upload(self, uuid, final_path, storage_metadata):
self._initialize_cloud_conn()
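Note: the new _client_side_chunk_join relies on self._chunk_generator, which is defined elsewhere in _CloudStorage and not shown in this diff. A minimal sketch of what such a generator might look like, assuming boto-style keys (boto Key objects are file-like, so FilelikeStreamConcat can read from them directly):

def _chunk_generator(self, chunk_list):
  # Hypothetical sketch: yield one readable file-like object per stored chunk,
  # in order, so the concatenated stream reads the chunks back-to-back.
  for chunk in chunk_list:
    yield self._cloud_bucket.get_key(chunk.path)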
@@ -356,9 +384,7 @@ class _CloudStorage(BaseStorageV2):
else:
# We are going to turn all of the server side objects into a single file-like stream, and
# pass that to stream_write to chunk and upload the final object.
logger.warning('Performing client side assembly of multi-part upload for: %s', final_path)
concatenated = filelike.FilelikeStreamConcat(self._chunk_generator(chunk_list))
self.stream_write(final_path, concatenated)
self._client_side_chunk_join(final_path, chunk_list)
def cancel_chunked_upload(self, uuid, storage_metadata):
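FilelikeStreamConcat comes from Quay's util/filelike module; it wraps an iterator of file-like objects and presents them as a single readable stream, which stream_write then chunks and uploads. A rough sketch of the idea, not the module's actual implementation:

class FilelikeStreamConcat(object):
  # Sketch: read() drains the current file-like object, then advances to the
  # next one from the iterator until everything is exhausted.
  def __init__(self, file_iterator):
    self._file_iterator = file_iterator
    self._current = None

  def read(self, size=8192):
    while True:
      if self._current is None:
        try:
          self._current = next(self._file_iterator)
        except StopIteration:
          return ''  # All underlying streams are exhausted.
      data = self._current.read(size)
      if data:
        return data
      self._current = None  # Current stream is done; move to the next.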
@@ -475,15 +501,8 @@ class RadosGWStorage(_CloudStorage):
def complete_chunked_upload(self, uuid, final_path, storage_metadata):
self._initialize_cloud_conn()
# RadosGW does not support multipart copying from keys, so are forced to join
# RadosGW does not support multipart copying from keys, so we are forced to join
# it all locally and then reupload.
# See https://github.com/ceph/ceph/pull/5139
chunk_list = self._chunk_list_from_metadata(storage_metadata)
if len(chunk_list) == 1:
# If there's only one chunk, just "move" the key and call it a day.
chunk_path = chunk_list[0].path
self._cloud_bucket.copy_key(final_path, self._bucket_name, chunk_path)
self._cloud_bucket.delete_key(chunk_path)
return
concatenated = filelike.FilelikeStreamConcat(self._chunk_generator(chunk_list))
self.stream_write(final_path, concatenated)
self._client_side_chunk_join(final_path, chunk_list)
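For contrast, on S3 proper this join can be done entirely server-side with a multipart copy, which is what the linked ceph pull request tracks for RadosGW. A hedged sketch of that path using boto's multipart API (the method name here is illustrative and not part of this commit, and S3 imposes a minimum part size on copied parts):

def _server_side_chunk_join(self, final_path, chunk_list):
  # S3 can splice existing keys into a new object without the client ever
  # downloading the bytes.
  mpu = self._cloud_bucket.initiate_multipart_upload(final_path)
  for index, chunk in enumerate(chunk_list):
    # copy_part_from_key registers an existing key's bytes as part N of the
    # new object.
    mpu.copy_part_from_key(self._bucket_name, chunk.path, index + 1)
  mpu.complete_upload()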