diff --git a/storage/cloud.py b/storage/cloud.py
index 3c28480dc..dd8cd2256 100644
--- a/storage/cloud.py
+++ b/storage/cloud.py
@@ -342,8 +342,8 @@ class _CloudStorage(BaseStorageV2):
           abs_chunk_path = self._init_path(chunk.path)
           part_num = chunk_offset + 1
           chunk_end_offset_inclusive = chunk.length - 1
-          mpu.copy_part_from_key(self.get_cloud_bucket().name, abs_chunk_path, part_num, 0,
-                                 chunk_end_offset_inclusive)
+          mpu.copy_part_from_key(self.get_cloud_bucket().name, abs_chunk_path, part_num,
+                                 start=0, end=chunk_end_offset_inclusive)
 
         mpu.complete_upload()
       except IOError as ioe:
@@ -354,10 +354,9 @@ class _CloudStorage(BaseStorageV2):
         raise ioe
 
     else:
-      logger.warning('Performing client side assmebly of multi-part upload for: %s', final_path)
-
       # We are going to turn all of the server side objects into a single file-like stream, and
       # pass that to stream_write to chunk and upload the final object.
+      logger.warning('Performing client side assembly of multi-part upload for: %s', final_path)
       concatenated = filelike.FilelikeStreamConcat(self._chunk_generator(chunk_list))
       self.stream_write(final_path, concatenated)
 
@@ -472,3 +471,19 @@ class RadosGWStorage(_CloudStorage):
       return None
 
     return super(RadosGWStorage, self).get_direct_upload_url(path, mime_type, requires_cors)
+
+  def complete_chunked_upload(self, uuid, final_path, storage_metadata):
+    self._initialize_cloud_conn()
+
+    # RadosGW does not support multipart copying from keys, so we are forced to join
+    # it all locally and then reupload.
+    # See https://github.com/ceph/ceph/pull/5139
+    chunk_list = self._chunk_list_from_metadata(storage_metadata)
+    if len(chunk_list) == 1:
+      # If there's only one chunk, just "move" the key and call it a day.
+      chunk_path = chunk_list[0].path
+      self._cloud_bucket.copy_key(final_path, self._bucket_name, chunk_path)
+      self._cloud_bucket.delete_key(chunk_path)
+      return
+
+    concatenated = filelike.FilelikeStreamConcat(self._chunk_generator(chunk_list))
+    self.stream_write(final_path, concatenated)
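
The first hunk switches `copy_part_from_key` to keyword arguments, which makes the inclusive byte range explicit instead of relying on positional order. Below is a minimal standalone sketch of the same server-side assembly pattern against boto 2's S3 API (the library the surrounding code uses); the function name, bucket name, and `(path, length)` chunk-list shape are hypothetical illustrations, not this repository's actual interfaces.

```python
# A minimal sketch (not this repository's code) of the server-side assembly
# pattern in the first hunk, using boto 2's S3 API. assemble_server_side,
# bucket_name, and the (path, length) chunk tuples are hypothetical.
import boto

def assemble_server_side(bucket_name, chunks, final_path):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    mpu = bucket.initiate_multipart_upload(final_path)
    try:
        for offset, (chunk_path, length) in enumerate(chunks):
            # Part numbers are 1-based, and copy_part_from_key takes an
            # *inclusive* byte range, so a whole chunk is start=0,
            # end=length - 1 -- exactly what the diff's keyword form spells out.
            mpu.copy_part_from_key(bucket_name, chunk_path, offset + 1,
                                   start=0, end=length - 1)
        mpu.complete_upload()
    except Exception:
        # Mirror the diff's IOError handling: cancel so the incomplete
        # multipart upload does not linger in the bucket.
        mpu.cancel_upload()
        raise
```

Passing `start` and `end` by keyword also guards against silently swapping the two offsets, which the original positional call made easy to do.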
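The new `RadosGWStorage.complete_chunked_upload` override exists because RadosGW lacks multipart copy-from-key support (per the linked ceph PR), but it short-circuits the single-chunk case: a server-side copy plus a delete stands in for a rename, since S3-style APIs have no native "move". A hedged sketch of that fast path, again in boto 2 terms with illustrative names:

```python
# A sketch of the single-chunk fast path: "move" an object by copying it
# server side and deleting the source. move_key and its parameters are
# hypothetical, not the method added in the diff.
import boto

def move_key(bucket_name, src_path, dest_path):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    # Bucket.copy_key(new_key_name, src_bucket_name, src_key_name) copies
    # entirely server side, so the object's bytes never transit the client.
    bucket.copy_key(dest_path, bucket_name, src_path)
    bucket.delete_key(src_path)
```

With more than one chunk, the override falls back to `FilelikeStreamConcat` plus `stream_write`, the same client-side assembly path as the `else:` branch touched by the second hunk.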