Add multipart upload retry to chunk uploads as well
parent 7c3ef6781d
commit 233f55829e

1 changed file with 19 additions and 17 deletions
@@ -219,7 +219,7 @@ class _CloudStorage(BaseStorageV2):
     self._metric_queue.put_deprecated('MultipartUploadSuccess', 1)
     self._metric_queue.multipart_upload_end.Inc(labelvalues=['success'])
 
-    mp.complete_upload()
+    self._complete_s3_multipart_upload(mp, path)
 
     return total_bytes_written, write_error
 
@@ -347,6 +347,23 @@ class _CloudStorage(BaseStorageV2):
         msg = 'Failed to clean up chunk %s for reupload of %s'
         logger.exception(msg, chunk.path, final_path)
 
+  @staticmethod
+  def _complete_s3_multipart_upload(mpu, path):
+    # Note: Sometimes Amazon S3 simply raises an internal error when trying to complete a
+    # multipart upload. The recommendation is to simply try calling complete_upload again.
+    for remaining_retries in range(3, -1, -1):
+      try:
+        mpu.complete_upload()
+        break
+      except S3ResponseError as s3re:
+        if remaining_retries and s3re.status == 200 and s3re.error_code == 'InternalError':
+          # Weird internal error case. Retry.
+          continue
+
+        # Otherwise, raise it.
+        logger.exception('Exception trying to complete multipart upload for: %s', path)
+        raise s3re
+
   def complete_chunked_upload(self, uuid, final_path, storage_metadata, force_client_side=False):
     self._initialize_cloud_conn()
     chunk_list = self._chunk_list_from_metadata(storage_metadata)
@@ -376,22 +393,7 @@ class _CloudStorage(BaseStorageV2):
           mpu.copy_part_from_key(self.get_cloud_bucket().name, abs_chunk_path, part_num,
                                  start=0, end=chunk_end_offset_inclusive)
 
-        # Note: Sometimes Amazon S3 simply raises an internal error when trying to complete a
-        # multipart upload. The recommendation is to simply try calling complete_upload again.
-        for remaining_retries in range(3, -1, -1):
-          try:
-            mpu.complete_upload()
-            break
-          except S3ResponseError as s3re:
-            if remaining_retries and s3re.status == 200 and s3re.error_code == 'InternalError':
-              # Weird internal error case. Retry.
-              continue
-
-            # Otherwise, raise it.
-            logger.exception('Exception trying to complete multipart upload for: %s', final_path)
-            raise s3re
-
+        self._complete_s3_multipart_upload(mpu, final_path)
       except IOError as ioe:
         # Something bad happened, log it and then give up
         msg = 'Exception when attempting server-side assembly for: %s'
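For illustration, here is a minimal, self-contained sketch of the retry pattern this commit factors out into _complete_s3_multipart_upload. It runs without boto or a real bucket: S3ResponseError and FlakyMultipartUpload are hypothetical stand-ins (boto's real exception carries more fields), and only the loop mirrors the helper added above, which retries the case the in-code comment describes, where S3 answers CompleteMultipartUpload with HTTP 200 but an InternalError body.

import logging

logger = logging.getLogger(__name__)


class S3ResponseError(Exception):
  """Hypothetical stand-in for boto.exception.S3ResponseError."""

  def __init__(self, status, error_code):
    super(S3ResponseError, self).__init__(status, error_code)
    self.status = status
    self.error_code = error_code


class FlakyMultipartUpload(object):
  """Stub upload that fails with the 'HTTP 200 + InternalError' quirk, then succeeds."""

  def __init__(self, failures=2):
    self.failures = failures
    self.completed = False

  def complete_upload(self):
    if self.failures > 0:
      self.failures -= 1
      raise S3ResponseError(200, 'InternalError')
    self.completed = True


def complete_s3_multipart_upload(mpu, path):
  # Same shape as the helper in the diff: up to four attempts, retrying only
  # the odd case where S3 returns HTTP 200 with an InternalError body.
  for remaining_retries in range(3, -1, -1):
    try:
      mpu.complete_upload()
      break
    except S3ResponseError as s3re:
      if remaining_retries and s3re.status == 200 and s3re.error_code == 'InternalError':
        # Weird internal error case. Retry.
        continue

      # Otherwise, log and re-raise.
      logger.exception('Exception trying to complete multipart upload for: %s', path)
      raise


mpu = FlakyMultipartUpload(failures=2)
complete_s3_multipart_upload(mpu, 'uploads/example-chunk')
assert mpu.completed

With failures=2, the first two attempts hit the retried branch and the third succeeds; with more than three failures, remaining_retries reaches 0 on the last attempt and the error is logged and re-raised.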