Add retry attempts for internal error on multipart upload

Fixes #1740
Joseph Schorr 2016-08-18 11:56:23 -04:00
parent 8d7b600cf3
commit 0bc90ea45b


@@ -3,6 +3,7 @@ import os
import logging
import copy
from boto.exception import S3ResponseError
import boto.s3.connection
import boto.s3.multipart
import boto.gs.connection
@@ -374,7 +375,22 @@ class _CloudStorage(BaseStorageV2):
        chunk_end_offset_inclusive = chunk.length - 1
        mpu.copy_part_from_key(self.get_cloud_bucket().name, abs_chunk_path, part_num,
                               start=0, end=chunk_end_offset_inclusive)

      # Note: Sometimes Amazon S3 simply raises an internal error when trying to complete a
      # multipart upload. The recommendation is to simply try calling complete_upload again.
      for remaining_retries in range(3, -1, -1):
        try:
          mpu.complete_upload()
          break
        except S3ResponseError as s3re:
          if remaining_retries and s3re.status == 200 and s3re.error_code == 'InternalError':
            # Weird internal error case. Retry.
            continue

          # Otherwise, raise it.
          logger.exception('Exception trying to complete multipart upload for: %s', final_path)
          raise s3re
    except IOError as ioe:
      # Something bad happened, log it and then give up
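
For reference, the retry logic added above can be read as a small standalone helper. This is a minimal sketch, assuming boto 2 and an open boto.s3.multipart.MultiPartUpload object named mpu; the helper name complete_upload_with_retries and the attempt count are illustrative and not part of the commit:

import logging

from boto.exception import S3ResponseError

logger = logging.getLogger(__name__)


def complete_upload_with_retries(mpu, final_path, attempts=4):
  """Complete a multipart upload, retrying transient S3 internal errors.

  Mirrors the pattern in the diff above: retry only when boto reports an
  S3ResponseError with status 200 and error_code 'InternalError'; anything
  else, or running out of attempts, is logged and re-raised.
  """
  for remaining_retries in range(attempts - 1, -1, -1):
    try:
      return mpu.complete_upload()
    except S3ResponseError as s3re:
      if remaining_retries and s3re.status == 200 and s3re.error_code == 'InternalError':
        # Transient internal error; try completing the upload again.
        continue

      logger.exception('Exception trying to complete multipart upload for: %s', final_path)
      raise

The status == 200 check is deliberate: S3 can answer CompleteMultipartUpload with an HTTP 200 response whose body carries an InternalError, and boto 2 surfaces that as an S3ResponseError whose status is 200, so only that narrow case is retried.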