Handle IOErrors in v2 uploads
commit 2dcc1f13a6
parent 35437c9f55
6 changed files with 63 additions and 42 deletions
@@ -166,17 +166,16 @@ class _CloudStorage(BaseStorageV2):
                                           **self._upload_params)
 
   def stream_write(self, path, fp, content_type=None, content_encoding=None):
-    return self._stream_write_internal(path, fp, content_type, content_encoding)
+    self._stream_write_internal(path, fp, content_type, content_encoding)
 
   def _stream_write_internal(self, path, fp, content_type=None, content_encoding=None,
                              cancel_on_error=True, size=filelike.READ_UNTIL_END):
+    error = None
     mp = self.__initiate_multipart_upload(path, content_type, content_encoding)
 
     # We are going to reuse this but be VERY careful to only read the number of bytes written to it
     buf = StringIO.StringIO()
 
     chunk_sizes = []
 
     num_part = 1
     total_bytes_written = 0
     while size == filelike.READ_UNTIL_END or total_bytes_written < size:
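Note on the reused buffer: the comment in the hunk above is the whole contract. The same StringIO is rewound and rewritten for every part, so stale bytes from a longer earlier part can linger past the current write; the upload call must therefore be capped at the number of bytes just staged. A minimal sketch of that pattern (upload_part is a hypothetical stand-in for mp.upload_part_from_file, which accepts a size= cap in boto):

import StringIO

def upload_parts(parts, upload_part):
    # Reuse one buffer across parts: rewind, write the fresh chunk, rewind
    # again, and let size= cap the read so leftover bytes from a longer
    # earlier part are never uploaded.
    buf = StringIO.StringIO()
    total = 0
    for num_part, data in enumerate(parts, 1):
        buf.seek(0)
        buf.write(data)
        buf.seek(0)
        upload_part(buf, num_part, size=len(data))
        total += len(data)
    return total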
@@ -192,26 +191,24 @@ class _CloudStorage(BaseStorageV2):
         if bytes_staged == 0:
           break
 
         chunk_sizes.append(bytes_staged)
 
         buf.seek(0)
         mp.upload_part_from_file(buf, num_part, size=bytes_staged)
         total_bytes_written += bytes_staged
         num_part += 1
-      except IOError:
+      except IOError as ex:
+        logger.warn('stream write error: %s', ex)
+        error = ex
         app.metric_queue.put('MultipartUploadFailure', 1)
         if cancel_on_error:
           mp.cancel_upload()
-          return 0
+          return 0, error
         else:
           break
 
     if total_bytes_written > 0:
       app.metric_queue.put('MultipartUploadSuccess', 1)
-      try:
-        mp.complete_upload()
-      except:
-        logger.error('complete_upload failed: chunk_sizes: %s', chunk_sizes)
-        raise
-    return total_bytes_written
+      mp.complete_upload()
+    return total_bytes_written, error
 
   def list_directory(self, path=None):
     self._initialize_cloud_conn()
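Note: _stream_write_internal now reports failure through its return value instead of raising. With cancel_on_error=True an IOError cancels the multipart upload and yields (0, error); with cancel_on_error=False the write loop stops early and the partial byte count is returned alongside the error. A hedged sketch of a caller under that contract (write_blob is a hypothetical name, not from this codebase):

def write_blob(storage, path, fp):
    # _stream_write_internal returns (bytes_written, error) rather than
    # raising on IOError.
    bytes_written, error = storage._stream_write_internal(path, fp)
    if error is not None:
        # With the default cancel_on_error=True the multipart upload has
        # already been cancelled, so no partial object is left behind.
        raise IOError('stream write failed: %s' % error)
    return bytes_written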
@@ -307,8 +304,8 @@ class _CloudStorage(BaseStorageV2):
 
     # We are going to upload each chunk to a separate key
     chunk_path = self._rel_upload_path(str(uuid4()))
-    bytes_written = self._stream_write_internal(chunk_path, in_fp, cancel_on_error=False,
-                                                size=length, content_type=content_type)
+    bytes_written, error = self._stream_write_internal(chunk_path, in_fp, cancel_on_error=False,
+                                                       size=length, content_type=content_type)
 
     new_metadata = copy.deepcopy(storage_metadata)
@@ -316,7 +313,7 @@ class _CloudStorage(BaseStorageV2):
     if bytes_written > 0:
       new_metadata[_CHUNKS_KEY].append(_PartUploadMetadata(chunk_path, offset, bytes_written))
 
-    return bytes_written, new_metadata
+    return bytes_written, new_metadata, error
 
   def _chunk_generator(self, chunk_list):
     for chunk in chunk_list:
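Note: the chunk-writing path now threads the error through as a third return value while still recording whatever bytes did land (cancel_on_error=False keeps the partial chunk and appends it to the metadata). A sketch of a consumer of the widened tuple (upload_chunk and all names here are illustrative stand-ins for the method above):

def drive_chunked_upload(upload_chunk, chunks, metadata):
    # upload_chunk returns (bytes_written, new_metadata, error).
    offset = 0
    for in_fp, length in chunks:
        written, metadata, error = upload_chunk(in_fp, offset, length, metadata)
        offset += written
        if error is not None:
            # The partial chunk is already recorded in metadata; report the
            # failure together with the new offset so a retry can resume.
            return offset, metadata, error
    return offset, metadata, None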
@@ -480,9 +477,12 @@ class GoogleCloudStorage(_CloudStorage):
       fp = filelike.StreamSlice(fp, 0, size)
 
     # TODO figure out how to handle cancel_on_error=False
-    key.set_contents_from_stream(fp)
+    try:
+      key.set_contents_from_stream(fp)
+    except IOError as ex:
+      return 0, ex
 
-    return key.size
+    return key.size, None
 
   def complete_chunked_upload(self, uuid, final_path, storage_metadata):
     self._initialize_cloud_conn()
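Note: GoogleCloudStorage writes in a single shot rather than in multipart chunks, so its override maps a raised IOError to (0, ex) to keep the same (bytes_written, error) shape as the base implementation; the TODO about cancel_on_error=False is left open in the source. The general shape of that adaptation, as a sketch (as_error_tuple is a hypothetical helper):

def as_error_tuple(write_fn, fp):
    # Adapt a raising, single-shot write (like key.set_contents_from_stream)
    # to the (value, error) convention used by _stream_write_internal.
    try:
        return write_fn(fp), None
    except IOError as ex:
        return 0, ex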