Fix blob content types

Fixes #990
Joseph Schorr 2015-11-30 15:45:45 -05:00
parent 94effb5aaa
commit ee0eb80c8f
6 changed files with 14 additions and 10 deletions
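
The change threads an optional content_type argument through stream_upload_chunk in every storage engine, so a blob's media type can be recorded by backends that support per-object metadata. A condensed view of the signature change, summarizing the hunks below:

# Before, in the base interface and every engine:
def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata): ...

# After: content_type defaults to None, so existing callers are unaffected.
def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata,
                        content_type=None): ...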

@@ -122,7 +122,7 @@ class BaseStorageV2(BaseStorage):
     """
     raise NotImplementedError
 
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
     """ Upload the specified amount of data from the given file pointer to the chunked destination
         specified, starting at the given offset. Returns the number of bytes uploaded, and a new
         version of the storage_metadata. Raises InvalidChunkException if the offset or length can

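Since the new argument defaults to None, existing subclasses and callers keep working unchanged; a caller that knows the upload's media type can now thread it through. A minimal caller sketch (the function and parameter names are hypothetical, not part of this commit):

def write_blob_chunk(storage, upload_uuid, offset, length, request_fp,
                     storage_metadata, media_type=None):
  # Hypothetical caller: forward the client's declared media type; engines
  # that cannot record it simply accept and ignore the keyword.
  return storage.stream_upload_chunk(upload_uuid, offset, length, request_fp,
                                     storage_metadata, content_type=media_type)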

@@ -294,13 +294,13 @@ class _CloudStorage(BaseStorageV2):
     return random_uuid, metadata
 
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
     self._initialize_cloud_conn()
 
     # We are going to upload each chunk to a separate key
     chunk_path = self._rel_upload_path(str(uuid4()))
     bytes_written = self._stream_write_internal(chunk_path, in_fp, cancel_on_error=False,
-                                                size=length)
+                                                size=length, content_type=content_type)
 
     new_metadata = copy.deepcopy(storage_metadata)

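The cloud engine writes each chunk to its own key, so the content type must be supplied on every _stream_write_internal call. How that call turns the keyword into object metadata is not shown in this diff; a plausible shape, assuming the underlying store accepts per-object HTTP headers (helper name is hypothetical):

def build_write_headers(content_type=None):
  # Object stores such as S3 record Content-Type from a header supplied at
  # write time, which is why the keyword has to travel down to the write call.
  headers = {}
  if content_type is not None:
    headers['Content-Type'] = content_type
  return headers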

@@ -54,7 +54,7 @@ class FakeStorage(BaseStorageV2):
     _FAKE_STORAGE_MAP[new_uuid].seek(0)
     return new_uuid, {}
 
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, _):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, _, content_type=None):
     upload_storage = _FAKE_STORAGE_MAP[uuid]
     upload_storage.seek(offset)
     return self.stream_write_to_fp(in_fp, upload_storage, length), {}

@@ -105,7 +105,7 @@ class LocalStorage(BaseStorageV2):
     return new_uuid, {}
 
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, _):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, _, content_type=None):
     with open(self._init_path(self._rel_upload_path(uuid)), 'r+b') as upload_storage:
       upload_storage.seek(offset)
       return self.stream_write_to_fp(in_fp, upload_storage, length), {}

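FakeStorage and LocalStorage accept the new keyword but never read it: an in-memory buffer or plain file has no metadata slot for a content type, and keeping the parameter preserves a uniform engine interface. A minimal sketch of the same pattern (class and names hypothetical):

import io

class InMemoryStorage(object):
  """Hypothetical engine: content_type is accepted only for interface
  compatibility, exactly as in FakeStorage and LocalStorage above."""

  def __init__(self):
    self._uploads = {}

  def initiate_chunked_upload(self):
    upload_id = str(len(self._uploads))
    self._uploads[upload_id] = io.BytesIO()
    return upload_id, {}

  def stream_upload_chunk(self, uuid, offset, length, in_fp, _, content_type=None):
    buf = self._uploads[uuid]
    buf.seek(offset)
    data = in_fp.read(length)
    buf.write(data)
    return len(data), {}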

@@ -268,7 +268,7 @@ class SwiftStorage(BaseStorage):
     return random_uuid, metadata
 
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
     if length == 0:
       return 0, storage_metadata
@@ -277,7 +277,7 @@ class SwiftStorage(BaseStorage):
     total_bytes_written = 0
     while True:
       bytes_written, storage_metadata = self._stream_upload_segment(uuid, offset, length, in_fp,
-                                                                    storage_metadata)
+                                                                    storage_metadata, content_type)
 
       if length != filelike.READ_UNTIL_END:
         length = length - bytes_written
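
Swift stores a large blob as a series of segment objects, so the content type has to be forwarded on every pass through this loop. A condensed sketch of the control flow; the offset and total bookkeeping between this hunk and the next is inferred, not shown in the diff:

def _upload_segments(self, uuid, offset, length, in_fp, storage_metadata,
                     content_type):
  # Sketch: write successive segments until the input is exhausted.
  total_bytes_written = 0
  while True:
    bytes_written, storage_metadata = self._stream_upload_segment(
        uuid, offset, length, in_fp, storage_metadata, content_type)
    if length != filelike.READ_UNTIL_END:
      length = length - bytes_written
    offset = offset + bytes_written                              # inferred
    total_bytes_written = total_bytes_written + bytes_written    # inferred
    if bytes_written == 0 or length <= 0:
      return total_bytes_written, storage_metadata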
@@ -287,7 +287,7 @@ class SwiftStorage(BaseStorage):
       if bytes_written == 0 or length <= 0:
         return total_bytes_written, storage_metadata
 
-  def _stream_upload_segment(self, uuid, offset, length, in_fp, storage_metadata):
+  def _stream_upload_segment(self, uuid, offset, length, in_fp, storage_metadata, content_type):
     updated_metadata = copy.deepcopy(storage_metadata)
     segment_count = len(updated_metadata[_SEGMENTS_KEY])
     segment_path = '%s/%s/%s' % (_SEGMENT_DIRECTORY, uuid, segment_count)
@@ -302,7 +302,7 @@ class SwiftStorage(BaseStorage):
     limiting_fp = filelike.LimitingStream(in_fp, length)
 
     # Write the segment to Swift.
-    self.stream_write(segment_path, limiting_fp)
+    self.stream_write(segment_path, limiting_fp, content_type)
 
     # We are only going to track keys to which data was confirmed written.
     bytes_written = limiting_fp.tell()
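
stream_write is where the content type finally reaches Swift, which records Content-Type per object at write time. The diff does not show stream_write's body; a hedged usage sketch with python-swiftclient, whose Connection.put_object takes a content_type keyword:

from swiftclient.client import Connection

def write_segment(swift_conn, container, segment_path, fp, content_type=None):
  # swift_conn is a swiftclient Connection; put_object stores the given
  # content_type as the object's Content-Type at write time.
  assert isinstance(swift_conn, Connection)
  swift_conn.put_object(container, segment_path, contents=fp,
                        content_type=content_type)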