Fix blob content types

Fixes #990
Joseph Schorr 2015-11-30 15:45:45 -05:00
parent 94effb5aaa
commit ee0eb80c8f
6 changed files with 14 additions and 10 deletions

@@ -21,6 +21,7 @@ logger = logging.getLogger(__name__)
 BASE_BLOB_ROUTE = '/<namespace>/<repo_name>/blobs/<regex("{0}"):digest>'
 BLOB_DIGEST_ROUTE = BASE_BLOB_ROUTE.format(digest_tools.DIGEST_PATTERN)
 RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$')
+BLOB_CONTENT_TYPE = 'application/octet-stream'
 class _InvalidRangeHeader(Exception):
@@ -60,6 +61,7 @@ def check_blob_exists(namespace, repo_name, digest):
   response = make_response('')
   response.headers.extend(headers)
   response.headers['Content-Length'] = found.image_size
+  response.headers['Content-Type'] = BLOB_CONTENT_TYPE
   return response
@@ -87,6 +89,7 @@ def download_blob(namespace, repo_name, digest):
   database.close_db_filter(None)
   headers['Content-Length'] = found.image_size
+  headers['Content-Type'] = BLOB_CONTENT_TYPE
   return Response(storage.stream_read(found.locations, path), headers=headers)
@@ -200,7 +203,8 @@ def _upload_chunk(namespace, repo_name, upload_uuid):
   try:
     length_written, new_metadata = storage.stream_upload_chunk({found.location.name}, upload_uuid,
                                                                start_offset, length, input_fp,
-                                                               found.storage_metadata)
+                                                               found.storage_metadata,
+                                                               content_type=BLOB_CONTENT_TYPE)
   except InvalidChunkException:
     _range_not_satisfiable(found.byte_count)
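
For context, a minimal, self-contained sketch of the endpoint-side idea above — serving a blob check with an explicit octet-stream content type — assuming Flask; the route, the digest handling, and the 1024-byte size are illustrative stand-ins, not the project's actual code:

from flask import Flask, make_response

app = Flask(__name__)

# Layer blobs are raw bytes, so a generic content type is used.
BLOB_CONTENT_TYPE = 'application/octet-stream'

@app.route('/v2/<namespace>/<repo_name>/blobs/<digest>', methods=['GET', 'HEAD'])
def check_blob_exists(namespace, repo_name, digest):
  # Illustrative stand-in for the real digest lookup and size resolution.
  image_size = 1024

  response = make_response('')
  response.headers['Content-Length'] = image_size
  response.headers['Content-Type'] = BLOB_CONTENT_TYPE
  return response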

@@ -122,7 +122,7 @@ class BaseStorageV2(BaseStorage):
     """
     raise NotImplementedError
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
     """ Upload the specified amount of data from the given file pointer to the chunked destination
         specified, starting at the given offset. Returns the number of bytes uploaded, and a new
         version of the storage_metadata. Raises InvalidChunkException if the offset or length can
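
To illustrate the updated signature, here is a hedged, self-contained sketch of an engine that accepts the new content_type keyword; InMemoryStorage and every other name below are hypothetical stand-ins, not part of the codebase:

import io

class InMemoryStorage(object):
  """ Illustrative engine: accepts the new keyword argument and simply records it. """
  def __init__(self):
    self._uploads = {}
    self.last_content_type = None

  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
    self.last_content_type = content_type
    buf = self._uploads.setdefault(uuid, io.BytesIO())
    buf.seek(offset)
    bytes_written = buf.write(in_fp.read(length))
    return bytes_written, storage_metadata

engine = InMemoryStorage()
written, metadata = engine.stream_upload_chunk('some-upload-uuid', 0, 4, io.BytesIO(b'blob'), {},
                                               content_type='application/octet-stream')
assert written == 4 and engine.last_content_type == 'application/octet-stream'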

@@ -294,13 +294,13 @@ class _CloudStorage(BaseStorageV2):
     return random_uuid, metadata
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
     self._initialize_cloud_conn()
     # We are going to upload each chunk to a separate key
     chunk_path = self._rel_upload_path(str(uuid4()))
     bytes_written = self._stream_write_internal(chunk_path, in_fp, cancel_on_error=False,
-                                                size=length)
+                                                size=length, content_type=content_type)
     new_metadata = copy.deepcopy(storage_metadata)
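
As a rough illustration of the chunk-per-key pattern above (every name here is hypothetical, and a plain dict stands in for the cloud bucket): each call writes the incoming bytes to a fresh random key, remembers the content type, and records the chunk in a copied metadata dict:

import copy
import io
from uuid import uuid4

def upload_chunk_to_new_key(bucket, in_fp, length, storage_metadata, content_type=None):
  # Each chunk lands under its own random key; the metadata copy tracks the chunk list.
  chunk_path = 'uploads/%s' % uuid4()
  data = in_fp.read(length)
  bucket[chunk_path] = (data, content_type)

  new_metadata = copy.deepcopy(storage_metadata)
  new_metadata.setdefault('chunks', []).append({'path': chunk_path, 'length': len(data)})
  return len(data), new_metadata

bucket = {}
bytes_written, metadata = upload_chunk_to_new_key(bucket, io.BytesIO(b'layer-bytes'), 11, {},
                                                  content_type='application/octet-stream')
assert bytes_written == 11 and metadata['chunks'][0]['length'] == 11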

@@ -54,7 +54,7 @@ class FakeStorage(BaseStorageV2):
     _FAKE_STORAGE_MAP[new_uuid].seek(0)
     return new_uuid, {}
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, _):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, _, content_type=None):
     upload_storage = _FAKE_STORAGE_MAP[uuid]
     upload_storage.seek(offset)
     return self.stream_write_to_fp(in_fp, upload_storage, length), {}

@@ -105,7 +105,7 @@ class LocalStorage(BaseStorageV2):
     return new_uuid, {}
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, _):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, _, content_type=None):
     with open(self._init_path(self._rel_upload_path(uuid)), 'r+b') as upload_storage:
       upload_storage.seek(offset)
       return self.stream_write_to_fp(in_fp, upload_storage, length), {}

@@ -268,7 +268,7 @@ class SwiftStorage(BaseStorage):
     return random_uuid, metadata
-  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata):
+  def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
     if length == 0:
       return 0, storage_metadata
@@ -277,7 +277,7 @@ class SwiftStorage(BaseStorage):
     total_bytes_written = 0
     while True:
       bytes_written, storage_metadata = self._stream_upload_segment(uuid, offset, length, in_fp,
-                                                                    storage_metadata)
+                                                                    storage_metadata, content_type)
       if length != filelike.READ_UNTIL_END:
         length = length - bytes_written
@@ -287,7 +287,7 @@ class SwiftStorage(BaseStorage):
       if bytes_written == 0 or length <= 0:
         return total_bytes_written, storage_metadata
-  def _stream_upload_segment(self, uuid, offset, length, in_fp, storage_metadata):
+  def _stream_upload_segment(self, uuid, offset, length, in_fp, storage_metadata, content_type):
     updated_metadata = copy.deepcopy(storage_metadata)
     segment_count = len(updated_metadata[_SEGMENTS_KEY])
     segment_path = '%s/%s/%s' % (_SEGMENT_DIRECTORY, uuid, segment_count)
@@ -302,7 +302,7 @@ class SwiftStorage(BaseStorage):
     limiting_fp = filelike.LimitingStream(in_fp, length)
     # Write the segment to Swift.
-    self.stream_write(segment_path, limiting_fp)
+    self.stream_write(segment_path, limiting_fp, content_type)
     # We are only going to track keys to which data was confirmed written.
     bytes_written = limiting_fp.tell()
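
Finally, a rough, self-contained sketch of the segment-loop idea above — splitting one upload into bounded segments and tagging each write with the content type. LimitingReader, upload_in_segments, and record_segment are hypothetical stand-ins, not the project's filelike.LimitingStream or its Swift client:

import io

class LimitingReader(object):
  """ Illustrative length-limiting file wrapper: reads at most `limit` bytes from `fp`. """
  def __init__(self, fp, limit):
    self._fp = fp
    self._remaining = limit
    self._bytes_read = 0

  def read(self, size=-1):
    if self._remaining <= 0:
      return b''
    if size < 0 or size > self._remaining:
      size = self._remaining
    data = self._fp.read(size)
    self._remaining -= len(data)
    self._bytes_read += len(data)
    return data

  def tell(self):
    return self._bytes_read

def upload_in_segments(in_fp, segment_size, write_segment, content_type=None):
  """ Writes fixed-size segments until the source is exhausted; returns total bytes written. """
  total_bytes_written = 0
  segment_index = 0
  while True:
    limited = LimitingReader(in_fp, segment_size)
    write_segment('segments/%s' % segment_index, limited, content_type)
    bytes_written = limited.tell()
    if bytes_written == 0:
      return total_bytes_written
    total_bytes_written += bytes_written
    segment_index += 1

segments = {}
def record_segment(path, fp, content_type):
  data = fp.read()
  if data:
    segments[path] = (data, content_type)

total = upload_in_segments(io.BytesIO(b'x' * 10), 4, record_segment,
                           content_type='application/octet-stream')
assert total == 10 and sorted(segments) == ['segments/0', 'segments/1', 'segments/2']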