Merge pull request #643 from coreos-inc/nullimage

Check and handle NULL image_size
josephschorr 2015-10-15 13:26:13 -04:00
commit d3857e509f
2 changed files with 21 additions and 15 deletions
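
The failure modes being guarded against here are plain Python behavior rather than anything Quay-specific: adding None to an integer raises TypeError, and str(None) produces the literal string 'None', which would otherwise leak into the X-Docker-Size header. A minimal standalone sketch of both cases (illustrative only, not code from this repository):

  # ancestor_size stands in for a SQL SUM() that came back NULL; image_size for a known layer size.
  ancestor_size = None
  image_size = 1024

  # 1. Arithmetic on a missing aggregate raises, which is why the model now checks first.
  try:
    total_size = ancestor_size + image_size
  except TypeError:
    total_size = None  # skip writing aggregate_size instead of crashing

  # 2. Stringifying a missing size yields the string 'None', which is why the header is now conditional.
  headers = {}
  size = None
  if size is not None:
    headers['X-Docker-Size'] = str(size)
  assert 'X-Docker-Size' not in headers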

@@ -304,6 +304,9 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
 def set_image_size(docker_image_id, namespace_name, repository_name, image_size, uncompressed_size):
+  if image_size is None:
+    raise DataModelException('Empty image size field')
+
   try:
     image = (Image
              .select(Image, ImageStorage)
@@ -314,7 +317,6 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
              .where(Repository.name == repository_name, Namespace.username == namespace_name,
                     Image.docker_image_id == docker_image_id)
              .get())
-
   except Image.DoesNotExist:
     raise DataModelException('No image with specified id and repository')
@@ -327,15 +329,17 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
       # TODO(jschorr): Switch to this faster route once we have full ancestor aggregate_size
       # parent_image = Image.get(Image.id == ancestors[-1])
       # total_size = image_size + parent_image.storage.aggregate_size
-      total_size = (ImageStorage
-                    .select(fn.Sum(ImageStorage.image_size))
-                    .join(Image)
-                    .where(Image.id << ancestors)
-                    .scalar()) + image_size
+      ancestor_size = (ImageStorage
+                       .select(fn.Sum(ImageStorage.image_size))
+                       .join(Image)
+                       .where(Image.id << ancestors)
+                       .scalar())
 
       # TODO stop writing to storage when all readers are removed
-      image.storage.aggregate_size = total_size
-      image.aggregate_size = total_size
+      if ancestor_size is not None:
+        total_size = ancestor_size + image_size
+        image.storage.aggregate_size = total_size
+        image.aggregate_size = total_size
     except Image.DoesNotExist:
       pass
   else:
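
The new "if ancestor_size is not None" branch exists because SQL's SUM() returns NULL when no matching row contributes a value, and peewee's .scalar() surfaces that NULL as None; the old code then computed None + image_size and failed. A quick standalone demonstration of the SUM() behavior using Python's sqlite3 module (the table and column here are illustrative, not the real schema):

  import sqlite3

  conn = sqlite3.connect(':memory:')
  conn.execute('CREATE TABLE imagestorage (image_size INTEGER)')
  conn.execute('INSERT INTO imagestorage (image_size) VALUES (NULL)')

  # SUM() over rows whose values are all NULL is NULL, not 0.
  print(conn.execute('SELECT SUM(image_size) FROM imagestorage').fetchone()[0])  # None

  # SUM() over zero matching rows is also NULL.
  print(conn.execute('SELECT SUM(image_size) FROM imagestorage WHERE 1 = 0').fetchone()[0])  # None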

@@ -258,15 +258,15 @@ def put_image_layer(namespace, repository, image_id):
     logger.exception('Exception when writing image data')
     abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)
 
-  # Save the size of the image.
-  model.image.set_image_size(image_id, namespace, repository, size_info.compressed_size,
-                             size_info.uncompressed_size)
-
   # Append the computed checksum.
   csums = []
   csums.append('sha256:{0}'.format(h.hexdigest()))
 
   try:
+    # Save the size of the image.
+    model.image.set_image_size(image_id, namespace, repository, size_info.compressed_size,
+                               size_info.uncompressed_size)
+
     if requires_tarsum:
       tmp.seek(0)
       csums.append(checksums.compute_tarsum(tmp, json_data))
@@ -331,9 +331,8 @@ def put_image_checksum(namespace, repository, image_id):
   if not repo_image or not repo_image.storage:
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
 
-  uuid = repo_image.storage.uuid
-
   logger.debug('Looking up repo layer data')
+  uuid = repo_image.storage.uuid
   if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)):
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
@@ -384,7 +383,10 @@ def get_image_json(namespace, repository, image_id, headers):
   logger.debug('Looking up repo layer size')
   size = repo_image.storage.image_size
-  headers['X-Docker-Size'] = str(size)
+  if size is not None:
+    # Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
+    # so handle this case rather than failing.
+    headers['X-Docker-Size'] = str(size)
 
   response = make_response(data, 200)
   response.headers.extend(headers)
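
The last hunk boils down to one rule: only advertise X-Docker-Size when the stored size is actually known, since ImageStorage.image_size can legitimately be NULL. A minimal sketch of that rule as a standalone helper (docker_size_headers is a hypothetical name, not part of this codebase):

  def docker_size_headers(size):
    # size may be None when ImageStorage.image_size is NULL in the database.
    headers = {}
    if size is not None:
      headers['X-Docker-Size'] = str(size)
    return headers

  # The header is omitted entirely when unknown, and sent as a decimal string when known.
  assert docker_size_headers(None) == {}
  assert docker_size_headers(52428800) == {'X-Docker-Size': '52428800'}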