parent e31dda35df
commit fe79d5fb66
2 changed files with 21 additions and 15 deletions
@@ -304,6 +304,9 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
 
 
 def set_image_size(docker_image_id, namespace_name, repository_name, image_size, uncompressed_size):
+  if image_size is None:
+    raise DataModelException('Empty image size field')
+
   try:
     image = (Image
              .select(Image, ImageStorage)
@@ -314,7 +317,6 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
              .where(Repository.name == repository_name, Namespace.username == namespace_name,
                     Image.docker_image_id == docker_image_id)
              .get())
-
   except Image.DoesNotExist:
     raise DataModelException('No image with specified id and repository')
 
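Together, the first two hunks make `set_image_size` fail fast: a missing size is rejected before any query runs, and a lookup miss still surfaces as `DataModelException` rather than peewee's `Image.DoesNotExist`. A minimal sketch of how calling code could rely on that contract; the import paths, the pytest harness, and the placeholder namespace/repository names are assumptions, not part of this commit:

```python
import pytest

from data import model
from data.model import DataModelException


def test_set_image_size_rejects_missing_size():
  # The new guard raises before the Image/ImageStorage query is even built.
  with pytest.raises(DataModelException):
    model.image.set_image_size('some-image-id', 'someuser', 'somerepo', None, None)


def test_set_image_size_unknown_image():
  # A failed lookup is still reported as DataModelException, not Image.DoesNotExist.
  with pytest.raises(DataModelException):
    model.image.set_image_size('no-such-image-id', 'someuser', 'somerepo', 1024, 2048)
```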
@@ -327,15 +329,17 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
       # TODO(jschorr): Switch to this faster route once we have full ancestor aggregate_size
       # parent_image = Image.get(Image.id == ancestors[-1])
       # total_size = image_size + parent_image.storage.aggregate_size
-      total_size = (ImageStorage
-                    .select(fn.Sum(ImageStorage.image_size))
-                    .join(Image)
-                    .where(Image.id << ancestors)
-                    .scalar()) + image_size
+      ancestor_size = (ImageStorage
+                       .select(fn.Sum(ImageStorage.image_size))
+                       .join(Image)
+                       .where(Image.id << ancestors)
+                       .scalar())
 
       # TODO stop writing to storage when all readers are removed
-      image.storage.aggregate_size = total_size
-      image.aggregate_size = total_size
+      if ancestor_size is not None:
+        total_size = ancestor_size + image_size
+        image.storage.aggregate_size = total_size
+        image.aggregate_size = total_size
     except Image.DoesNotExist:
       pass
   else:
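This is the substantive fix: SQL's `SUM()` over zero matching rows (or over only-NULL `image_size` values) is NULL, which peewee's `.scalar()` returns as `None`, so the old `.scalar()) + image_size` could raise a `TypeError`. The new code only writes `aggregate_size` when an ancestor total is actually available. A standalone sketch of that behaviour, using a throwaway peewee model rather than Quay's:

```python
# Throwaway illustration, not Quay's models: shows fn.Sum(...).scalar() yielding None.
from peewee import IntegerField, Model, SqliteDatabase, fn

db = SqliteDatabase(':memory:')


class Storage(Model):
  image_size = IntegerField(null=True)

  class Meta:
    database = db


db.connect()
db.create_tables([Storage])

image_size = 1024
ancestor_size = Storage.select(fn.Sum(Storage.image_size)).scalar()
print(ancestor_size)  # None: no matching rows, so the old "+ image_size" would blow up

# The guarded form from the hunk above only computes a total when there is data to add.
if ancestor_size is not None:
  total_size = ancestor_size + image_size
```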
@@ -258,15 +258,15 @@ def put_image_layer(namespace, repository, image_id):
     logger.exception('Exception when writing image data')
     abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)
 
+  # Save the size of the image.
+  model.image.set_image_size(image_id, namespace, repository, size_info.compressed_size,
+                             size_info.uncompressed_size)
+
   # Append the computed checksum.
   csums = []
   csums.append('sha256:{0}'.format(h.hexdigest()))
 
   try:
-    # Save the size of the image.
-    model.image.set_image_size(image_id, namespace, repository, size_info.compressed_size,
-                               size_info.uncompressed_size)
-
     if requires_tarsum:
       tmp.seek(0)
       csums.append(checksums.compute_tarsum(tmp, json_data))
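In `put_image_layer`, the size bookkeeping now runs right after the layer data is written, outside the try block that wraps the tarsum computation, so a checksum failure handled further down can no longer prevent the sizes from being recorded. A rough condensation of the new ordering; the wrapper function name and the broad `except` are assumptions for illustration, since the real handler sits outside the hunk shown above:

```python
def _finish_layer_upload(image_id, namespace, repository, size_info, tmp, json_data,
                         h, requires_tarsum):
  # Hypothetical condensation of the endpoint's tail end, for illustration only.

  # 1. Persist the measured sizes unconditionally, before any checksum work.
  model.image.set_image_size(image_id, namespace, repository, size_info.compressed_size,
                             size_info.uncompressed_size)

  # 2. Only the checksum work stays under the try, so its failure modes are isolated.
  csums = ['sha256:{0}'.format(h.hexdigest())]
  try:
    if requires_tarsum:
      tmp.seek(0)
      csums.append(checksums.compute_tarsum(tmp, json_data))
  except Exception:
    # Placeholder handler: the actual except clause is outside the hunks shown above.
    logger.exception('Exception when computing tarsum')

  return csums
```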
@@ -331,9 +331,8 @@ def put_image_checksum(namespace, repository, image_id):
   if not repo_image or not repo_image.storage:
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
 
-  uuid = repo_image.storage.uuid
-
   logger.debug('Looking up repo layer data')
+  uuid = repo_image.storage.uuid
   if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)):
     abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
 
@@ -384,7 +383,10 @@ def get_image_json(namespace, repository, image_id, headers):
 
   logger.debug('Looking up repo layer size')
   size = repo_image.storage.image_size
-  headers['X-Docker-Size'] = str(size)
+  if size is not None:
+    # Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
+    # so handle this case rather than failing.
+    headers['X-Docker-Size'] = str(size)
 
   response = make_response(data, 200)
   response.headers.extend(headers)
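The last hunk guards the optional `X-Docker-Size` header the same way: `repo_image.storage.image_size` can be NULL, and `str(None)` would put the literal string 'None' on the wire, so the header is now set only when a size is actually known. A tiny self-contained illustration:

```python
# Standalone illustration of why the guard matters.
size = None          # what a NULL image_size comes back as
headers = {}

if size is not None:
  headers['X-Docker-Size'] = str(size)

print(headers)       # {} -- the optional header is simply omitted
print(str(None))     # 'None' -- what the unguarded code would have sent
```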