Hash v1 uploads for torrent chunks
This commit is contained in:
parent 44fcc7e44b
commit 8f80d7064b
6 changed files with 98 additions and 69 deletions
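The diff below covers only the aggregate-size refactor in data/model/image.py; the torrent piece hashing named in the commit title lives in the other changed files, which are not part of this excerpt. For background, BitTorrent-style piece hashing splits a stream into fixed-size pieces and records a SHA-1 digest for each one. The sketch below illustrates that idea only; the class name, piece size, and interface are assumptions, not taken from this commit.

import hashlib

# Illustrative piece hasher (hypothetical; not the implementation in this commit).
PIECE_SIZE = 1024 * 1024  # assumed 1 MiB pieces


class SimplePieceHasher(object):
  """ Accumulates BitTorrent-style SHA-1 piece digests over a streamed upload. """
  def __init__(self, piece_size=PIECE_SIZE):
    self._piece_size = piece_size
    self._current = hashlib.sha1()
    self._filled = 0
    self._digests = []

  def update(self, data):
    # Feed upload bytes as they stream in, closing out a piece whenever the
    # fixed piece size is reached.
    while data:
      space = self._piece_size - self._filled
      chunk, data = data[:space], data[space:]
      self._current.update(chunk)
      self._filled += len(chunk)
      if self._filled == self._piece_size:
        self._digests.append(self._current.digest())
        self._current = hashlib.sha1()
        self._filled = 0

  def final_pieces(self):
    # Concatenated 20-byte SHA-1 digests (the torrent "pieces" blob),
    # including the trailing partial piece, if any.
    if self._filled:
      self._digests.append(self._current.digest())
      self._filled = 0
    return b''.join(self._digests)

Feeding each uploaded chunk through update() and persisting final_pieces() next to the layer's content checksum is the general shape such a change takes; the actual names and storage locations used by this commit are not visible in this excerpt.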
@@ -2,7 +2,7 @@ import logging
 import dateutil.parser
 import random
 
-from peewee import JOIN_LEFT_OUTER, fn, SQL
+from peewee import JOIN_LEFT_OUTER, SQL
 from datetime import datetime
 
 from data.model import (DataModelException, db_transaction, _basequery, storage,
@@ -296,6 +296,8 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
 
 def set_image_metadata(docker_image_id, namespace_name, repository_name, created_date_str, comment,
                        command, v1_json_metadata, parent=None):
+  """ Sets metadata that is specific to how a binary piece of storage fits into the layer tree.
+  """
   with db_transaction():
     query = (Image
              .select(Image, ImageStorage)
@@ -322,6 +324,7 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
     # We cleanup any old checksum in case it's a retry after a fail
     fetched.v1_checksum = None
     fetched.storage.content_checksum = None
     fetched.storage.save()
 
     fetched.comment = comment
     fetched.command = command
@@ -335,59 +338,6 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
     return fetched
 
 
-def set_image_size(docker_image_id, namespace_name, repository_name, image_size, uncompressed_size):
-  if image_size is None:
-    raise DataModelException('Empty image size field')
-
-  try:
-    image = (Image
-             .select(Image, ImageStorage)
-             .join(Repository)
-             .join(Namespace, on=(Repository.namespace_user == Namespace.id))
-             .switch(Image)
-             .join(ImageStorage, JOIN_LEFT_OUTER)
-             .where(Repository.name == repository_name, Namespace.username == namespace_name,
-                    Image.docker_image_id == docker_image_id)
-             .get())
-  except Image.DoesNotExist:
-    raise DataModelException('No image with specified id and repository')
-
-  image.storage.image_size = image_size
-  image.storage.uncompressed_size = uncompressed_size
-  image.storage.save()
-
-  image.aggregate_size = calculate_image_aggregate_size(image.ancestors, image.storage,
-                                                        image.parent)
-  image.save()
-
-  return image
-
-
-def calculate_image_aggregate_size(ancestors_str, image_storage, parent_image):
-  ancestors = ancestors_str.split('/')[1:-1]
-  if not ancestors:
-    return image_storage.image_size
-
-  if parent_image is None:
-    raise DataModelException('Could not load parent image')
-
-  ancestor_size = parent_image.aggregate_size
-  if ancestor_size is not None:
-    return ancestor_size + image_storage.image_size
-
-  # Fallback to a slower path if the parent doesn't have an aggregate size saved.
-  # TODO: remove this code if/when we do a full backfill.
-  ancestor_size = (ImageStorage
-                   .select(fn.Sum(ImageStorage.image_size))
-                   .join(Image)
-                   .where(Image.id << ancestors)
-                   .scalar())
-  if ancestor_size is None:
-    return None
-
-  return ancestor_size + image_storage.image_size
-
-
 def get_image(repo, docker_image_id):
   try:
     return Image.get(Image.docker_image_id == docker_image_id, Image.repository == repo)
@@ -452,7 +402,8 @@ def synthesize_v1_image(repo, image_storage, docker_image_id, created_date_str,
     pass
 
   # Get the aggregate size for the image.
-  aggregate_size = calculate_image_aggregate_size(ancestors, image_storage, parent_image)
+  aggregate_size = _basequery.calculate_image_aggregate_size(ancestors, image_storage.image_size,
+                                                             parent_image)
 
   return Image.create(docker_image_id=docker_image_id, ancestors=ancestors, comment=comment,
                       command=command, v1_json_metadata=v1_json_metadata, created=created,
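The deleted calculate_image_aggregate_size appears to move into data/model/_basequery.py: the new call in synthesize_v1_image passes image_storage.image_size rather than the ImageStorage row itself. The _basequery.py side of the change is not shown in this excerpt; the following is only a minimal sketch of what the relocated helper could look like under that assumption, mirroring the deleted body, with imports guessed rather than copied from the commit.

# Hypothetical sketch of the relocated helper in data/model/_basequery.py;
# the actual body added by this commit is in a file not shown here.
from peewee import fn

from data.database import Image, ImageStorage
from data.model import DataModelException


def calculate_image_aggregate_size(ancestors_str, image_size, parent_image):
  # Same layer-tree walk as the deleted image.py version, but operating on a
  # raw size value instead of an ImageStorage row.
  ancestors = ancestors_str.split('/')[1:-1]
  if not ancestors:
    return image_size

  if parent_image is None:
    raise DataModelException('Could not load parent image')

  ancestor_size = parent_image.aggregate_size
  if ancestor_size is not None:
    return ancestor_size + image_size

  # Slow path: sum the ancestors' stored sizes when the parent has no
  # aggregate size saved.
  ancestor_size = (ImageStorage
                   .select(fn.Sum(ImageStorage.image_size))
                   .join(Image)
                   .where(Image.id << ancestors)
                   .scalar())
  if ancestor_size is None:
    return None

  return ancestor_size + image_size

With the size passed in as a plain integer, callers can share the helper without each needing a loaded ImageStorage row.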