Fixes for backfill_aggregate_size script.
This commit is contained in:
parent
8baacd2741
commit
a887125c3f
1 changed file with 28 additions and 22 deletions
|
@ -1,44 +1,50 @@
|
|||
import logging
|
||||
|
||||
from data.database import ImageStorage, Image, db
|
||||
from data.database import ImageStorage, Image, db, db_for_update
|
||||
from app import app
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def backfill_aggregate_sizes():
  """ Generates aggregate sizes for any image entries without them.

  Repeatedly selects batches of up to 100 Image rows whose aggregate_size
  column is NULL and, for each, computes the sum of its own storage's
  image_size plus the image_size of every ancestor image, saving the total
  back onto the Image row. Returns once no NULL-sized images remain.
  """
  logger.debug('Aggregate sizes backfill: Began execution')
  while True:
    # Grab the next batch of images still missing an aggregate size.
    batch_image_ids = list(Image
                           .select(Image.id)
                           .where(Image.aggregate_size >> None)
                           .limit(100))

    if not batch_image_ids:
      # There are no images left to backfill. We're done!
      logger.debug('Aggregate sizes backfill: Backfill completed')
      return

    logger.debug('Aggregate sizes backfill: Found %s records to update', len(batch_image_ids))
    for image_id in batch_image_ids:
      logger.debug('Updating image : %s', image_id.id)

      # Each image is updated inside its own transaction so that a failure
      # on one record does not roll back work already done for the batch.
      with app.config['DB_TRANSACTION_FACTORY'](db):
        try:
          image = (Image
                   .select(Image, ImageStorage)
                   .join(ImageStorage)
                   .where(Image.id == image_id)
                   .get())

          aggregate_size = image.storage.image_size

          # ancestors is a '/'-delimited path (e.g. '/1/2/3/'); slicing off
          # the first and last entries drops the empty strings around it.
          # NOTE: renamed from image_id/image_ids — the original inner loop
          # shadowed and clobbered the outer batch loop variable.
          ancestor_ids = image.ancestors.split('/')[1:-1]
          for ancestor_id in ancestor_ids:
            # db_for_update presumably issues SELECT ... FOR UPDATE to lock
            # the ancestor row while its size is read — verify in
            # data.database.
            ancestor = db_for_update(Image
                                     .select(Image, ImageStorage)
                                     .join(ImageStorage)
                                     .where(Image.id == ancestor_id)).get()
            aggregate_size += ancestor.storage.image_size

          image.aggregate_size = aggregate_size
          image.save()
        except Image.DoesNotExist:
          # The image (or one of its ancestors) disappeared out from under
          # us; skip it and move on.
          # NOTE(review): the row's aggregate_size remains NULL, so it will
          # be reselected on every subsequent pass — confirm this cannot loop
          # forever when an ancestor is permanently missing.
          pass
|
||||
|
||||
|
|
Reference in a new issue