Move the uncompressed image size migration call out of alembic, since it can deadlock on some databases (alembic runs migrations inside a transaction)
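The deadlock mechanism is worth spelling out: in alembic's usual "online" mode, env.py opens a connection and runs the revisions inside a single transaction, while the backfill below talks to the database over a separate peewee connection (via `configure(app.config)`). A long-running backfill on the second connection can then block against locks held by alembic's still-open migration transaction. A minimal sketch of the standard alembic online-runner pattern, for illustration only (this repository's actual env.py may differ):

from alembic import context
from sqlalchemy import create_engine

def run_migrations_online():
    # Everything under begin_transaction() executes in one DB transaction on
    # transactional backends; any data backfill triggered from a revision
    # therefore runs while that transaction (and its locks) is still open.
    engine = create_engine(context.config.get_main_option('sqlalchemy.url'))
    with engine.connect() as connection:
        context.configure(connection=connection, target_metadata=None)
        with context.begin_transaction():
            context.run_migrations()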
parent 6adf4644b4
commit 29c30b336e
3 changed files with 28 additions and 18 deletions
runmigration.sh
@@ -3,3 +3,6 @@ set -e
 
 # Run the database migration
 PYTHONPATH=. venv/bin/alembic upgrade head
+
+# Run the uncompressed size migration
+PYTHONPATH=. venv/bin/python -m util.uncompressedsize
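The new second step relies on util.uncompressedsize being runnable with `python -m`. The module's entry-point guard is not part of this diff; presumably it is something along these lines (hypothetical tail of util/uncompressedsize.py):

# Assumed, not shown in this diff: the guard that makes
# `python -m util.uncompressedsize` invoke the backfill.
if __name__ == '__main__':
    backfill_sizes_from_data()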
(alembic migration file)
@@ -16,7 +16,9 @@ from util.uncompressedsize import backfill_sizes_from_data
 
 def upgrade(tables):
-  backfill_sizes_from_data()
+  # Note: Doing non-alembic operations inside alembic can cause a deadlock. This call has been
+  # moved to runmigration.sh.
+  pass
 
 
 def downgrade(tables):
   pass
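The revision is kept as a no-op `pass` rather than deleted because alembic records each applied revision id in the database's alembic_version table; removing a shipped revision would break the upgrade chain for deployments that have already recorded it. A quick way to inspect what a given database considers its current revision, using real alembic/SQLAlchemy APIs (the URL is a placeholder):

from sqlalchemy import create_engine
from alembic.migration import MigrationContext

engine = create_engine('sqlite:///example.db')  # placeholder DB URL
with engine.connect() as conn:
    # Reads the alembic_version table and returns the stored revision id.
    print(MigrationContext.configure(conn).get_current_revision())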
util/uncompressedsize.py
@@ -15,28 +15,29 @@ logger = logging.getLogger(__name__)
 
 CHUNK_SIZE = 5 * 1024 * 1024
 
 
 def backfill_sizes_from_data():
-  logger.setLevel(logging.DEBUG)
-  logger.debug('Starting uncompressed image size backfill')
+  logger.setLevel(logging.DEBUG)
+
+  logger.debug('Starting uncompressed image size backfill with DBURI: %s', app.config['DB_URI'])
+  logger.debug('NOTE: This can be a LONG RUNNING OPERATION. Please wait!')
 
   # Make sure we have a reference to the current DB.
   configure(app.config)
 
+  formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+  ch = logging.StreamHandler(sys.stdout)
+  ch.setFormatter(formatter)
+  logger.addHandler(ch)
   logger.debug('Uncompressed backfill: Database configured')
 
-  encountered = set()
-  # Check for any uncompressed images.
-  has_images = bool(list(ImageStorage
-                         .select(ImageStorage.uuid)
-                         .where(ImageStorage.uncompressed_size >> None,
-                                ImageStorage.uploading == False)
-                         .limit(1)))
-
-  if not has_images:
-    logger.debug('Uncompressed backfill: No migration needed')
+  # Try reading the ImageStorage table count. If it doesn't exist, then this is a postgres
+  # initial setup migration and we can skip this step anyway.
+  try:
+    ImageStorage.select().count()
+  except:
+    logger.debug('Skipping migration for new setup')
     return
 
   logger.debug('Uncompressed backfill: Starting migration')
   encountered = set()
   while True:
     # Load the record from the DB.
     batch_ids = list(ImageStorage
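Two peewee idioms in this hunk are easy to misread: `ImageStorage.uncompressed_size >> None` is peewee's spelling of IS NULL, and the bare `ImageStorage.select().count()` probe works because peewee raises a database error when the table does not exist yet (the fresh-install case the comment mentions). A self-contained sketch of the same probe against an in-memory database:

from peewee import SqliteDatabase, Model, CharField

db = SqliteDatabase(':memory:')

class ImageStorage(Model):
    uuid = CharField()

    class Meta:
        database = db

# The table is deliberately never created, mimicking a brand-new installation;
# the COUNT query then raises (typically peewee.OperationalError).
try:
    ImageStorage.select().count()
except Exception:
    print('Skipping migration for new setup')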
@@ -46,15 +47,19 @@ def backfill_sizes_from_data():
                      .limit(100)
                      .order_by(db_random_func()))
 
-    batch_ids = set(batch_ids) - encountered
+    batch_ids = set([s.uuid for s in batch_ids]) - encountered
     logger.debug('Found %s images to process', len(batch_ids))
     if len(batch_ids) == 0:
       # We're done!
       return
 
-    for record in batch_ids:
-      uuid = record.uuid
+    counter = 1
+    for uuid in batch_ids:
       encountered.add(uuid)
+
+      logger.debug('Processing image ID %s (%s/%s)', uuid, counter, len(batch_ids))
+      counter = counter + 1
+
       try:
         with_locs = model.get_storage_by_uuid(uuid)
         if with_locs.uncompressed_size is not None:
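The reworked loop dedupes on uuid strings rather than on model instances, which is what makes the `encountered` set effective across iterations (two peewee rows fetched for the same image would not compare equal, but their uuid strings do). In the real code, successfully processed rows also drop out of the underlying query once uncompressed_size is set, so `encountered` mainly guards rows that were skipped or failed. The control flow, reduced to a runnable skeleton with hypothetical fetch_batch/process helpers standing in for the query and the per-image work:

import random

def fetch_batch(pending, limit=100):
    # Stand-in for the real SELECT ... LIMIT 100 ORDER BY random() query.
    return random.sample(list(pending), min(limit, len(pending)))

def process(uuid):
    pass  # stand-in for the real per-image backfill

def backfill(pending):
    encountered = set()
    while True:
        batch_ids = set(fetch_batch(pending)) - encountered
        if len(batch_ids) == 0:
            return  # nothing unseen was sampled: we're done
        for uuid in batch_ids:
            encountered.add(uuid)  # each uuid is processed at most once
            process(uuid)

backfill({'uuid-1', 'uuid-2', 'uuid-3'})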