Switch postgres to non-transactional DDL so that we can use peewee to modify data inside migrations; enterprise customers already run their postgres migrations offline, so this is safe. Move the image size backfill back into a migration, since it will now work there. Unify the interface for passing a DB URI to env.py between normal runs and the migration test script.

Jake Moshenko 2014-11-18 14:07:33 -05:00
parent 3815e9a293
commit 17fc72d262
6 changed files with 21 additions and 45 deletions
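
Why the DDL change matters: with transactional DDL, alembic holds an open transaction around the schema changes, so data writes issued by peewee over its own connection can deadlock against it (the deadlock the old migration comment below warned about). With transactional_ddl=False, each DDL statement commits as it executes, letting one migration mix schema and data changes. A minimal sketch of the resulting pattern; the Widget model and its columns are hypothetical, not part of this commit:

    # Hypothetical data migration; Widget and its columns are illustrative only.
    import sqlalchemy as sa
    from alembic import op

    from data.database import Widget  # hypothetical peewee model

    def upgrade(tables):
      # DDL commits immediately under non-transactional DDL.
      op.add_column('widget', sa.Column('display_name', sa.String(length=255), nullable=True))

      # Peewee writes use their own connection; under transactional DDL these
      # could block on locks held by the still-open DDL transaction.
      for widget in Widget.select():
        widget.display_name = widget.name.title()
        widget.save()

    def downgrade(tables):
      op.drop_column('widget', 'display_name')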

runmigration.sh

@@ -2,7 +2,4 @@
 set -e
 
 # Run the database migration
 PYTHONPATH=. venv/bin/alembic upgrade head
-
-# Run the uncompressed size migration
-PYTHONPATH=. venv/bin/python -m util.uncompressedsize

data/migrations/env.py

@@ -13,24 +13,8 @@ from app import app
 from data.model.sqlalchemybridge import gen_sqlalchemy_metadata
 from util.morecollections import AttrDict
 
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-db_uri = unquote(app.config['DB_URI'])
-if 'GENMIGRATE' in os.environ:
-  docker_host = os.environ.get('DOCKER_HOST')
-  docker_host_ip = docker_host[len('tcp://'):].split(':')[0]
-  if os.environ.get('GENMIGRATE') == 'mysql':
-    db_uri = 'mysql+pymysql://root:password@%s/genschema' % (docker_host_ip)
-  else:
-    db_uri = 'postgresql://postgres@%s/genschema' % (docker_host_ip)
-
-if 'DB_URI' in os.environ:
-  db_uri = os.environ['DB_URI']
-
-app.config['DB_URI'] = db_uri
-
 config = context.config
-config.set_main_option('sqlalchemy.url', db_uri)
+config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI']))
 
 # Interpret the config file for Python logging.
 # This line sets up loggers basically.
@@ -86,7 +70,8 @@ def run_migrations_online():
   connection = engine.connect()
   context.configure(
       connection=connection,
-      target_metadata=target_metadata
+      target_metadata=target_metadata,
+      transactional_ddl=False,
   )
 
   try:
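
env.py now treats app.config['DB_URI'] as the single source of truth; anything that needs a different database (like the migration test script below) injects it through QUAY_OVERRIDE_CONFIG instead of the old GENMIGRATE special cases. A rough sketch of the assumed override mechanism; the app's real config bootstrap may differ in detail:

    # Assumed shape of the config override; illustrative, not the app's real loader.
    import json
    import os

    config = {'DB_URI': 'sqlite:///test/data/test.db'}  # illustrative default

    # The test script passes a JSON object, e.g.
    # {"DB_URI": "postgresql://postgres@$DOCKER_IP/genschema"}
    if 'QUAY_OVERRIDE_CONFIG' in os.environ:
      config.update(json.loads(os.environ['QUAY_OVERRIDE_CONFIG']))

    # env.py then reads the merged value:
    # config.set_main_option('sqlalchemy.url', unquote(app.config['DB_URI']))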

the migration test script

@@ -1,4 +1,8 @@
 set -e
 
+DOCKER_IP=`echo $DOCKER_HOST | sed 's/tcp:\/\///' | sed 's/:.*//'`
+MYSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"mysql+pymysql://root:password@$DOCKER_IP/genschema\"}"
+PGSQL_CONFIG_OVERRIDE="{\"DB_URI\":\"postgresql://postgres@$DOCKER_IP/genschema\"}"
+
 up_mysql() {
   # Run a SQL database on port 3306 inside of Docker.
@@ -36,19 +40,19 @@ down_postgres() {
 gen_migrate() {
   # Generate a SQLite database with the schema as defined by the existing alembic model.
-  GENMIGRATE=$1 PYTHONPATH=. alembic upgrade head
+  QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
 
   # Generate the migration to the current model.
-  GENMIGRATE=$1 PYTHONPATH=. alembic revision --autogenerate -m "$2"
+  QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic revision --autogenerate -m "$2"
 }
 
 test_migrate() {
   # Generate a SQLite database with the schema as defined by the existing alembic model.
-  GENMIGRATE=$1 PYTHONPATH=. alembic upgrade head
+  QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic upgrade head
 
   # Downgrade to verify it works in both directions.
   COUNT=`ls data/migrations/versions/*.py | wc -l | tr -d ' '`
-  GENMIGRATE=$1 PYTHONPATH=. alembic downgrade "-$COUNT"
+  QUAY_OVERRIDE_CONFIG=$1 PYTHONPATH=. alembic downgrade "-$COUNT"
 }
 
 # Test (and generate, if requested) via MySQL.
@@ -59,13 +63,13 @@ if [ ! -z "$@" ]
 then
   set +e
   echo '> Generating Migration'
-  gen_migrate "mysql" "$@"
+  gen_migrate $MYSQL_CONFIG_OVERRIDE "$@"
   set -e
 fi
 
 echo '> Testing Migration (mysql)'
 set +e
-test_migrate "mysql"
+test_migrate $MYSQL_CONFIG_OVERRIDE
 set -e
 down_mysql
@@ -75,8 +79,6 @@ up_postgres
 echo '> Testing Migration (postgres)'
 set +e
-test_migrate "postgres"
+test_migrate $PGSQL_CONFIG_OVERRIDE
 set -e
 down_postgres
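
Invocation is unchanged; only the plumbing moved from the GENMIGRATE flag to config overrides. For example, against a boot2docker-style Docker host (the script name and IP here are hypothetical):

    # Test existing migrations against MySQL and Postgres containers:
    DOCKER_HOST=tcp://192.168.59.103:2375 ./test_db_migrations.sh

    # Also autogenerate a new revision from the current models:
    DOCKER_HOST=tcp://192.168.59.103:2375 ./test_db_migrations.sh "add widget table"

Note the script now derives DOCKER_IP from DOCKER_HOST, so DOCKER_HOST must be set and must use the tcp:// form.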

the uncompressed size backfill migration

@@ -16,9 +16,7 @@ from util.uncompressedsize import backfill_sizes_from_data
 
 def upgrade(tables):
-  # Note: Doing non-alembic operations inside alembic can cause a deadlock. This call has been
-  # moved to runmigration.sh.
-  pass
+  backfill_sizes_from_data()
 
 
 def downgrade(tables):
   pass

the queue names migration (revision 2fb36d4be80d)

@@ -1,14 +1,14 @@
 """Translate the queue names to reference namespace by id, remove the namespace column.
 
 Revision ID: 2430f55c41d5
-Revises: 9a1087b007d
+Revises: 313d297811c4
 Create Date: 2014-09-30 17:31:33.308490
 
 """
 
 # revision identifiers, used by Alembic.
 revision = '2fb36d4be80d'
-down_revision = '2430f55c41d5'
+down_revision = '313d297811c4'
 
 from alembic import op
 import sqlalchemy as sa
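
The parent swap points this revision at 313d297811c4, presumably the re-enabled backfill revision, so the chain now reads 313d297811c4 followed by 2fb36d4be80d. One way to confirm the ordering after applying the change (output format varies by alembic version):

    PYTHONPATH=. venv/bin/alembic history
    # 313d297811c4 should now appear as the down revision of 2fb36d4be80d.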

util/uncompressedsize.py

@@ -1,9 +1,8 @@
 import logging
 import zlib
-import sys
 
 from data import model
-from data.database import ImageStorage, configure
+from data.database import ImageStorage
 from app import app, storage as store
 from data.database import db, db_random_func
 from util.gzipstream import ZLIB_GZIP_WINDOW
@@ -15,16 +14,11 @@ logger = logging.getLogger(__name__)
 CHUNK_SIZE = 5 * 1024 * 1024
 
 def backfill_sizes_from_data():
   logger.setLevel(logging.DEBUG)
 
   logger.debug('Starting uncompressed image size backfill')
   logger.debug('NOTE: This can be a LONG RUNNING OPERATION. Please wait!')
 
-  # Make sure we have a reference to the current DB.
-  configure(app.config)
-
-  logger.debug('Uncompressed backfill: Database configured')
-
   # Check for any uncompressed images.
   has_images = bool(list(ImageStorage
                          .select(ImageStorage.uuid)
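
The remainder of backfill_sizes_from_data is cut off above; it proceeds as a chunked scan over storage rows that still lack an uncompressed size. A rough sketch of that pattern, where everything beyond the visible lines is an assumption:

    # Assumed continuation; only the select() above comes from this diff.
    def backfill_sizes_from_data():
      while True:
        # Pull a random batch of storages still missing an uncompressed size.
        batch = list(ImageStorage
                     .select(ImageStorage.uuid)
                     .where(ImageStorage.uncompressed_size >> None)
                     .limit(100)  # batch size is illustrative
                     .order_by(db_random_func()))
        if not batch:
          return  # backfill complete

        for record in batch:
          # Stream the stored layer in CHUNK_SIZE pieces, decompress using the
          # ZLIB_GZIP_WINDOW setting, and persist the uncompressed byte count.
          ...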