diff --git a/data/database.py b/data/database.py
index aba62b88b..f7c4b8931 100644
--- a/data/database.py
+++ b/data/database.py
@@ -257,7 +257,7 @@ class EmailConfirmation(BaseModel):
 
 
 class ImageStorage(BaseModel):
-  uuid = CharField(default=uuid_generator)
+  uuid = CharField(default=uuid_generator, index=True)
   checksum = CharField(null=True)
   created = DateTimeField(null=True)
   comment = TextField(null=True)
diff --git a/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py b/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py
index d50c3a592..8185c1118 100644
--- a/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py
+++ b/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py
@@ -44,11 +44,11 @@ def downgrade(tables):
     op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=False)
     op.drop_index('logentrykind_name', table_name='logentrykind')
     op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=False)
-    op.add_column('image', sa.Column('created', mysql.DATETIME(), nullable=True))
-    op.add_column('image', sa.Column('command', mysql.LONGTEXT(), nullable=True))
-    op.add_column('image', sa.Column('image_size', mysql.BIGINT(display_width=20), nullable=True))
-    op.add_column('image', sa.Column('checksum', mysql.VARCHAR(length=255), nullable=True))
-    op.add_column('image', sa.Column('comment', mysql.LONGTEXT(), nullable=True))
+    op.add_column('image', sa.Column('created', sa.DateTime(), nullable=True))
+    op.add_column('image', sa.Column('command', sa.Text(), nullable=True))
+    op.add_column('image', sa.Column('image_size', sa.BigInteger(), nullable=True))
+    op.add_column('image', sa.Column('checksum', sa.String(length=255), nullable=True))
+    op.add_column('image', sa.Column('comment', sa.Text(), nullable=True))
     op.drop_index('buildtriggerservice_name', table_name='buildtriggerservice')
     op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=False)
     ### end Alembic commands ###
diff --git a/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py b/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py
index 6f40f4fc0..4a1e2fe9d 100644
--- a/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py
+++ b/data/migrations/versions/3f4fe1194671_backfill_the_namespace_user_fields.py
@@ -16,8 +16,8 @@ import sqlalchemy as sa
 
 def upgrade(tables):
     conn = op.get_bind()
-    conn.execute('update repository set namespace_user_id = (select id from user where user.username = repository.namespace) where namespace_user_id is NULL')
-
+    user_table_name_escaped = conn.dialect.identifier_preparer.format_table(tables['user'])
+    conn.execute('update repository set namespace_user_id = (select id from {0} where {0}.username = repository.namespace) where namespace_user_id is NULL'.format(user_table_name_escaped))
     op.create_index('repository_namespace_user_id_name', 'repository', ['namespace_user_id', 'name'], unique=True)
 
 
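The escaping fix above matters because `user` is a reserved word on some backends (PostgreSQL in particular), so the raw `select id from user ...` subquery breaks there; the same pattern is repeated in the next migration below. A minimal sketch of what `identifier_preparer.format_table` returns, using a standalone SQLAlchemy `Table` as a stand-in for the migration's `tables['user']`:

```python
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# Stand-in for tables['user'] from the migration context.
metadata = sa.MetaData()
user = sa.Table('user', metadata, sa.Column('id', sa.Integer, primary_key=True))

preparer = postgresql.dialect().identifier_preparer
print(preparer.format_table(user))  # "user" -- quoted, because USER is reserved in PostgreSQL
```

The same helper leaves unreserved names bare, so the generated UPDATE stays valid on MySQL and SQLite as well.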
diff --git a/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py b/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py
index 9b63ae190..a0726bf3b 100644
--- a/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py
+++ b/data/migrations/versions/9a1087b007d_allow_the_namespace_column_to_be_.py
@@ -22,7 +22,8 @@ def upgrade(tables):
 
 def downgrade(tables):
     conn = op.get_bind()
-    conn.execute('update repository set namespace = (select username from user where user.id = repository.namespace_user_id) where namespace is NULL')
+    user_table_name_escaped = conn.dialect.identifier_preparer.format_table(tables['user'])
+    conn.execute('update repository set namespace = (select username from {0} where {0}.id = repository.namespace_user_id) where namespace is NULL'.format(user_table_name_escaped))
     op.create_index('repository_namespace_name', 'repository', ['namespace', 'name'], unique=True)
     op.alter_column('repository', 'namespace', nullable=False, existing_type=sa.String(length=255))
 
diff --git a/data/migrations/versions/b1d41e2071b_add_an_index_to_the_uuid_in_the_image_.py b/data/migrations/versions/b1d41e2071b_add_an_index_to_the_uuid_in_the_image_.py
new file mode 100644
index 000000000..71a9df794
--- /dev/null
+++ b/data/migrations/versions/b1d41e2071b_add_an_index_to_the_uuid_in_the_image_.py
@@ -0,0 +1,22 @@
+"""Add an index to the uuid in the image storage table.
+
+Revision ID: b1d41e2071b
+Revises: 9a1087b007d
+Create Date: 2014-10-06 18:42:10.021235
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b1d41e2071b'
+down_revision = '9a1087b007d'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+    op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=True)
+
+
+def downgrade(tables):
+    op.drop_index('imagestorage_uuid', table_name='imagestorage')
diff --git a/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py b/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py
index 9ceab4218..5b3f6c812 100644
--- a/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py
+++ b/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py
@@ -23,13 +23,11 @@ def upgrade(tables):
 def downgrade(tables):
     ### commands auto generated by Alembic - please adjust! ###
     op.create_table('webhook',
-    sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
-    sa.Column('public_id', mysql.VARCHAR(length=255), nullable=False),
-    sa.Column('repository_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
-    sa.Column('parameters', mysql.LONGTEXT(), nullable=False),
-    sa.ForeignKeyConstraint(['repository_id'], [u'repository.id'], name=u'fk_webhook_repository_repository_id'),
-    sa.PrimaryKeyConstraint('id'),
-    mysql_default_charset=u'latin1',
-    mysql_engine=u'InnoDB'
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('public_id', sa.String(length=255), nullable=False),
+    sa.Column('repository_id', sa.Integer(), nullable=False),
+    sa.Column('parameters', sa.Text(), nullable=False),
+    sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+    sa.PrimaryKeyConstraint('id')
     )
     ### end Alembic commands ###
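Replacing the `mysql.*` column types with generic `sa.*` types in these downgrades (here and in the image-table migration above) removes the hard dependency on MySQL: SQLAlchemy compiles a generic type to whatever DDL the active dialect expects. A quick illustration of that compilation step, not part of the patch:

```python
import sqlalchemy as sa
from sqlalchemy.dialects import mysql, postgresql

# The same generic type renders as backend-appropriate DDL at compile time.
print(sa.DateTime().compile(dialect=mysql.dialect()))       # DATETIME
print(sa.DateTime().compile(dialect=postgresql.dialect()))  # TIMESTAMP WITHOUT TIME ZONE
```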
diff --git a/static/directives/prototype-manager.html b/static/directives/prototype-manager.html
index 7143b13b3..35e5b9c0c 100644
--- a/static/directives/prototype-manager.html
+++ b/static/directives/prototype-manager.html
@@ -3,7 +3,7 @@
-      Default permissions provide a means of specifying additional permissions that should be granted automatically to a repository.
+      Default permissions provide a means of specifying additional permissions that should be granted automatically to a repository when it is created.
diff --git a/static/js/app.js b/static/js/app.js
index ba3145116..14d697d19 100644
--- a/static/js/app.js
+++ b/static/js/app.js
@@ -1391,7 +1391,8 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
           {
             'name': 'notification_token',
             'type': 'string',
-            'title': 'Notification Token'
+            'title': 'Room Notification Token',
+            'help_url': 'https://hipchat.com/rooms/tokens/{room_id}'
           }
         ]
       },
diff --git a/tools/uncompressedsize.py b/tools/uncompressedsize.py
index 608446bae..53bb74126 100644
--- a/tools/uncompressedsize.py
+++ b/tools/uncompressedsize.py
@@ -4,7 +4,7 @@ import zlib
 from data import model
 from data.database import ImageStorage
 from app import app, storage as store
-from data.database import db
+from data.database import db, db_random_func
 from util.gzipstream import ZLIB_GZIP_WINDOW
 
 
@@ -17,49 +17,59 @@ CHUNK_SIZE = 5 * 1024 * 1024
 def backfill_sizes_from_data():
   while True:
     # Load the record from the DB.
-    try:
-      record = (ImageStorage
-                .select(ImageStorage.uuid)
-                .where(ImageStorage.uncompressed_size >> None, ImageStorage.uploading == False)
-                .get())
-    except ImageStorage.DoesNotExist:
+    batch_ids = list(ImageStorage
+                     .select(ImageStorage.uuid)
+                     .where(ImageStorage.uncompressed_size >> None,
+                            ImageStorage.uploading == False)
+                     .limit(100)
+                     .order_by(db_random_func()))
+    if len(batch_ids) == 0:
       # We're done!
       return
 
-    uuid = record.uuid
+    for record in batch_ids:
+      uuid = record.uuid
 
-    with_locations = model.get_storage_by_uuid(uuid)
-
-    # Read the layer from backing storage and calculate the uncompressed size.
-    logger.debug('Loading data: %s (%s bytes)', uuid, with_locations.image_size)
-    decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
-
-    uncompressed_size = 0
-    with store.stream_read_file(with_locations.locations, store.image_layer_path(uuid)) as stream:
-      while True:
-        current_data = stream.read(CHUNK_SIZE)
-        if len(current_data) == 0:
-          break
-
-        uncompressed_size += len(decompressor.decompress(current_data))
-
-    # Write the size to the image storage. We do so under a transaction AFTER checking to
-    # make sure the image storage still exists and has not changed.
-    logger.debug('Writing entry: %s. Size: %s', uuid, uncompressed_size)
-    with app.config['DB_TRANSACTION_FACTORY'](db):
       try:
-        current_record = model.get_storage_by_uuid(uuid)
+        with_locs = model.get_storage_by_uuid(uuid)
+        if with_locs.uncompressed_size is not None:
+          logger.debug('Somebody else already filled this in for us: %s', uuid)
+          continue
+
+        # Read the layer from backing storage and calculate the uncompressed size.
+        logger.debug('Loading data: %s (%s bytes)', uuid, with_locs.image_size)
+        decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
+
+        uncompressed_size = 0
+        with store.stream_read_file(with_locs.locations, store.image_layer_path(uuid)) as stream:
+          while True:
+            current_data = stream.read(CHUNK_SIZE)
+            if len(current_data) == 0:
+              break
+
+            uncompressed_size += len(decompressor.decompress(current_data))
+
+        # Write the size to the image storage. We do so under a transaction AFTER checking to
+        # make sure the image storage still exists and has not changed.
+        logger.debug('Writing entry: %s. Size: %s', uuid, uncompressed_size)
+        with app.config['DB_TRANSACTION_FACTORY'](db):
+          current_record = model.get_storage_by_uuid(uuid)
+
+          if not current_record.uploading and current_record.uncompressed_size == None:
+            current_record.uncompressed_size = uncompressed_size
+            current_record.save()
+          else:
+            logger.debug('Somebody else already filled this in for us, after we did the work: %s',
+                         uuid)
+
       except model.InvalidImageException:
         logger.warning('Storage with uuid no longer exists: %s', uuid)
-        continue
-
-      if not current_record.uploading and current_record.uncompressed_size == None:
-        current_record.uncompressed_size = uncompressed_size
-        current_record.save()
-
+      except MemoryError:
+        logger.warning('MemoryError on %s', uuid)
 
 
 if __name__ == "__main__":
   logging.basicConfig(level=logging.DEBUG)
   logging.getLogger('boto').setLevel(logging.CRITICAL)
+  logging.getLogger('peewee').setLevel(logging.CRITICAL)
   backfill_sizes_from_data()
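The backfill now pulls batches of 100 rows in random order, so several workers can run the script concurrently with little chance of grabbing the same rows, and each row is re-checked inside the transaction before writing. The diff calls a parameterless `db_random_func()` imported from `data.database`, whose definition is not shown here. A rough sketch of the idea behind such a helper, written with an explicit database argument for clarity (the name and structure are assumptions, not Quay's actual implementation):

```python
from peewee import MySQLDatabase, fn


def random_func_for(db):
  """Return the backend's random-ordering SQL function node.

  Hypothetical sketch: MySQL names the function RAND(), while SQLite and
  PostgreSQL both name it RANDOM().
  """
  if isinstance(db, MySQLDatabase):
    return fn.Rand()
  return fn.Random()


# Usage mirrors the backfill query above:
#   ImageStorage.select(ImageStorage.uuid).limit(100).order_by(random_func_for(db))
```

Randomizing the batch order trades an index-friendly scan for lower contention: two concurrent backfill runs will mostly pick disjoint rows, and the in-transaction re-check catches the rare collision.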