Merge remote-tracking branch 'origin/master' into nomenclature
commit ee9973a395
9 changed files with 85 additions and 53 deletions
@@ -257,7 +257,7 @@ class EmailConfirmation(BaseModel):
 
 
 class ImageStorage(BaseModel):
-  uuid = CharField(default=uuid_generator)
+  uuid = CharField(default=uuid_generator, index=True)
   checksum = CharField(null=True)
   created = DateTimeField(null=True)
   comment = TextField(null=True)

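Note on the model change above: peewee's index=True only takes effect when the table is created, so existing deployments also need the Alembic migration added later in this commit. A minimal standalone sketch, assuming a throwaway SQLite database and a stand-in uuid_generator rather than Quay's actual setup:

from uuid import uuid4

from peewee import CharField, DateTimeField, Model, SqliteDatabase, TextField

db = SqliteDatabase(':memory:')  # stand-in; Quay's real database config differs

def uuid_generator():
  # Stand-in for Quay's uuid_generator helper.
  return str(uuid4())

class ImageStorage(Model):
  uuid = CharField(default=uuid_generator, index=True)
  checksum = CharField(null=True)
  created = DateTimeField(null=True)
  comment = TextField(null=True)

  class Meta:
    database = db

db.connect()
db.create_tables([ImageStorage])  # the uuid index is emitted here, at CREATE TABLE time
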
@@ -44,11 +44,11 @@ def downgrade(tables):
     op.create_index('notificationkind_name', 'notificationkind', ['name'], unique=False)
     op.drop_index('logentrykind_name', table_name='logentrykind')
     op.create_index('logentrykind_name', 'logentrykind', ['name'], unique=False)
-    op.add_column('image', sa.Column('created', mysql.DATETIME(), nullable=True))
-    op.add_column('image', sa.Column('command', mysql.LONGTEXT(), nullable=True))
-    op.add_column('image', sa.Column('image_size', mysql.BIGINT(display_width=20), nullable=True))
-    op.add_column('image', sa.Column('checksum', mysql.VARCHAR(length=255), nullable=True))
-    op.add_column('image', sa.Column('comment', mysql.LONGTEXT(), nullable=True))
+    op.add_column('image', sa.Column('created', sa.DateTime(), nullable=True))
+    op.add_column('image', sa.Column('command', sa.Text(), nullable=True))
+    op.add_column('image', sa.Column('image_size', sa.BigInteger(), nullable=True))
+    op.add_column('image', sa.Column('checksum', sa.String(length=255), nullable=True))
+    op.add_column('image', sa.Column('comment', sa.Text(), nullable=True))
     op.drop_index('buildtriggerservice_name', table_name='buildtriggerservice')
     op.create_index('buildtriggerservice_name', 'buildtriggerservice', ['name'], unique=False)
     ### end Alembic commands ###

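The rewritten downgrade above swaps the MySQL-specific column types (mysql.DATETIME, mysql.LONGTEXT, mysql.BIGINT) for their dialect-neutral SQLAlchemy equivalents, so the migration can also run against non-MySQL backends. A standalone sketch of how the generic types compile to backend-specific DDL (plain SQLAlchemy, nothing Quay-specific assumed):

# Sketch: generic SQLAlchemy types render per-backend DDL, while the
# mysql.* types only compile for MySQL.
import sqlalchemy as sa
from sqlalchemy.dialects import mysql, postgresql, sqlite

print(sa.Text().compile(dialect=mysql.dialect()))               # TEXT
print(sa.BigInteger().compile(dialect=postgresql.dialect()))    # BIGINT
print(sa.String(length=255).compile(dialect=sqlite.dialect()))  # VARCHAR(255)
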
@@ -16,8 +16,8 @@ import sqlalchemy as sa
 
 def upgrade(tables):
     conn = op.get_bind()
-    conn.execute('update repository set namespace_user_id = (select id from user where user.username = repository.namespace) where namespace_user_id is NULL')
-
+    user_table_name_escaped = conn.dialect.identifier_preparer.format_table(tables['user'])
+    conn.execute('update repository set namespace_user_id = (select id from {0} where {0}.username = repository.namespace) where namespace_user_id is NULL'.format(user_table_name_escaped))
     op.create_index('repository_namespace_user_id_name', 'repository', ['namespace_user_id', 'name'], unique=True)

@@ -22,7 +22,8 @@ def upgrade(tables):
 
 def downgrade(tables):
     conn = op.get_bind()
-    conn.execute('update repository set namespace = (select username from user where user.id = repository.namespace_user_id) where namespace is NULL')
+    user_table_name_escaped = conn.dialect.identifier_preparer.format_table(tables['user'])
+    conn.execute('update repository set namespace = (select username from {0} where {0}.id = repository.namespace_user_id) where namespace is NULL'.format(user_table_name_escaped))
 
     op.create_index('repository_namespace_name', 'repository', ['namespace', 'name'], unique=True)
     op.alter_column('repository', 'namespace', nullable=False, existing_type=sa.String(length=255))

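Both migration paths above stop interpolating the bare table name user into raw SQL and instead ask the dialect's identifier preparer for a safely quoted name: user is a reserved word on some backends (PostgreSQL in particular), so the unquoted form breaks there. The migration calls format_table() with a Table object; a minimal sketch of the same quoting machinery applied to plain strings:

# Sketch: per-dialect identifier quoting, standalone SQLAlchemy only.
from sqlalchemy.dialects import mysql, postgresql

pg_preparer = postgresql.dialect().identifier_preparer
my_preparer = mysql.dialect().identifier_preparer

print(pg_preparer.quote('user'))             # "user"  (reserved word, so quoted)
print(my_preparer.quote_identifier('user'))  # `user`  (MySQL backtick quoting)
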
@@ -0,0 +1,22 @@
+"""Add an index to the uuid in the image storage table.
+
+Revision ID: b1d41e2071b
+Revises: 9a1087b007d
+Create Date: 2014-10-06 18:42:10.021235
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b1d41e2071b'
+down_revision = '9a1087b007d'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+    op.create_index('imagestorage_uuid', 'imagestorage', ['uuid'], unique=True)
+
+
+def downgrade(tables):
+    op.drop_index('imagestorage_uuid', table_name='imagestorage')

@@ -23,13 +23,11 @@ def upgrade(tables):
 def downgrade(tables):
     ### commands auto generated by Alembic - please adjust! ###
     op.create_table('webhook',
-    sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
-    sa.Column('public_id', mysql.VARCHAR(length=255), nullable=False),
-    sa.Column('repository_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
-    sa.Column('parameters', mysql.LONGTEXT(), nullable=False),
-    sa.ForeignKeyConstraint(['repository_id'], [u'repository.id'], name=u'fk_webhook_repository_repository_id'),
-    sa.PrimaryKeyConstraint('id'),
-    mysql_default_charset=u'latin1',
-    mysql_engine=u'InnoDB'
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('public_id', sa.String(length=255), nullable=False),
+    sa.Column('repository_id', sa.Integer(), nullable=False),
+    sa.Column('parameters', sa.Text(), nullable=False),
+    sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], ),
+    sa.PrimaryKeyConstraint('id')
     )
     ### end Alembic commands ###

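As in the earlier migration, the recreated webhook table above drops the MySQL-only pieces: the mysql.* column types, the explicit foreign-key constraint name, and the latin1/InnoDB table options. A standalone sketch (plain SQLAlchemy metadata, not Quay's Alembic context) showing that the generic definition creates cleanly on SQLite, where the mysql.* types would not compile:

import sqlalchemy as sa

metadata = sa.MetaData()
# Minimal stand-in for the referenced table.
repository = sa.Table('repository', metadata,
                      sa.Column('id', sa.Integer(), primary_key=True))
webhook = sa.Table('webhook', metadata,
                   sa.Column('id', sa.Integer(), nullable=False),
                   sa.Column('public_id', sa.String(length=255), nullable=False),
                   sa.Column('repository_id', sa.Integer(), nullable=False),
                   sa.Column('parameters', sa.Text(), nullable=False),
                   sa.ForeignKeyConstraint(['repository_id'], ['repository.id']),
                   sa.PrimaryKeyConstraint('id'))

engine = sa.create_engine('sqlite://')
metadata.create_all(engine)  # succeeds on SQLite with the generic types
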
@@ -3,7 +3,7 @@
 
 <div class="container" ng-show="!loading">
   <div class="alert alert-info">
-    Default permissions provide a means of specifying <span class="context-tooltip" data-title="By default, all repositories have the creating user added as an 'Admin'" bs-tooltip="tooltip.title">additional</span> permissions that should be granted automatically to a repository.
+    Default permissions provide a means of specifying <span class="context-tooltip" data-title="By default, all repositories have the creating user added as an 'Admin'" bs-tooltip="tooltip.title">additional</span> permissions that should be granted automatically to a repository <strong>when it is created</strong>.
   </div>
 
 <div class="side-controls">

@@ -1391,7 +1391,8 @@ quayApp = angular.module('quay', quayDependencies, function($provide, cfpLoading
           {
             'name': 'notification_token',
             'type': 'string',
-            'title': 'Notification Token'
+            'title': 'Room Notification Token',
+            'help_url': 'https://hipchat.com/rooms/tokens/{room_id}'
           }
         ]
       },

@@ -4,7 +4,7 @@ import zlib
 
 from data import model
 from data.database import ImageStorage
 from app import app, storage as store
-from data.database import db
+from data.database import db, db_random_func
 from util.gzipstream import ZLIB_GZIP_WINDOW
 

@@ -17,49 +17,59 @@ CHUNK_SIZE = 5 * 1024 * 1024
 
 def backfill_sizes_from_data():
   while True:
     # Load the record from the DB.
-    try:
-      record = (ImageStorage
-                .select(ImageStorage.uuid)
-                .where(ImageStorage.uncompressed_size >> None, ImageStorage.uploading == False)
-                .get())
-    except ImageStorage.DoesNotExist:
+    batch_ids = list(ImageStorage
+                     .select(ImageStorage.uuid)
+                     .where(ImageStorage.uncompressed_size >> None,
+                            ImageStorage.uploading == False)
+                     .limit(100)
+                     .order_by(db_random_func()))
+    if len(batch_ids) == 0:
       # We're done!
       return
 
-    uuid = record.uuid
+    for record in batch_ids:
+      uuid = record.uuid
 
-    with_locations = model.get_storage_by_uuid(uuid)
+      try:
+        with_locs = model.get_storage_by_uuid(uuid)
+        if with_locs.uncompressed_size is not None:
+          logger.debug('Somebody else already filled this in for us: %s', uuid)
+          continue
 
-    # Read the layer from backing storage and calculate the uncompressed size.
-    logger.debug('Loading data: %s (%s bytes)', uuid, with_locations.image_size)
-    decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
+        # Read the layer from backing storage and calculate the uncompressed size.
+        logger.debug('Loading data: %s (%s bytes)', uuid, with_locs.image_size)
+        decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)
 
-    uncompressed_size = 0
-    with store.stream_read_file(with_locations.locations, store.image_layer_path(uuid)) as stream:
-      while True:
-        current_data = stream.read(CHUNK_SIZE)
-        if len(current_data) == 0:
-          break
-
-        uncompressed_size += len(decompressor.decompress(current_data))
+        uncompressed_size = 0
+        with store.stream_read_file(with_locs.locations, store.image_layer_path(uuid)) as stream:
+          while True:
+            current_data = stream.read(CHUNK_SIZE)
+            if len(current_data) == 0:
+              break
+
+            uncompressed_size += len(decompressor.decompress(current_data))
 
-    # Write the size to the image storage. We do so under a transaction AFTER checking to
-    # make sure the image storage still exists and has not changed.
-    logger.debug('Writing entry: %s. Size: %s', uuid, uncompressed_size)
-    with app.config['DB_TRANSACTION_FACTORY'](db):
-      current_record = model.get_storage_by_uuid(uuid)
+        # Write the size to the image storage. We do so under a transaction AFTER checking to
+        # make sure the image storage still exists and has not changed.
+        logger.debug('Writing entry: %s. Size: %s', uuid, uncompressed_size)
+        with app.config['DB_TRANSACTION_FACTORY'](db):
+          current_record = model.get_storage_by_uuid(uuid)
 
-      if not current_record.uploading and current_record.uncompressed_size == None:
-        current_record.uncompressed_size = uncompressed_size
-        current_record.save()
-      else:
-        logger.debug('Somebody else already filled this in for us, after we did the work: %s',
-                     uuid)
+          if not current_record.uploading and current_record.uncompressed_size == None:
+            current_record.uncompressed_size = uncompressed_size
+            current_record.save()
+
+      except model.InvalidImageException:
+        logger.warning('Storage with uuid no longer exists: %s', uuid)
+        continue
+
+      except MemoryError:
+        logger.warning('MemoryError on %s', uuid)
 
 
 if __name__ == "__main__":
   logging.basicConfig(level=logging.DEBUG)
   logging.getLogger('boto').setLevel(logging.CRITICAL)
   logging.getLogger('peewee').setLevel(logging.CRITICAL)
 
   backfill_sizes_from_data()

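The backfill rewrite above replaces the one-row-at-a-time loop (select a single unfilled record, process it, repeat) with a random batch of up to 100 candidates, a cheap pre-check before the expensive decompression, and a re-check inside the transaction before writing, so concurrent workers rarely collide and vanished rows are skipped. A stripped-down sketch of that claim-and-recheck pattern (stand-in model, SQLite, and a fake size function; not the Quay code itself):

import random

from peewee import (BooleanField, CharField, IntegerField, Model,
                    SqliteDatabase, fn)

db = SqliteDatabase(':memory:')

class Storage(Model):
  uuid = CharField(index=True)
  uploading = BooleanField(default=False)
  uncompressed_size = IntegerField(null=True)

  class Meta:
    database = db

def compute_size(uuid):
  # Stand-in for the real work: streaming the layer and decompressing it.
  return random.randint(1, 1 << 20)

def backfill():
  while True:
    # Claim a random batch so concurrent workers rarely pick the same rows.
    batch = list(Storage.select(Storage.uuid)
                 .where(Storage.uncompressed_size >> None,
                        Storage.uploading == False)
                 .limit(100)
                 .order_by(fn.Random()))
    if not batch:
      return  # nothing left to fill in

    for record in batch:
      current = Storage.get(Storage.uuid == record.uuid)
      if current.uncompressed_size is not None:
        continue  # somebody else already filled this in

      size = compute_size(record.uuid)  # expensive work, outside any transaction

      # Re-check inside a transaction before writing, in case another
      # worker filled the row in while we were computing.
      with db.atomic():
        current = Storage.get(Storage.uuid == record.uuid)
        if not current.uploading and current.uncompressed_size is None:
          current.uncompressed_size = size
          current.save()

db.connect()
db.create_tables([Storage])
Storage.create(uuid='abc')
backfill()
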