Allow the namespace column to be null, and also non-unique. Fix the uncompressed size clobbering the size on the wire field. Add metadata constraints so that foreign key constraints get predictable names. Fix all downgrade migrations.
This commit is contained in:
parent
2c5cc7990f
commit
5c18ffe67d
9 changed files with 53 additions and 28 deletions
|
@ -22,4 +22,5 @@ def upgrade(tables):
|
|||
|
||||
|
||||
def downgrade(tables):
|
||||
op.drop_constraint('fk_repository_namespace_user_id_user', table_name='repository', type_='foreignkey')
|
||||
op.drop_index('repository_namespace_user_id_name', table_name='repository')
|
||||
|
|
|
@ -74,8 +74,5 @@ def downgrade(tables):
|
|||
.where(tables.notificationkind.c.name == op.inline_literal('org_team_invite')))
|
||||
)
|
||||
|
||||
op.drop_index('teammemberinvite_user_id', table_name='teammemberinvite')
|
||||
op.drop_index('teammemberinvite_team_id', table_name='teammemberinvite')
|
||||
op.drop_index('teammemberinvite_inviter_id', table_name='teammemberinvite')
|
||||
op.drop_table('teammemberinvite')
|
||||
### end Alembic commands ###
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
"""Allow the namespace column to be nullable.
|
||||
|
||||
Revision ID: 9a1087b007d
|
||||
Revises: 3f4fe1194671
|
||||
Create Date: 2014-10-01 16:11:21.277226
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '9a1087b007d'
|
||||
down_revision = '3f4fe1194671'
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
def upgrade(tables):
    """Make repository.namespace nullable and drop its unique index."""
    # The (namespace, name) unique index is superseded by the
    # namespace_user_id based index, so remove it first.
    op.drop_index('repository_namespace_name', table_name='repository')
    # Relax the column so rows may carry NULL from here on.
    op.alter_column('repository', 'namespace', nullable=True,
                    existing_type=sa.String(length=255),
                    server_default=sa.text('NULL'))
|
||||
|
||||
|
||||
def downgrade(tables):
    """Restore NOT NULL and the unique (namespace, name) index on repository."""
    bind = op.get_bind()
    # Backfill NULL namespaces from the owning user before tightening the
    # column again; otherwise the NOT NULL alteration would fail.
    bind.execute('update repository set namespace = (select username from user where user.id = repository.namespace_user_id) where namespace is NULL')

    op.create_index('repository_namespace_name', 'repository',
                    ['namespace', 'name'], unique=True)
    op.alter_column('repository', 'namespace', nullable=False,
                    existing_type=sa.String(length=255))
|
|
@ -1223,8 +1223,7 @@ def get_storage_by_uuid(storage_uuid):
|
|||
return found
|
||||
|
||||
|
||||
def set_image_size(docker_image_id, namespace_name, repository_name,
|
||||
image_size):
|
||||
def set_image_size(docker_image_id, namespace_name, repository_name, image_size, uncompressed_size):
|
||||
try:
|
||||
image = (Image
|
||||
.select(Image, ImageStorage)
|
||||
|
@ -1233,18 +1232,15 @@ def set_image_size(docker_image_id, namespace_name, repository_name,
|
|||
.switch(Image)
|
||||
.join(ImageStorage, JOIN_LEFT_OUTER)
|
||||
.where(Repository.name == repository_name, Namespace.username == namespace_name,
|
||||
Image.docker_image_id == docker_image_id)
|
||||
Image.docker_image_id == docker_image_id)
|
||||
.get())
|
||||
|
||||
except Image.DoesNotExist:
|
||||
raise DataModelException('No image with specified id and repository')
|
||||
|
||||
if image.storage and image.storage.id:
|
||||
image.storage.image_size = image_size
|
||||
image.storage.save()
|
||||
else:
|
||||
image.image_size = image_size
|
||||
image.save()
|
||||
image.storage.image_size = image_size
|
||||
image.storage.uncompressed_size = uncompressed_size
|
||||
image.storage.save()
|
||||
|
||||
return image
|
||||
|
||||
|
|
|
@ -17,7 +17,12 @@ OPTION_TRANSLATIONS = {
|
|||
|
||||
|
||||
def gen_sqlalchemy_metadata(peewee_model_list):
|
||||
metadata = MetaData()
|
||||
metadata = MetaData(naming_convention={
|
||||
"ix": 'ix_%(column_0_label)s',
|
||||
"uq": "uq_%(table_name)s_%(column_0_name)s",
|
||||
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
|
||||
"pk": "pk_%(table_name)s"
|
||||
})
|
||||
|
||||
for model in peewee_model_list:
|
||||
meta = model._meta
|
||||
|
|
|
@ -220,7 +220,7 @@ def put_image_layer(namespace, repository, image_id):
|
|||
image_size = tmp.tell()
|
||||
|
||||
# Save the size of the image.
|
||||
model.set_image_size(image_id, namespace, repository, image_size)
|
||||
model.set_image_size(image_id, namespace, repository, image_size, uncompressed_size_info.size)
|
||||
|
||||
tmp.seek(0)
|
||||
csums.append(checksums.compute_tarsum(tmp, json_data))
|
||||
|
@ -229,12 +229,6 @@ def put_image_layer(namespace, repository, image_id):
|
|||
logger.debug('put_image_layer: Error when computing tarsum '
|
||||
'{0}'.format(e))
|
||||
|
||||
# Write the uncompressed image size, if any.
|
||||
if uncompressed_size_info['size'] > 0:
|
||||
profile.debug('Storing uncompressed layer size: %s' % uncompressed_size_info['size'])
|
||||
repo_image.storage.uncompressed_size = uncompressed_size_info['size']
|
||||
repo_image.storage.save()
|
||||
|
||||
if repo_image.storage.checksum is None:
|
||||
# We don't have a checksum stored yet, that's fine skipping the check.
|
||||
# Not removing the mark though, image is not downloadable yet.
|
||||
|
|
|
@ -82,8 +82,9 @@ def __create_subtree(repo, structure, creator_username, parent):
|
|||
new_image = model.set_image_metadata(docker_image_id, repo.namespace_user.username, repo.name,
|
||||
str(creation_time), 'no comment', command, parent)
|
||||
|
||||
model.set_image_size(docker_image_id, repo.namespace_user.username, repo.name,
|
||||
random.randrange(1, 1024 * 1024 * 1024))
|
||||
compressed_size = random.randrange(1, 1024 * 1024 * 1024)
|
||||
model.set_image_size(docker_image_id, repo.namespace_user.username, repo.name, compressed_size,
|
||||
int(compressed_size * 1.4))
|
||||
|
||||
# Populate the diff file
|
||||
diff_path = store.image_file_diffs_path(new_image.storage.uuid)
|
||||
|
|
Binary file not shown.
|
@ -9,17 +9,20 @@ import zlib
|
|||
# Window value that lets zlib auto-detect a zlib or gzip header; see
# http://stackoverflow.com/questions/3122145/zlib-error-error-3-while-decompressing-incorrect-header-check/22310760#22310760
ZLIB_GZIP_WINDOW = zlib.MAX_WBITS | 32


class SizeInfo(object):
    """Mutable holder for a running uncompressed-byte count."""

    def __init__(self):
        # Total number of uncompressed bytes observed so far.
        self.size = 0


def calculate_size_handler():
    """Return a (SizeInfo, handler) pair for measuring uncompressed size.

    The handler gunzips each chunk of data it receives and adds the
    decompressed length to the SizeInfo accumulator. The diff rendering
    had left the superseded dict-based accumulator interleaved here; this
    is the coherent SizeInfo-based implementation.
    """
    size_info = SizeInfo()
    decompressor = zlib.decompressobj(ZLIB_GZIP_WINDOW)

    def fn(buf):
        size_info.size += len(decompressor.decompress(buf))

    return size_info, fn
|
||||
|
|
Reference in a new issue