Migrate data back to Image in preparation for v2
This commit is contained in:
parent
abb1486a96
commit
b56de3355c
7 changed files with 72 additions and 6 deletions
|
@ -561,6 +561,12 @@ class Image(BaseModel):
|
|||
|
||||
storage = ForeignKeyField(ImageStorage, index=True, null=True)
|
||||
|
||||
created = DateTimeField(null=True)
|
||||
comment = TextField(null=True)
|
||||
command = TextField(null=True)
|
||||
aggregate_size = BigIntegerField(null=True)
|
||||
v1_json_metadata = TextField(null=True)
|
||||
|
||||
class Meta:
|
||||
database = db
|
||||
read_slaves = (read_slave,)
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
"""Migrate image data back to image table
|
||||
|
||||
Revision ID: 545794454f49
|
||||
Revises: 3a3bb77e17d5
|
||||
Create Date: 2015-09-15 11:48:47.554255
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '545794454f49'
|
||||
down_revision = '3a3bb77e17d5'
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
def upgrade(tables):
    """Re-add the denormalized image metadata columns to the `image` table.

    Part of migrating metadata back from `imagestorage` onto `image` in
    preparation for v2. Every column is nullable, so existing rows need no
    backfill at DDL time.
    """
    # (name, type) pairs, applied in the original auto-generated order.
    added_columns = (
        ('aggregate_size', sa.BigInteger()),
        ('command', sa.Text()),
        ('comment', sa.Text()),
        ('created', sa.DateTime()),
        ('v1_json_metadata', sa.Text()),
    )
    for column_name, column_type in added_columns:
        op.add_column('image', sa.Column(column_name, column_type, nullable=True))
|
||||
|
||||
|
||||
def downgrade(tables):
    """Drop the image metadata columns added by the matching upgrade.

    Columns are removed in the reverse of the order they were added, mirroring
    the auto-generated Alembic script.
    """
    for column_name in ('v1_json_metadata',
                        'created',
                        'comment',
                        'command',
                        'aggregate_size'):
        op.drop_column('image', column_name)
|
|
@ -255,7 +255,7 @@ def find_create_or_link_image(docker_image_id, repo_obj, username, translations,
|
|||
|
||||
|
||||
def set_image_metadata(docker_image_id, namespace_name, repository_name, created_date_str, comment,
|
||||
command, parent=None):
|
||||
command, v1_json_metadata, parent=None):
|
||||
with db_transaction():
|
||||
query = (Image
|
||||
.select(Image, ImageStorage)
|
||||
|
@ -273,7 +273,10 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
|
|||
|
||||
# We cleanup any old checksum in case it's a retry after a fail
|
||||
fetched.storage.checksum = None
|
||||
fetched.storage.created = datetime.now()
|
||||
now = datetime.now()
|
||||
# TODO stop writing to storage when all readers are removed
|
||||
fetched.storage.created = now
|
||||
fetched.created = now
|
||||
|
||||
if created_date_str is not None:
|
||||
try:
|
||||
|
@ -282,8 +285,12 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created
|
|||
# parse raises different exceptions, so we cannot use a specific kind of handler here.
|
||||
pass
|
||||
|
||||
# TODO stop writing to storage fields when all readers are removed
|
||||
fetched.storage.comment = comment
|
||||
fetched.storage.command = command
|
||||
fetched.comment = comment
|
||||
fetched.command = command
|
||||
fetched.v1_json_metadata = v1_json_metadata
|
||||
|
||||
if parent:
|
||||
fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
|
||||
|
@ -323,13 +330,18 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size,
|
|||
.where(Image.id << ancestors)
|
||||
.scalar()) + image_size
|
||||
|
||||
# TODO stop writing to storage when all readers are removed
|
||||
image.storage.aggregate_size = total_size
|
||||
image.aggregate_size = total_size
|
||||
except Image.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
# TODO stop writing to storage when all readers are removed
|
||||
image.storage.aggregate_size = image_size
|
||||
image.aggregate_size = image_size
|
||||
|
||||
image.storage.save()
|
||||
image.save()
|
||||
|
||||
return image
|
||||
|
||||
|
|
Reference in a new issue