Fix deleting repos and images under MySQL
MySQL does not defer foreign-key constraint checks to the end of a transaction, so deleting images currently fails under MySQL. This removes the database-level constraint and leaves parent_id as a plain integer column.
parent daa74b5132
commit 2d2662f53f
6 changed files with 9 additions and 10 deletions
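For context, a minimal sketch of the failure mode follows. It is not part of this commit: the database name and credentials are hypothetical, and the model is trimmed to the relevant column. InnoDB checks foreign keys as each statement runs and has no deferred, end-of-transaction constraint checking, which is why the self-referential FK breaks bulk deletes during garbage collection.

# Minimal sketch (illustrative only, not the real Quay configuration).
from peewee import MySQLDatabase, Model, CharField, ForeignKeyField, IntegerField

db = MySQLDatabase('example', user='example', password='example')  # hypothetical connection

class Image(Model):
  docker_image_id = CharField()
  # Old schema: a real self-referential FK that MySQL enforces immediately,
  # row by row, as each statement executes.
  parent = ForeignKeyField('self', index=True, null=True, related_name='children')

  class Meta:
    database = db

# Deleting a repository's images in bulk can remove a parent row before (or in the
# same statement as) a child row that still references it. Because MySQL does not
# defer the check to commit time, the delete is rejected with an IntegrityError:
#
#   with db.transaction():
#     Image.delete().where(Image.id << ids_to_remove).execute()   # ids_to_remove is illustrative
#
# Replacing the FK with a plain column removes the database-level check, so the
# same bulk delete succeeds regardless of ordering:
#
#   parent_id = IntegerField(index=True, null=True)

The trade-off is that referential integrity for parent_id is now maintained by application code, as in the backfill worker changed below.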
|
@@ -575,7 +575,7 @@ class Image(BaseModel):
   security_indexed = BooleanField(default=False)
   security_indexed_engine = IntegerField(default=-1)
-  parent = ForeignKeyField('self', index=True, null=True, related_name='children')
+  parent_id = IntegerField(index=True, null=True)

   class Meta:
     database = db
@@ -19,14 +19,13 @@ def upgrade(tables):
   op.add_column('image', sa.Column('security_indexed', sa.Boolean(), nullable=False, default=False, server_default=sa.sql.expression.false()))
   op.add_column('image', sa.Column('security_indexed_engine', sa.Integer(), nullable=False, default=-1, server_default="-1"))
   op.create_index('image_parent_id', 'image', ['parent_id'], unique=False)
-  op.create_foreign_key(op.f('fk_image_parent_id_image'), 'image', 'image', ['parent_id'], ['id'])
   ### end Alembic commands ###

   op.create_index('image_security_indexed_engine_security_indexed', 'image', ['security_indexed_engine', 'security_indexed'])


 def downgrade(tables):
   ### commands auto generated by Alembic - please adjust! ###
   op.drop_index('image_security_indexed_engine_security_indexed', 'image')
-  op.drop_constraint(op.f('fk_image_parent_id_image'), 'image', type_='foreignkey')
   op.drop_index('image_parent_id', table_name='image')
   op.drop_column('image', 'security_indexed')
   op.drop_column('image', 'security_indexed_engine')
@@ -303,7 +303,7 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created

   if parent:
     fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
-    fetched.parent = parent
+    fetched.parent_id = parent.id

   fetched.save()
   fetched.storage.save()
@@ -131,12 +131,12 @@ def garbage_collect_repo(repo):
   # iterable of tuples containing [(k, v), (k, v), ...]
   all_repo_images = Image.select(Image.id, Image.storage).where(Image.repository == repo).tuples()
   images_to_storages = dict(all_repo_images)
-  to_remove = set(images_to_storages.keys()).difference(referenced_ancestors)
+  to_remove = list(set(images_to_storages.keys()).difference(referenced_ancestors))

   if len(to_remove) > 0:
     logger.info('Cleaning up unreferenced images: %s', to_remove)
     storage_id_whitelist = {images_to_storages[to_remove_id] for to_remove_id in to_remove}
-    Image.delete().where(Image.id << list(to_remove)).execute()
+    Image.delete().where(Image.id << to_remove).execute()

   if len(to_remove) > 0:
     logger.info('Garbage collecting storage for images: %s', to_remove)
@@ -18,7 +18,7 @@ def backfill_parent_id():
     return (Image
             .select(Image.id, Image.ancestors)
             .join(ImageStorage)
-            .where(Image.parent >> None, Image.ancestors != '/',
+            .where(Image.parent_id >> None, Image.ancestors != '/',
                    ImageStorage.uploading == False))

   for to_backfill in yield_random_entries(fetch_batch, 10000, 0.3):
@@ -27,7 +27,7 @@ def backfill_parent_id():
       image = db_for_update(Image
                             .select()
                             .where(Image.id == to_backfill.id)).get()
-      image.parent = to_backfill.ancestors.split('/')[-2]
+      image.parent_id = int(to_backfill.ancestors.split('/')[-2])
       image.save()
     except Image.DoesNotExist:
       pass
@@ -27,7 +27,7 @@ def _get_image_to_export(version):
   candidates = (Image
                 .select(Image.docker_image_id, ImageStorage.uuid, ImageStorage.checksum)
                 .join(ImageStorage)
-                .where(Image.security_indexed_engine < version, Image.parent >> None, ImageStorage.uploading == False, ImageStorage.checksum != '')
+                .where(Image.security_indexed_engine < version, Image.parent_id >> None, ImageStorage.uploading == False, ImageStorage.checksum != '')
                 .limit(BATCH_SIZE*10)
                 .alias('candidates'))
@@ -44,7 +44,7 @@ def _get_image_to_export(version):
   # With analyzed parent
   candidates = (Image
                 .select(Image.docker_image_id, ImageStorage.uuid, ImageStorage.checksum, Parent.docker_image_id.alias('parent_docker_image_id'), ParentImageStorage.uuid.alias('parent_storage_uuid'))
-                .join(Parent, on=(Image.parent == Parent.id))
+                .join(Parent, on=(Image.parent_id == Parent.id))
                 .join(ParentImageStorage, on=(ParentImageStorage.id == Parent.storage))
                 .switch(Image)
                 .join(ImageStorage)