Merge master into vulnerability-tool
commit 7816b0c657
44 changed files with 880 additions and 289 deletions
@@ -19,7 +19,7 @@ class RedisBuildLogs(object):

   def __init__(self, redis_config):
     args = dict(redis_config)
-    args.update({'socket_connect_timeout': 5})
+    args.update({'socket_connect_timeout': 5, 'socket_timeout': 5})

     self._redis_config = redis_config
     self._redis = redis.StrictRedis(**args)
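This one-line change adds a read/write timeout alongside the existing connect timeout, so a Redis server that accepts the TCP connection but then stops responding can no longer hang a build-logs call indefinitely. Both keyword arguments are standard redis-py options; a minimal sketch of the resulting client configuration (host and port are placeholders):

import redis

# socket_connect_timeout bounds the TCP handshake; socket_timeout bounds
# every subsequent blocking read/write on the connection (both in seconds).
client = redis.StrictRedis(host='localhost', port=6379,
                           socket_connect_timeout=5,
                           socket_timeout=5)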
@@ -484,7 +484,6 @@ class EmailConfirmation(BaseModel):

 class ImageStorage(BaseModel):
   uuid = CharField(default=uuid_generator, index=True, unique=True)
   checksum = CharField(null=True)  # TODO remove when all checksums have been moved back to Image
   image_size = BigIntegerField(null=True)
   uncompressed_size = BigIntegerField(null=True)
   uploading = BooleanField(default=True, null=True)
@@ -575,7 +574,7 @@ class Image(BaseModel):

-  security_indexed = BooleanField(default=False)
-  security_indexed_engine = IntegerField(default=-1)
-  parent = ForeignKeyField('self', index=True, null=True, related_name='children')
+  parent_id = IntegerField(index=True, null=True)
+
+  security_indexed = BooleanField(default=False)
+  security_indexed_engine = IntegerField(default=-1)
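Replacing the self-referential ForeignKeyField with a bare parent_id integer mirrors how the ancestors column already tracks lineage as raw ids: peewee no longer joins or validates the reference on every access, and the parent row is resolved only when actually needed. A sketch of what that explicit resolution might look like (get_parent_image is a hypothetical helper, not part of this diff):

def get_parent_image(image):
  # parent_id is a plain integer column, so resolving the parent is an
  # explicit, on-demand lookup rather than an implicit FK join.
  if image.parent_id is None:
    return None
  try:
    return Image.get(Image.id == image.parent_id)
  except Image.DoesNotExist:
    return None  # the parent row may have been garbage collected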
@@ -1,12 +1,14 @@
 """add support for quay's security indexer

 Revision ID: 57dad559ff2d
 Revises: 154f2befdfbe
 Create Date: 2015-07-13 16:51:41.669249

 """

 # revision identifiers, used by Alembic.
 revision = '57dad559ff2d'
-down_revision = '3ff4fbc94644'
+down_revision = '73669db7e12'

 from alembic import op
 import sqlalchemy as sa
@@ -14,19 +16,18 @@ import sqlalchemy as sa

 def upgrade(tables):
   ### commands auto generated by Alembic - please adjust! ###
   op.add_column('image', sa.Column('parent_id', sa.Integer(), nullable=True))
-  op.add_column('image', sa.Column('security_indexed', sa.Boolean(), nullable=False))
-  op.add_column('image', sa.Column('security_indexed_engine', sa.Integer(), nullable=False))
+  op.add_column('image', sa.Column('security_indexed', sa.Boolean(), nullable=False, default=False, server_default=sa.sql.expression.false()))
+  op.add_column('image', sa.Column('security_indexed_engine', sa.Integer(), nullable=False, default=-1, server_default="-1"))
   op.create_index('image_parent_id', 'image', ['parent_id'], unique=False)
-  op.create_foreign_key(op.f('fk_image_parent_id_image'), 'image', 'image', ['parent_id'], ['id'])
   ### end Alembic commands ###

+  op.create_index('image_security_indexed_engine_security_indexed', 'image', ['security_indexed_engine', 'security_indexed'])


 def downgrade(tables):
   ### commands auto generated by Alembic - please adjust! ###
-  op.drop_constraint(op.f('fk_image_parent_id_image'), 'image', type_='foreignkey')
+  op.drop_index('image_security_indexed_engine_security_indexed', 'image')
   op.drop_index('image_parent_id', table_name='image')
   op.drop_column('image', 'security_indexed')
   op.drop_column('image', 'security_indexed_engine')
   op.drop_column('image', 'parent_id')
   ### end Alembic commands ###
-  op.drop_index('image_security_indexed', 'image')
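The merged version of upgrade() attaches a server_default to both new NOT NULL columns. The ORM-level default= is never communicated to the database, so without a server-side default the ALTER TABLE would fail on any deployment whose image table already contains rows. The same pattern in isolation (the widget table and enabled column are invented for the example):

import sqlalchemy as sa
from alembic import op

def upgrade():
  # A NOT NULL column on a populated table needs a server-side default
  # so existing rows can be backfilled during the ALTER TABLE.
  op.add_column('widget', sa.Column('enabled', sa.Boolean(), nullable=False,
                                    server_default=sa.sql.expression.false()))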
@@ -65,7 +65,7 @@ def get_repo_image(namespace_name, repository_name, docker_image_id):

 def get_repo_image_extended(namespace_name, repository_name, docker_image_id):
   def limit_to_image_id(query):
-    return query.where(Image.docker_image_id == docker_image_id).limit(1)
+    return query.where(Image.docker_image_id == docker_image_id)

   images = get_repository_images_base(namespace_name, repository_name, limit_to_image_id)
   if not images:
@@ -215,14 +215,21 @@ def _find_or_link_image(existing_image, repo_obj, username, translations, prefer
     copied_storage.locations = {placement.location.name
                                 for placement in copied_storage.imagestorageplacement_set}

+    translated_parent_id = None
+    if new_image_ancestry != '/':
+      translated_parent_id = int(new_image_ancestry.split('/')[-2])
+
     new_image = Image.create(docker_image_id=existing_image.docker_image_id,
-                             repository=repo_obj, storage=copied_storage,
+                             repository=repo_obj,
+                             storage=copied_storage,
                              ancestors=new_image_ancestry,
                              command=existing_image.command,
                              created=existing_image.created,
                              comment=existing_image.comment,
                              v1_json_metadata=existing_image.v1_json_metadata,
-                             aggregate_size=existing_image.aggregate_size)
+                             aggregate_size=existing_image.aggregate_size,
+                             parent_id=translated_parent_id,
+                             v1_checksum=existing_image.v1_checksum)

     logger.debug('Storing translation %s -> %s', existing_image.id, new_image.id)
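The new translated_parent_id logic derives the parent from the ancestry path rather than from a foreign key. ancestors is a slash-delimited id path with leading and trailing slashes, so the second-to-last component of the split is the immediate parent's database id. A worked example with an invented ancestry string:

new_image_ancestry = '/3/7/9/'
parts = new_image_ancestry.split('/')  # ['', '3', '7', '9', '']
parent_id = int(parts[-2])             # 9, the immediate parent's id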
@@ -313,7 +320,6 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created

   # We cleanup any old checksum in case it's a retry after a fail
   fetched.v1_checksum = None
   fetched.storage.checksum = None  # TODO remove when storage checksums are no longer read
   fetched.storage.content_checksum = None

   fetched.comment = comment
@@ -322,7 +328,7 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created

   if parent:
     fetched.ancestors = '%s%s/' % (parent.ancestors, parent.id)
-    fetched.parent = parent
+    fetched.parent_id = parent.id

   fetched.save()
   fetched.storage.save()
@@ -378,3 +384,19 @@ def get_image(repo, dockerfile_id):
     return Image.get(Image.docker_image_id == dockerfile_id, Image.repository == repo)
   except Image.DoesNotExist:
     return None
+
+
+def ensure_image_locations(*names):
+  with db_transaction():
+    locations = ImageStorageLocation.select().where(ImageStorageLocation.name << names)
+
+    insert_names = list(names)
+
+    for location in locations:
+      insert_names.remove(location.name)
+
+    if not insert_names:
+      return
+
+    data = [{'name': name} for name in insert_names]
+    ImageStorageLocation.insert_many(data).execute()
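ensure_image_locations is an idempotent bulk upsert: within a single transaction it selects the names that already exist and inserts only the missing ones, so callers can invoke it unconditionally at startup. A plausible call site (the location names here are illustrative):

# Idempotently guarantee these location rows exist before placements reference them.
ensure_image_locations('local_us', 'local_eu', 's3_us_east_1')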
@@ -131,12 +131,12 @@ def garbage_collect_repo(repo):

   # iterable of tuples containing [(k, v), (k, v), ...]
   all_repo_images = Image.select(Image.id, Image.storage).where(Image.repository == repo).tuples()
   images_to_storages = dict(all_repo_images)
-  to_remove = set(images_to_storages.keys()).difference(referenced_ancestors)
+  to_remove = list(set(images_to_storages.keys()).difference(referenced_ancestors))

   if len(to_remove) > 0:
     logger.info('Cleaning up unreferenced images: %s', to_remove)
     storage_id_whitelist = {images_to_storages[to_remove_id] for to_remove_id in to_remove}
-    Image.delete().where(Image.id << list(to_remove)).execute()
+    Image.delete().where(Image.id << to_remove).execute()

   if len(to_remove) > 0:
     logger.info('Garbage collecting storage for images: %s', to_remove)
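Materializing the set difference into a list once lets the same object feed both log statements, the storage_id_whitelist comprehension, and peewee's << operator, instead of re-wrapping it with list() at each use. A self-contained sketch of the << operator, which compiles to a SQL IN clause (the Item model and in-memory database exist only for this example):

from peewee import Model, SqliteDatabase, CharField

db = SqliteDatabase(':memory:')

class Item(Model):
  name = CharField()

  class Meta:
    database = db

db.create_tables([Item])

to_remove = [1, 3, 8]
# << compiles to SQL IN: DELETE FROM item WHERE id IN (1, 3, 8)
Item.delete().where(Item.id << to_remove).execute()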