prepare branch to be merged into phase1-11-07-2015
This removes the checksum backfill, removes the migration that runs the backfills, and defaults the security scanner feature to off.
parent af4511455f
commit f3c3e684a1

4 changed files with 2 additions and 89 deletions
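
The first hunk below flips the FEATURE_SECURITY_SCANNER default so that scanning becomes opt-in rather than opt-out. A deployment that still wants scanning would turn the flag back on in its own configuration layer; the following is a minimal sketch of such an override, assuming the usual pattern of deployment config values shadowing the DefaultConfig defaults (the endpoint value is a placeholder, not a real service):

  # Hypothetical deployment-level override; re-enables the scanner that
  # DefaultConfig now leaves off by default.
  FEATURE_SECURITY_SCANNER = True
  SECURITY_SCANNER = {
    'ENDPOINT': 'http://security-scanner.example.internal:6060',  # placeholder
    'ENGINE_VERSION_TARGET': 1,
  }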
@@ -252,7 +252,7 @@ class DefaultConfig(object):
   EXP_ASYNC_GARBAGE_COLLECTION = []
 
   # Security scanner
-  FEATURE_SECURITY_SCANNER = True
+  FEATURE_SECURITY_SCANNER = False
   SECURITY_SCANNER = {
     'ENDPOINT': 'http://192.168.99.100:6060',
     'ENGINE_VERSION_TARGET': 1,
(deleted file: the Alembic migration, revision 2fb9492c20cc, that ran the backfills)
@@ -1,21 +0,0 @@
-"""backfill parent ids and checksums
-Revision ID: 2fb9492c20cc
-Revises: 57dad559ff2d
-Create Date: 2015-07-14 17:38:47.397963
-"""
-
-# revision identifiers, used by Alembic.
-revision = '2fb9492c20cc'
-down_revision = '57dad559ff2d'
-
-from alembic import op
-import sqlalchemy as sa
-from util.migrate.backfill_parent_id import backfill_parent_id
-from util.migrate.backfill_checksums import backfill_checksums
-
-def upgrade(tables):
-  backfill_parent_id()
-  backfill_checksums()
-
-def downgrade(tables):
-  pass
@@ -8,7 +8,7 @@ Create Date: 2015-10-13 18:03:14.859839
 
 # revision identifiers, used by Alembic.
 revision = '50925110da8c'
-down_revision = '2fb9492c20cc'
+down_revision = '57dad559ff2d'
 
 from alembic import op
 import sqlalchemy as sa
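
For context on the two migration hunks above: Alembic links revisions into a chain through down_revision, so deleting revision 2fb9492c20cc also requires re-pointing its child, 50925110da8c, at the deleted revision's parent, 57dad559ff2d. The effect on the revision chain:

  before: 57dad559ff2d -> 2fb9492c20cc (backfill parent ids and checksums) -> 50925110da8c
  after:  57dad559ff2d -> 50925110da8c

Without the retarget in the hunk above, 50925110da8c would reference a revision that no longer exists and Alembic would be unable to resolve the migration history.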
(deleted file: util/migrate/backfill_checksums.py)
@@ -1,66 +0,0 @@
-import logging
-from app import storage as store
-from data.database import ImageStorage, ImageStoragePlacement, ImageStorageLocation, JOIN_LEFT_OUTER
-from digest import checksums
-
-logger = logging.getLogger(__name__)
-
-# TODO: Fix this to use random
-# TODO: Copy in all referenced peewee models, as a later migration changes these
-
-def _get_imagestorages_with_locations(query_modifier):
-  query = (ImageStoragePlacement
-           .select(ImageStoragePlacement, ImageStorage, ImageStorageLocation)
-           .join(ImageStorageLocation)
-           .switch(ImageStoragePlacement)
-           .join(ImageStorage, JOIN_LEFT_OUTER))
-  query = query_modifier(query)
-
-  location_list = list(query)
-
-  storages = {}
-  for location in location_list:
-    storage = location.storage
-
-    if not storage.id in storages:
-      storages[storage.id] = storage
-      storage.locations = set()
-    else:
-      storage = storages[storage.id]
-
-    storage.locations.add(location.location.name)
-
-  return storages.values()
-
-def backfill_checksum(imagestorage_with_locations):
-  try:
-    json_data = store.get_content(imagestorage_with_locations.locations, store.image_json_path(imagestorage_with_locations.uuid))
-    with store.stream_read_file(imagestorage_with_locations.locations, store.image_layer_path(imagestorage_with_locations.uuid)) as fp:
-      imagestorage_with_locations.checksum = 'sha256:{0}'.format(checksums.sha256_file(fp, json_data + '\n'))
-    imagestorage_with_locations.save()
-  except:
-    imagestorage_with_locations.checksum = 'unknown:{0}'.format(imagestorage_with_locations.uuid)
-    imagestorage_with_locations.save()
-
-def backfill_checksums():
-  logger.setLevel(logging.DEBUG)
-  logger.debug('backfill_checksums: Starting')
-  logger.debug('backfill_checksums: This can be a LONG RUNNING OPERATION. Please wait!')
-
-  def limit_to_empty_checksum(query):
-    return query.where(ImageStorage.checksum >> None, ImageStorage.uploading == False).limit(100)
-
-  while True:
-    storages = _get_imagestorages_with_locations(limit_to_empty_checksum)
-    if len(storages) == 0:
-      logger.debug('backfill_checksums: Completed')
-      return
-
-    for storage in storages:
-      backfill_checksum(storage)
-
-if __name__ == "__main__":
-  logging.basicConfig(level=logging.DEBUG)
-  logging.getLogger('peewee').setLevel(logging.CRITICAL)
-  logging.getLogger('boto').setLevel(logging.CRITICAL)
-  backfill_checksums()
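
The deleted module above worked in bounded batches: select up to 100 storage rows whose checksum is NULL (peewee's "field >> None" operator), compute and save their checksums, and repeat until the query returns nothing. Failures write an 'unknown:<uuid>' sentinel instead of leaving NULL, so every pass shrinks the candidate set and the loop terminates. Below is a minimal, self-contained sketch of that batching pattern against an in-memory SQLite database; the Item model and compute_checksum helper are hypothetical stand-ins for illustration, not Quay code:

  import hashlib

  from peewee import CharField, Model, SqliteDatabase

  db = SqliteDatabase(':memory:')

  class Item(Model):
    name = CharField()
    checksum = CharField(null=True)  # NULL marks rows not yet backfilled

    class Meta:
      database = db

  def compute_checksum(item):
    # Hypothetical stand-in for the real work (streaming layer bytes
    # out of blob storage and hashing them).
    return 'sha256:{0}'.format(hashlib.sha256(item.name.encode('utf-8')).hexdigest())

  def backfill():
    while True:
      # `field >> None` is peewee's IS NULL operator; small batches keep
      # each query cheap, and each pass leaves fewer NULL rows behind.
      batch = list(Item.select().where(Item.checksum >> None).limit(100))
      if not batch:
        return
      for item in batch:
        item.checksum = compute_checksum(item)
        item.save()

  if __name__ == '__main__':
    db.connect()
    db.create_tables([Item])
    Item.insert_many([{'name': n} for n in ('base', 'layer1', 'layer2')]).execute()
    backfill()
    assert all(i.checksum.startswith('sha256:') for i in Item.select())

The deleted code's "# TODO: Fix this to use random" likely points at the one weakness this sketch shares: a deterministic LIMIT batch means concurrent workers would all grab the same rows, whereas selecting a random slice would let them make progress independently.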