From c518874ded93ce6118e2b87cd318c4a42fff1986 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 22 Oct 2015 13:24:56 -0400 Subject: [PATCH 01/19] I hate Redis! - Remove redis check from our health endpoint in prod entirely - Have the redis check have a maximum timeout of 1 second --- data/buildlogs.py | 16 ++++++++++++---- health/healthcheck.py | 9 +++++---- health/services.py | 7 +++++-- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/data/buildlogs.py b/data/buildlogs.py index e9ea4a78f..ac58bd368 100644 --- a/data/buildlogs.py +++ b/data/buildlogs.py @@ -18,7 +18,11 @@ class RedisBuildLogs(object): PHASE = 'phase' def __init__(self, redis_config): - self._redis = redis.StrictRedis(socket_connect_timeout=5, **redis_config) + args = dict(redis_config) + args.update({'socket_connect_timeout': 5}) + + self._redis_config = redis_config + self._redis = redis.StrictRedis(**args) @staticmethod def _logs_key(build_id): @@ -94,12 +98,16 @@ class RedisBuildLogs(object): def check_health(self): try: - if not self._redis.ping() == True: + args = dict(self._redis_config) + args.update({'socket_connect_timeout': 1, 'socket_timeout': 1}) + + connection = redis.StrictRedis(**args) + if not connection.ping() == True: return False # Ensure we can write and read a key. - self._redis.set(self._health_key(), time.time()) - self._redis.get(self._health_key()) + connection.set(self._health_key(), time.time()) + connection.get(self._health_key()) return True except redis.ConnectionError: diff --git a/health/healthcheck.py b/health/healthcheck.py index c212c694d..c015adca2 100644 --- a/health/healthcheck.py +++ b/health/healthcheck.py @@ -10,16 +10,17 @@ def get_healthchecker(app, config_provider): class HealthCheck(object): - def __init__(self, app, config_provider): + def __init__(self, app, config_provider, instance_skips=None): self.app = app self.config_provider = config_provider + self.instance_skips = instance_skips or [] def check_instance(self): """ Conducts a check on this specific instance, returning a dict representing the HealthCheck output and a number indicating the health check response code. """ - service_statuses = check_all_services(self.app) + service_statuses = check_all_services(self.app, skip=self.instance_skips) return self.get_instance_health(service_statuses) def check_endtoend(self): @@ -80,7 +81,7 @@ class LocalHealthCheck(HealthCheck): class ProductionHealthCheck(HealthCheck): def __init__(self, app, config_provider, access_key, secret_key, db_instance='quay'): - super(ProductionHealthCheck, self).__init__(app, config_provider) + super(ProductionHealthCheck, self).__init__(app, config_provider, ['redis']) self.access_key = access_key self.secret_key = secret_key self.db_instance = db_instance @@ -92,7 +93,7 @@ class ProductionHealthCheck(HealthCheck): def get_instance_health(self, service_statuses): # Note: We skip the redis check because if redis is down, we don't want ELB taking the # machines out of service. Redis is not considered a high avaliability-required service. - skip = ['redis'] + skip = [] notes = [] # If the database is marked as unhealthy, check the status of RDS directly. If RDS is diff --git a/health/services.py b/health/services.py index ce6112651..bf108595a 100644 --- a/health/services.py +++ b/health/services.py @@ -39,10 +39,13 @@ _SERVICES = { 'redis': _check_redis } -def check_all_services(app): +def check_all_services(app, skip): """ Returns a dictionary containing the status of all the services defined. 
""" status = {} for name in _SERVICES: + if name in skip: + continue + status[name] = _SERVICES[name](app) - return status \ No newline at end of file + return status From 46b2f10d7f2772f711ec2961f19f346f503ea6fc Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 22 Oct 2015 14:50:54 -0400 Subject: [PATCH 02/19] check for VPC subnet ID before using builder VPC This means you can use legacy networking machines by simply changing the instance type and removing the specified 'EC2_VPC_SUBNET_ID' from the executor config. --- buildman/manager/executor.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py index e4f9fb7bb..54b689611 100644 --- a/buildman/manager/executor.py +++ b/buildman/manager/executor.py @@ -121,12 +121,14 @@ class EC2Executor(BuilderExecutor): block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping() block_devices['/dev/xvda'] = ssd_root_ebs - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'], - groups=self.executor_config['EC2_SECURITY_GROUP_IDS'], - associate_public_ip_address=True, - ) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + interfaces = None + if self.executor_config.get('EC2_VPC_SUBNET_ID', None) is not None: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'], + groups=self.executor_config['EC2_SECURITY_GROUP_IDS'], + associate_public_ip_address=True, + ) + interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) reservation = yield From(ec2_conn.run_instances( coreos_ami, From 231d4634005e2dfafab28447f014d82be6c5b43e Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 22 Oct 2015 15:07:31 -0400 Subject: [PATCH 03/19] specify gitlab branch in req-no-ver --- requirements-nover.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-nover.txt b/requirements-nover.txt index ce0739cbc..452f54b63 100644 --- a/requirements-nover.txt +++ b/requirements-nover.txt @@ -37,7 +37,7 @@ git+https://github.com/DevTable/anunidecode.git git+https://github.com/DevTable/pygithub.git git+https://github.com/DevTable/container-cloud-config.git git+https://github.com/coreos/py-bitbucket.git -git+https://github.com/coreos/pyapi-gitlab.git +git+https://github.com/coreos/pyapi-gitlab.git@timeout git+https://github.com/coreos/mockldap.git git+https://github.com/DevTable/python-etcd.git@sslfix gipc @@ -54,4 +54,4 @@ pyjwt toposort rfc3987 pyjwkest -jsonpath-rw \ No newline at end of file +jsonpath-rw From a34ddc1f710e93010ecc5dda8c07717bbceb47cd Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 22 Oct 2015 15:21:15 -0400 Subject: [PATCH 04/19] copy over v1 metadata when linking existing image --- data/model/image.py | 1 + 1 file changed, 1 insertion(+) diff --git a/data/model/image.py b/data/model/image.py index 56583751e..82953cd86 100644 --- a/data/model/image.py +++ b/data/model/image.py @@ -202,6 +202,7 @@ def _find_or_link_image(existing_image, repo_obj, username, translations, prefer command=existing_image.command, created=existing_image.created, comment=existing_image.comment, + v1_json_metadata=existing_image.v1_json_metadata, aggregate_size=existing_image.aggregate_size) From d568697034e28d379c39819139069fe8a28a849b Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 22 Oct 2015 15:30:49 -0400 Subject: [PATCH 05/19] remove migration 
to get fixes into prod --- ...15d01_backfill_image_fields_from_image_.py | 24 ------------------- 1 file changed, 24 deletions(-) delete mode 100644 data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py diff --git a/data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py b/data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py deleted file mode 100644 index 131c7fad0..000000000 --- a/data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Backfill image fields from image storages - -Revision ID: 2e0380215d01 -Revises: 3ff4fbc94644 -Create Date: 2015-09-15 16:57:42.850246 - -""" - -# revision identifiers, used by Alembic. -revision = '2e0380215d01' -down_revision = '3ff4fbc94644' - -from alembic import op -import sqlalchemy as sa -from util.migrate.backfill_image_fields import backfill_image_fields -from util.migrate.backfill_v1_metadata import backfill_v1_metadata - - -def upgrade(tables): - backfill_image_fields() - backfill_v1_metadata() - -def downgrade(tables): - pass From e03058cf6f5fd49d12648715f1e656a5e2b5f81e Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Thu, 22 Oct 2015 15:57:34 -0400 Subject: [PATCH 06/19] Add missing arg --- health/healthcheck.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/health/healthcheck.py b/health/healthcheck.py index c015adca2..ef700e3e4 100644 --- a/health/healthcheck.py +++ b/health/healthcheck.py @@ -20,7 +20,7 @@ class HealthCheck(object): Conducts a check on this specific instance, returning a dict representing the HealthCheck output and a number indicating the health check response code. """ - service_statuses = check_all_services(self.app, skip=self.instance_skips) + service_statuses = check_all_services(self.app, self.instance_skips) return self.get_instance_health(service_statuses) def check_endtoend(self): @@ -28,7 +28,7 @@ class HealthCheck(object): Conducts a check on all services, returning a dict representing the HealthCheck output and a number indicating the health check response code. """ - service_statuses = check_all_services(self.app) + service_statuses = check_all_services(self.app, []) return self.calculate_overall_health(service_statuses) def get_instance_health(self, service_statuses): From 278bc736e30dc893917f6368e1fceb80c1d71a81 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 22 Oct 2015 16:02:07 -0400 Subject: [PATCH 07/19] Revert "Merge pull request #682 from jzelinskie/revertrevert" This reverts commit 627ad25c9c409cb39fdc16a8aada55318f173827, reversing changes made to 31c392feccfe995f70f9fabbb2a01cbfc95b46ef. 
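The substance of the revert is where per-image metadata gets read: the reverted
change had callers read the denormalized created/comment/command columns
directly off the Image row, and this patch sends readers back through the
ImageStorage row. A minimal sketch of the two access patterns (the helper name
is illustrative, not code from the patch):

    def image_command(image):
        # With this revert applied, readers consult the storage record again:
        return image.storage.command
        # The reverted change (restored again by PATCH 09 below) read the
        # denormalized copy straight off the image row instead:
        # return image.command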
--- buildman/jobutil/buildjob.py | 5 +- data/model/image.py | 11 +-- digest/checksums.py | 2 +- endpoints/api/image.py | 12 ++-- endpoints/api/repository.py | 2 +- endpoints/v1/registry.py | 32 ++++++--- endpoints/verbs.py | 9 ++- util/migrate/backfill_aggregate_sizes.py | 50 ++++++-------- util/migrate/backfill_image_fields.py | 87 ------------------------ util/migrate/backfill_v1_metadata.py | 67 ------------------ 10 files changed, 65 insertions(+), 212 deletions(-) delete mode 100644 util/migrate/backfill_image_fields.py delete mode 100644 util/migrate/backfill_v1_metadata.py diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index dbbb8113f..f6291f62b 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -128,9 +128,10 @@ class BuildJob(object): return False full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step] - logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command) + logger.debug('Checking step #%s: %s, %s == %s', step, image.id, + image.storage.command, full_command) - return image.command == full_command + return image.storage.command == full_command path = tree.find_longest_path(base_image.id, checker) if not path: diff --git a/data/model/image.py b/data/model/image.py index 82953cd86..00707d176 100644 --- a/data/model/image.py +++ b/data/model/image.py @@ -79,14 +79,7 @@ def get_repository_images_base(namespace_name, repository_name, query_modifier): query = query_modifier(query) - return invert_placement_query_results(query) - - -def invert_placement_query_results(placement_query): - """ This method will take a query which returns placements, storages, and images, and have it - return images and their storages, along with the placement set on each storage. 
- """ - location_list = list(placement_query) + location_list = list(query) images = {} for location in location_list: @@ -341,6 +334,7 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size, try: # TODO(jschorr): Switch to this faster route once we have full ancestor aggregate_size # parent_image = Image.get(Image.id == ancestors[-1]) + # total_size = image_size + parent_image.storage.aggregate_size ancestor_size = (ImageStorage .select(fn.Sum(ImageStorage.image_size)) .join(Image) @@ -349,7 +343,6 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size, # TODO stop writing to storage when all readers are removed if ancestor_size is not None: - # total_size = image_size + parent_image.storage.aggregate_size total_size = ancestor_size + image_size image.storage.aggregate_size = total_size image.aggregate_size = total_size diff --git a/digest/checksums.py b/digest/checksums.py index ea30e4dc1..154907823 100644 --- a/digest/checksums.py +++ b/digest/checksums.py @@ -68,7 +68,7 @@ def compute_tarsum(fp, json_data): def simple_checksum_handler(json_data): - h = hashlib.sha256(json_data.encode('utf8') + '\n') + h = hashlib.sha256(json_data + '\n') def fn(buf): h.update(buf) diff --git a/endpoints/api/image.py b/endpoints/api/image.py index 057e02cae..014bed412 100644 --- a/endpoints/api/image.py +++ b/endpoints/api/image.py @@ -12,7 +12,11 @@ from util.cache import cache_control_flask_restful def image_view(image, image_map, include_ancestors=True): - command = image.command + extended_props = image + if image.storage and image.storage.id: + extended_props = image.storage + + command = extended_props.command def docker_id(aid): if not aid or not aid in image_map: @@ -22,10 +26,10 @@ def image_view(image, image_map, include_ancestors=True): image_data = { 'id': image.docker_image_id, - 'created': format_date(image.created), - 'comment': image.comment, + 'created': format_date(extended_props.created), + 'comment': extended_props.comment, 'command': json.loads(command) if command else None, - 'size': image.storage.image_size, + 'size': extended_props.image_size, 'uploading': image.storage.uploading, 'sort_index': len(image.ancestors), } diff --git a/endpoints/api/repository.py b/endpoints/api/repository.py index b9664864e..d177403c5 100644 --- a/endpoints/api/repository.py +++ b/endpoints/api/repository.py @@ -254,7 +254,7 @@ class Repository(RepositoryParamResource): tag_info = { 'name': tag.name, 'image_id': tag.image.docker_image_id, - 'size': tag.image.aggregate_size + 'size': tag.image.storage.aggregate_size } if tag.lifetime_start_ts > 0: diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py index 636da6f30..082a07864 100644 --- a/endpoints/v1/registry.py +++ b/endpoints/v1/registry.py @@ -211,10 +211,11 @@ def put_image_layer(namespace, repository, image_id): try: logger.debug('Retrieving image data') uuid = repo_image.storage.uuid - json_data = repo_image.v1_json_metadata - except (AttributeError): + json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) + except (IOError, AttributeError): logger.exception('Exception when retrieving image data') - abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) + abort(404, 'Image %(image_id)s not found', issue='unknown-image', + image_id=image_id) logger.debug('Retrieving image path info') layer_path = store.image_layer_path(uuid) @@ -331,7 +332,8 @@ def put_image_checksum(namespace, repository, image_id): abort(404, 
'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id) logger.debug('Looking up repo layer data') - if not repo_image.v1_json_metadata: + uuid = repo_image.storage.uuid + if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)): abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id) logger.debug('Marking image path') @@ -371,7 +373,12 @@ def get_image_json(namespace, repository, image_id, headers): logger.debug('Looking up repo image') repo_image = model.image.get_repo_image_extended(namespace, repository, image_id) - if repo_image is None: + + logger.debug('Looking up repo layer data') + try: + uuid = repo_image.storage.uuid + data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) + except (IOError, AttributeError): flask_abort(404) logger.debug('Looking up repo layer size') @@ -381,7 +388,7 @@ def get_image_json(namespace, repository, image_id, headers): # so handle this case rather than failing. headers['X-Docker-Size'] = str(size) - response = make_response(repo_image.v1_json_metadata, 200) + response = make_response(data, 200) response.headers.extend(headers) return response @@ -484,6 +491,8 @@ def put_image_json(namespace, repository, image_id): model.tag.create_temporary_hidden_tag(repo, repo_image, app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']) + uuid = repo_image.storage.uuid + if image_id != data['id']: abort(400, 'JSON data contains invalid id for image: %(image_id)s', issue='invalid-request', image_id=image_id) @@ -501,12 +510,17 @@ def put_image_json(namespace, repository, image_id): if parent_id: logger.debug('Looking up parent image data') - if parent_id and not parent_image.v1_json_metadata: + if (parent_id and not + store.exists(parent_locations, store.image_json_path(parent_uuid))): abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s', issue='invalid-request', image_id=image_id, parent_id=parent_id) + logger.debug('Looking up image storage paths') + json_path = store.image_json_path(uuid) + logger.debug('Checking if image already exists') - if repo_image.v1_json_metadata and not image_is_uploading(repo_image): + if (store.exists(repo_image.storage.locations, json_path) and not + image_is_uploading(repo_image)): exact_abort(409, 'Image already exists') set_uploading_flag(repo_image, True) @@ -522,8 +536,6 @@ def put_image_json(namespace, repository, image_id): data.get('comment'), command, v1_metadata, parent_image) logger.debug('Putting json path') - uuid = repo_image.storage.uuid - json_path = store.image_json_path(uuid) store.put_content(repo_image.storage.locations, json_path, request.data) logger.debug('Generating image ancestry') diff --git a/endpoints/verbs.py b/endpoints/verbs.py index 6201bd897..be0067d1d 100644 --- a/endpoints/verbs.py +++ b/endpoints/verbs.py @@ -114,15 +114,17 @@ def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None): abort(404) # Lookup the tag's image and storage. - repo_image = model.image.get_repo_image(namespace, repository, tag_image.docker_image_id) + repo_image = model.image.get_repo_image_extended(namespace, repository, tag_image.docker_image_id) if not repo_image: abort(404) # If there is a data checker, call it first. 
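   # A failed check aborts with a 404 before any derived-image work begins.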
+ uuid = repo_image.storage.uuid image_json = None if checker is not None: - image_json = json.loads(repo_image.v1_json_metadata) + image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) + image_json = json.loads(image_json_data) if not checker(image_json): logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb) @@ -191,7 +193,8 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= # Load the image's JSON layer. if not image_json: - image_json = json.loads(repo_image.v1_json_metadata) + image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) + image_json = json.loads(image_json_data) # Calculate a synthetic image ID. synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest() diff --git a/util/migrate/backfill_aggregate_sizes.py b/util/migrate/backfill_aggregate_sizes.py index db6f195f2..8e624753b 100644 --- a/util/migrate/backfill_aggregate_sizes.py +++ b/util/migrate/backfill_aggregate_sizes.py @@ -1,50 +1,44 @@ import logging -from data.database import ImageStorage, Image, db, db_for_update +from data.database import ImageStorage, Image, db from app import app - -logger = logging.getLogger(__name__) - +LOGGER = logging.getLogger(__name__) def backfill_aggregate_sizes(): """ Generates aggregate sizes for any image storage entries without them """ - logger.debug('Aggregate sizes backfill: Began execution') + LOGGER.setLevel(logging.DEBUG) + LOGGER.debug('Aggregate sizes backfill: Began execution') while True: - batch_image_ids = list(Image - .select(Image.id) - .where(Image.aggregate_size >> None) - .limit(100)) + batch_storage_ids = list(ImageStorage + .select(ImageStorage.id) + .where(ImageStorage.aggregate_size >> None) + .limit(10)) - if len(batch_image_ids) == 0: + if len(batch_storage_ids) == 0: # There are no storages left to backfill. We're done! 
- logger.debug('Aggregate sizes backfill: Backfill completed') + LOGGER.debug('Aggregate sizes backfill: Backfill completed') return - logger.debug('Aggregate sizes backfill: Found %s records to update', len(batch_image_ids)) - for image_id in batch_image_ids: - logger.debug('Updating image : %s', image_id.id) + LOGGER.debug('Aggregate sizes backfill: Found %s records to update', len(batch_storage_ids)) + for image_storage_id in batch_storage_ids: + LOGGER.debug('Updating image storage: %s', image_storage_id.id) with app.config['DB_TRANSACTION_FACTORY'](db): try: - image = (Image - .select(Image, ImageStorage) - .join(ImageStorage) - .where(Image.id == image_id) - .get()) - - aggregate_size = image.storage.image_size + storage = ImageStorage.select().where(ImageStorage.id == image_storage_id.id).get() + image = Image.select().where(Image.storage == storage).get() image_ids = image.ancestors.split('/')[1:-1] + aggregate_size = storage.image_size for image_id in image_ids: - to_add = db_for_update(Image - .select(Image, ImageStorage) - .join(ImageStorage) - .where(Image.id == image_id)).get() - aggregate_size += to_add.storage.image_size + current_image = Image.select().where(Image.id == image_id).join(ImageStorage) + aggregate_size += image.storage.image_size - image.aggregate_size = aggregate_size - image.save() + storage.aggregate_size = aggregate_size + storage.save() + except ImageStorage.DoesNotExist: + pass except Image.DoesNotExist: pass diff --git a/util/migrate/backfill_image_fields.py b/util/migrate/backfill_image_fields.py deleted file mode 100644 index 184cc8a42..000000000 --- a/util/migrate/backfill_image_fields.py +++ /dev/null @@ -1,87 +0,0 @@ -import logging - -from peewee import (CharField, BigIntegerField, BooleanField, ForeignKeyField, DateTimeField, - TextField) -from data.database import BaseModel, db, db_for_update -from app import app - - -logger = logging.getLogger(__name__) - - -class Repository(BaseModel): - pass - - -# Vendor the information from tables we will be writing to at the time of this migration -class ImageStorage(BaseModel): - created = DateTimeField(null=True) - comment = TextField(null=True) - command = TextField(null=True) - aggregate_size = BigIntegerField(null=True) - uploading = BooleanField(default=True, null=True) - - -class Image(BaseModel): - # This class is intentionally denormalized. Even though images are supposed - # to be globally unique we can't treat them as such for permissions and - # security reasons. So rather than Repository <-> Image being many to many - # each image now belongs to exactly one repository. - docker_image_id = CharField(index=True) - repository = ForeignKeyField(Repository) - - # '/' separated list of ancestory ids, e.g. /1/2/6/7/10/ - ancestors = CharField(index=True, default='/', max_length=64535, null=True) - - storage = ForeignKeyField(ImageStorage, index=True, null=True) - - created = DateTimeField(null=True) - comment = TextField(null=True) - command = TextField(null=True) - aggregate_size = BigIntegerField(null=True) - v1_json_metadata = TextField(null=True) - - -def backfill_image_fields(): - """ Copies metadata from image storages to their images. 
""" - logger.debug('Image metadata backfill: Began execution') - while True: - batch_image_ids = list(Image - .select(Image.id) - .join(ImageStorage) - .where(Image.created >> None, Image.comment >> None, - Image.command >> None, Image.aggregate_size >> None, - ImageStorage.uploading == False, - ~((ImageStorage.created >> None) & - (ImageStorage.comment >> None) & - (ImageStorage.command >> None) & - (ImageStorage.aggregate_size >> None))) - .limit(100)) - - if len(batch_image_ids) == 0: - logger.debug('Image metadata backfill: Backfill completed') - return - - logger.debug('Image metadata backfill: Found %s records to update', len(batch_image_ids)) - for image_id in batch_image_ids: - logger.debug('Updating image: %s', image_id.id) - - with app.config['DB_TRANSACTION_FACTORY'](db): - try: - image = db_for_update(Image - .select(Image, ImageStorage) - .join(ImageStorage) - .where(Image.id == image_id.id)).get() - - image.created = image.storage.created - image.comment = image.storage.comment - image.command = image.storage.command - image.aggregate_size = image.storage.aggregate_size - image.save() - except Image.DoesNotExist: - pass - -if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG) - logging.getLogger('peewee').setLevel(logging.CRITICAL) - backfill_image_fields() diff --git a/util/migrate/backfill_v1_metadata.py b/util/migrate/backfill_v1_metadata.py deleted file mode 100644 index be7a37c93..000000000 --- a/util/migrate/backfill_v1_metadata.py +++ /dev/null @@ -1,67 +0,0 @@ -import logging - -from peewee import JOIN_LEFT_OUTER - -from data.database import (Image, ImageStorage, ImageStoragePlacement, ImageStorageLocation, db, - db_for_update) -from app import app, storage -from data import model - - -logger = logging.getLogger(__name__) - - -def backfill_v1_metadata(): - """ Copies metadata from image storages to their images. 
""" - logger.debug('Image v1 metadata backfill: Began execution') - while True: - batch_image_ids = list(Image - .select(Image.id) - .join(ImageStorage) - .where(Image.v1_json_metadata >> None, ImageStorage.uploading == False) - .limit(100)) - - if len(batch_image_ids) == 0: - logger.debug('Image v1 metadata backfill: Backfill completed') - return - - logger.debug('Image v1 metadata backfill: Found %s records to update', len(batch_image_ids)) - for one_id in batch_image_ids: - with app.config['DB_TRANSACTION_FACTORY'](db): - try: - logger.debug('Loading image: %s', one_id.id) - - raw_query = (ImageStoragePlacement - .select(ImageStoragePlacement, Image, ImageStorage, ImageStorageLocation) - .join(ImageStorageLocation) - .switch(ImageStoragePlacement) - .join(ImageStorage, JOIN_LEFT_OUTER) - .join(Image) - .where(Image.id == one_id.id)) - - placement_query = db_for_update(raw_query) - - repo_image_list = model.image.invert_placement_query_results(placement_query) - if len(repo_image_list) > 1: - logger.error('Found more images than we requested, something is wrong with the query') - return - - repo_image = repo_image_list[0] - uuid = repo_image.storage.uuid - json_path = storage.image_json_path(uuid) - - logger.debug('Updating image: %s from: %s', repo_image.id, json_path) - try: - data = storage.get_content(repo_image.storage.locations, json_path) - except IOError: - data = None - logger.exception('failed to find v1 metadata, defaulting to None') - repo_image.v1_json_metadata = data - repo_image.save() - except ImageStoragePlacement.DoesNotExist: - pass - -if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG) - # logging.getLogger('peewee').setLevel(logging.CRITICAL) - backfill_v1_metadata() From 05262125a06e4114dd46fcc9c9cc3956ffcd88af Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 23 Oct 2015 12:18:11 -0400 Subject: [PATCH 08/19] Make the namespace and secret name configurable via env var for the k8s provider Fixes #695 --- util/config/provider/k8sprovider.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/util/config/provider/k8sprovider.py b/util/config/provider/k8sprovider.py index 4a5a6ef9d..7fac1b179 100644 --- a/util/config/provider/k8sprovider.py +++ b/util/config/provider/k8sprovider.py @@ -14,8 +14,8 @@ KUBERNETES_API_HOST = 'kubernetes.default.svc.cluster.local' SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token' -ER_NAMESPACE = 'quay' -ER_CONFIG_SECRET = 'quay-config-secret' +QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise') +QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret') class KubernetesConfigProvider(FileConfigProvider): """ Implementation of the config provider that reads and writes configuration @@ -67,12 +67,12 @@ class KubernetesConfigProvider(FileConfigProvider): "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": ER_CONFIG_SECRET + "name": QE_CONFIG_SECRET }, "data": secret_data } - secret_url = 'namespaces/%s/secrets/%s' % (ER_NAMESPACE, ER_CONFIG_SECRET) + secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET) secret = self._lookup_secret() if not secret: self._assert_success(self._execute_k8s_api('POST', secret_url, data)) @@ -86,7 +86,7 @@ class KubernetesConfigProvider(FileConfigProvider): def _lookup_secret(self): - secret_url = 'namespaces/%s/secrets/%s' % (ER_NAMESPACE, ER_CONFIG_SECRET) + secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET) response = 
self._execute_k8s_api('GET', secret_url) if response.status_code != 200: return None From e97328939734fa2eb6983593bc44c2c320d8f9fb Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Fri, 23 Oct 2015 15:24:47 -0400 Subject: [PATCH 09/19] Revert "Revert "Merge pull request #682 from jzelinskie/revertrevert"" This reverts commit 278bc736e30dc893917f6368e1fceb80c1d71a81. --- buildman/jobutil/buildjob.py | 5 +- ...15d01_backfill_image_fields_from_image_.py | 24 +++++ data/model/image.py | 11 ++- digest/checksums.py | 2 +- endpoints/api/image.py | 12 +-- endpoints/api/repository.py | 2 +- endpoints/v1/registry.py | 32 +++---- endpoints/verbs.py | 9 +- util/migrate/backfill_aggregate_sizes.py | 50 ++++++----- util/migrate/backfill_image_fields.py | 87 +++++++++++++++++++ util/migrate/backfill_v1_metadata.py | 67 ++++++++++++++ 11 files changed, 236 insertions(+), 65 deletions(-) create mode 100644 data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py create mode 100644 util/migrate/backfill_image_fields.py create mode 100644 util/migrate/backfill_v1_metadata.py diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index f6291f62b..dbbb8113f 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -128,10 +128,9 @@ class BuildJob(object): return False full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step] - logger.debug('Checking step #%s: %s, %s == %s', step, image.id, - image.storage.command, full_command) + logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command) - return image.storage.command == full_command + return image.command == full_command path = tree.find_longest_path(base_image.id, checker) if not path: diff --git a/data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py b/data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py new file mode 100644 index 000000000..93d89ed6e --- /dev/null +++ b/data/migrations/versions/2e0380215d01_backfill_image_fields_from_image_.py @@ -0,0 +1,24 @@ +"""Backfill image fields from image storages + +Revision ID: 2e0380215d01 +Revises: 3ff4fbc94644 +Create Date: 2015-09-15 16:57:42.850246 + +""" + +# revision identifiers, used by Alembic. +revision = '2e0380215d01' +down_revision = '3ff4fbc94644' + +from alembic import op +import sqlalchemy as sa +from util.migrate.backfill_image_fields import backfill_image_fields +from util.migrate.backfill_v1_metadata import backfill_v1_metadata + + +def upgrade(tables): + backfill_image_fields() + backfill_v1_metadata() + +def downgrade(tables): + pass diff --git a/data/model/image.py b/data/model/image.py index 00707d176..82953cd86 100644 --- a/data/model/image.py +++ b/data/model/image.py @@ -79,7 +79,14 @@ def get_repository_images_base(namespace_name, repository_name, query_modifier): query = query_modifier(query) - location_list = list(query) + return invert_placement_query_results(query) + + +def invert_placement_query_results(placement_query): + """ This method will take a query which returns placements, storages, and images, and have it + return images and their storages, along with the placement set on each storage. 
+ """ + location_list = list(placement_query) images = {} for location in location_list: @@ -334,7 +341,6 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size, try: # TODO(jschorr): Switch to this faster route once we have full ancestor aggregate_size # parent_image = Image.get(Image.id == ancestors[-1]) - # total_size = image_size + parent_image.storage.aggregate_size ancestor_size = (ImageStorage .select(fn.Sum(ImageStorage.image_size)) .join(Image) @@ -343,6 +349,7 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size, # TODO stop writing to storage when all readers are removed if ancestor_size is not None: + # total_size = image_size + parent_image.storage.aggregate_size total_size = ancestor_size + image_size image.storage.aggregate_size = total_size image.aggregate_size = total_size diff --git a/digest/checksums.py b/digest/checksums.py index 154907823..ea30e4dc1 100644 --- a/digest/checksums.py +++ b/digest/checksums.py @@ -68,7 +68,7 @@ def compute_tarsum(fp, json_data): def simple_checksum_handler(json_data): - h = hashlib.sha256(json_data + '\n') + h = hashlib.sha256(json_data.encode('utf8') + '\n') def fn(buf): h.update(buf) diff --git a/endpoints/api/image.py b/endpoints/api/image.py index 014bed412..057e02cae 100644 --- a/endpoints/api/image.py +++ b/endpoints/api/image.py @@ -12,11 +12,7 @@ from util.cache import cache_control_flask_restful def image_view(image, image_map, include_ancestors=True): - extended_props = image - if image.storage and image.storage.id: - extended_props = image.storage - - command = extended_props.command + command = image.command def docker_id(aid): if not aid or not aid in image_map: @@ -26,10 +22,10 @@ def image_view(image, image_map, include_ancestors=True): image_data = { 'id': image.docker_image_id, - 'created': format_date(extended_props.created), - 'comment': extended_props.comment, + 'created': format_date(image.created), + 'comment': image.comment, 'command': json.loads(command) if command else None, - 'size': extended_props.image_size, + 'size': image.storage.image_size, 'uploading': image.storage.uploading, 'sort_index': len(image.ancestors), } diff --git a/endpoints/api/repository.py b/endpoints/api/repository.py index d177403c5..b9664864e 100644 --- a/endpoints/api/repository.py +++ b/endpoints/api/repository.py @@ -254,7 +254,7 @@ class Repository(RepositoryParamResource): tag_info = { 'name': tag.name, 'image_id': tag.image.docker_image_id, - 'size': tag.image.storage.aggregate_size + 'size': tag.image.aggregate_size } if tag.lifetime_start_ts > 0: diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py index 082a07864..636da6f30 100644 --- a/endpoints/v1/registry.py +++ b/endpoints/v1/registry.py @@ -211,11 +211,10 @@ def put_image_layer(namespace, repository, image_id): try: logger.debug('Retrieving image data') uuid = repo_image.storage.uuid - json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) - except (IOError, AttributeError): + json_data = repo_image.v1_json_metadata + except (AttributeError): logger.exception('Exception when retrieving image data') - abort(404, 'Image %(image_id)s not found', issue='unknown-image', - image_id=image_id) + abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) logger.debug('Retrieving image path info') layer_path = store.image_layer_path(uuid) @@ -332,8 +331,7 @@ def put_image_checksum(namespace, repository, image_id): abort(404, 'Image not found: %(image_id)s', 
issue='unknown-image', image_id=image_id) logger.debug('Looking up repo layer data') - uuid = repo_image.storage.uuid - if not store.exists(repo_image.storage.locations, store.image_json_path(uuid)): + if not repo_image.v1_json_metadata: abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id) logger.debug('Marking image path') @@ -373,12 +371,7 @@ def get_image_json(namespace, repository, image_id, headers): logger.debug('Looking up repo image') repo_image = model.image.get_repo_image_extended(namespace, repository, image_id) - - logger.debug('Looking up repo layer data') - try: - uuid = repo_image.storage.uuid - data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) - except (IOError, AttributeError): + if repo_image is None: flask_abort(404) logger.debug('Looking up repo layer size') @@ -388,7 +381,7 @@ def get_image_json(namespace, repository, image_id, headers): # so handle this case rather than failing. headers['X-Docker-Size'] = str(size) - response = make_response(data, 200) + response = make_response(repo_image.v1_json_metadata, 200) response.headers.extend(headers) return response @@ -491,8 +484,6 @@ def put_image_json(namespace, repository, image_id): model.tag.create_temporary_hidden_tag(repo, repo_image, app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']) - uuid = repo_image.storage.uuid - if image_id != data['id']: abort(400, 'JSON data contains invalid id for image: %(image_id)s', issue='invalid-request', image_id=image_id) @@ -510,17 +501,12 @@ def put_image_json(namespace, repository, image_id): if parent_id: logger.debug('Looking up parent image data') - if (parent_id and not - store.exists(parent_locations, store.image_json_path(parent_uuid))): + if parent_id and not parent_image.v1_json_metadata: abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s', issue='invalid-request', image_id=image_id, parent_id=parent_id) - logger.debug('Looking up image storage paths') - json_path = store.image_json_path(uuid) - logger.debug('Checking if image already exists') - if (store.exists(repo_image.storage.locations, json_path) and not - image_is_uploading(repo_image)): + if repo_image.v1_json_metadata and not image_is_uploading(repo_image): exact_abort(409, 'Image already exists') set_uploading_flag(repo_image, True) @@ -536,6 +522,8 @@ def put_image_json(namespace, repository, image_id): data.get('comment'), command, v1_metadata, parent_image) logger.debug('Putting json path') + uuid = repo_image.storage.uuid + json_path = store.image_json_path(uuid) store.put_content(repo_image.storage.locations, json_path, request.data) logger.debug('Generating image ancestry') diff --git a/endpoints/verbs.py b/endpoints/verbs.py index be0067d1d..6201bd897 100644 --- a/endpoints/verbs.py +++ b/endpoints/verbs.py @@ -114,17 +114,15 @@ def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None): abort(404) # Lookup the tag's image and storage. - repo_image = model.image.get_repo_image_extended(namespace, repository, tag_image.docker_image_id) + repo_image = model.image.get_repo_image(namespace, repository, tag_image.docker_image_id) if not repo_image: abort(404) # If there is a data checker, call it first. 
- uuid = repo_image.storage.uuid image_json = None if checker is not None: - image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) - image_json = json.loads(image_json_data) + image_json = json.loads(repo_image.v1_json_metadata) if not checker(image_json): logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb) @@ -193,8 +191,7 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= # Load the image's JSON layer. if not image_json: - image_json_data = store.get_content(repo_image.storage.locations, store.image_json_path(uuid)) - image_json = json.loads(image_json_data) + image_json = json.loads(repo_image.v1_json_metadata) # Calculate a synthetic image ID. synthetic_image_id = hashlib.sha256(tag_image.docker_image_id + ':' + verb).hexdigest() diff --git a/util/migrate/backfill_aggregate_sizes.py b/util/migrate/backfill_aggregate_sizes.py index 8e624753b..db6f195f2 100644 --- a/util/migrate/backfill_aggregate_sizes.py +++ b/util/migrate/backfill_aggregate_sizes.py @@ -1,44 +1,50 @@ import logging -from data.database import ImageStorage, Image, db +from data.database import ImageStorage, Image, db, db_for_update from app import app -LOGGER = logging.getLogger(__name__) + +logger = logging.getLogger(__name__) + def backfill_aggregate_sizes(): """ Generates aggregate sizes for any image storage entries without them """ - LOGGER.setLevel(logging.DEBUG) - LOGGER.debug('Aggregate sizes backfill: Began execution') + logger.debug('Aggregate sizes backfill: Began execution') while True: - batch_storage_ids = list(ImageStorage - .select(ImageStorage.id) - .where(ImageStorage.aggregate_size >> None) - .limit(10)) + batch_image_ids = list(Image + .select(Image.id) + .where(Image.aggregate_size >> None) + .limit(100)) - if len(batch_storage_ids) == 0: + if len(batch_image_ids) == 0: # There are no storages left to backfill. We're done! 
- LOGGER.debug('Aggregate sizes backfill: Backfill completed') + logger.debug('Aggregate sizes backfill: Backfill completed') return - LOGGER.debug('Aggregate sizes backfill: Found %s records to update', len(batch_storage_ids)) - for image_storage_id in batch_storage_ids: - LOGGER.debug('Updating image storage: %s', image_storage_id.id) + logger.debug('Aggregate sizes backfill: Found %s records to update', len(batch_image_ids)) + for image_id in batch_image_ids: + logger.debug('Updating image : %s', image_id.id) with app.config['DB_TRANSACTION_FACTORY'](db): try: - storage = ImageStorage.select().where(ImageStorage.id == image_storage_id.id).get() - image = Image.select().where(Image.storage == storage).get() + image = (Image + .select(Image, ImageStorage) + .join(ImageStorage) + .where(Image.id == image_id) + .get()) + + aggregate_size = image.storage.image_size image_ids = image.ancestors.split('/')[1:-1] - aggregate_size = storage.image_size for image_id in image_ids: - current_image = Image.select().where(Image.id == image_id).join(ImageStorage) - aggregate_size += image.storage.image_size + to_add = db_for_update(Image + .select(Image, ImageStorage) + .join(ImageStorage) + .where(Image.id == image_id)).get() + aggregate_size += to_add.storage.image_size - storage.aggregate_size = aggregate_size - storage.save() - except ImageStorage.DoesNotExist: - pass + image.aggregate_size = aggregate_size + image.save() except Image.DoesNotExist: pass diff --git a/util/migrate/backfill_image_fields.py b/util/migrate/backfill_image_fields.py new file mode 100644 index 000000000..184cc8a42 --- /dev/null +++ b/util/migrate/backfill_image_fields.py @@ -0,0 +1,87 @@ +import logging + +from peewee import (CharField, BigIntegerField, BooleanField, ForeignKeyField, DateTimeField, + TextField) +from data.database import BaseModel, db, db_for_update +from app import app + + +logger = logging.getLogger(__name__) + + +class Repository(BaseModel): + pass + + +# Vendor the information from tables we will be writing to at the time of this migration +class ImageStorage(BaseModel): + created = DateTimeField(null=True) + comment = TextField(null=True) + command = TextField(null=True) + aggregate_size = BigIntegerField(null=True) + uploading = BooleanField(default=True, null=True) + + +class Image(BaseModel): + # This class is intentionally denormalized. Even though images are supposed + # to be globally unique we can't treat them as such for permissions and + # security reasons. So rather than Repository <-> Image being many to many + # each image now belongs to exactly one repository. + docker_image_id = CharField(index=True) + repository = ForeignKeyField(Repository) + + # '/' separated list of ancestory ids, e.g. /1/2/6/7/10/ + ancestors = CharField(index=True, default='/', max_length=64535, null=True) + + storage = ForeignKeyField(ImageStorage, index=True, null=True) + + created = DateTimeField(null=True) + comment = TextField(null=True) + command = TextField(null=True) + aggregate_size = BigIntegerField(null=True) + v1_json_metadata = TextField(null=True) + + +def backfill_image_fields(): + """ Copies metadata from image storages to their images. 
""" + logger.debug('Image metadata backfill: Began execution') + while True: + batch_image_ids = list(Image + .select(Image.id) + .join(ImageStorage) + .where(Image.created >> None, Image.comment >> None, + Image.command >> None, Image.aggregate_size >> None, + ImageStorage.uploading == False, + ~((ImageStorage.created >> None) & + (ImageStorage.comment >> None) & + (ImageStorage.command >> None) & + (ImageStorage.aggregate_size >> None))) + .limit(100)) + + if len(batch_image_ids) == 0: + logger.debug('Image metadata backfill: Backfill completed') + return + + logger.debug('Image metadata backfill: Found %s records to update', len(batch_image_ids)) + for image_id in batch_image_ids: + logger.debug('Updating image: %s', image_id.id) + + with app.config['DB_TRANSACTION_FACTORY'](db): + try: + image = db_for_update(Image + .select(Image, ImageStorage) + .join(ImageStorage) + .where(Image.id == image_id.id)).get() + + image.created = image.storage.created + image.comment = image.storage.comment + image.command = image.storage.command + image.aggregate_size = image.storage.aggregate_size + image.save() + except Image.DoesNotExist: + pass + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + logging.getLogger('peewee').setLevel(logging.CRITICAL) + backfill_image_fields() diff --git a/util/migrate/backfill_v1_metadata.py b/util/migrate/backfill_v1_metadata.py new file mode 100644 index 000000000..be7a37c93 --- /dev/null +++ b/util/migrate/backfill_v1_metadata.py @@ -0,0 +1,67 @@ +import logging + +from peewee import JOIN_LEFT_OUTER + +from data.database import (Image, ImageStorage, ImageStoragePlacement, ImageStorageLocation, db, + db_for_update) +from app import app, storage +from data import model + + +logger = logging.getLogger(__name__) + + +def backfill_v1_metadata(): + """ Copies metadata from image storages to their images. 
""" + logger.debug('Image v1 metadata backfill: Began execution') + while True: + batch_image_ids = list(Image + .select(Image.id) + .join(ImageStorage) + .where(Image.v1_json_metadata >> None, ImageStorage.uploading == False) + .limit(100)) + + if len(batch_image_ids) == 0: + logger.debug('Image v1 metadata backfill: Backfill completed') + return + + logger.debug('Image v1 metadata backfill: Found %s records to update', len(batch_image_ids)) + for one_id in batch_image_ids: + with app.config['DB_TRANSACTION_FACTORY'](db): + try: + logger.debug('Loading image: %s', one_id.id) + + raw_query = (ImageStoragePlacement + .select(ImageStoragePlacement, Image, ImageStorage, ImageStorageLocation) + .join(ImageStorageLocation) + .switch(ImageStoragePlacement) + .join(ImageStorage, JOIN_LEFT_OUTER) + .join(Image) + .where(Image.id == one_id.id)) + + placement_query = db_for_update(raw_query) + + repo_image_list = model.image.invert_placement_query_results(placement_query) + if len(repo_image_list) > 1: + logger.error('Found more images than we requested, something is wrong with the query') + return + + repo_image = repo_image_list[0] + uuid = repo_image.storage.uuid + json_path = storage.image_json_path(uuid) + + logger.debug('Updating image: %s from: %s', repo_image.id, json_path) + try: + data = storage.get_content(repo_image.storage.locations, json_path) + except IOError: + data = None + logger.exception('failed to find v1 metadata, defaulting to None') + repo_image.v1_json_metadata = data + repo_image.save() + except ImageStoragePlacement.DoesNotExist: + pass + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + # logging.getLogger('peewee').setLevel(logging.CRITICAL) + backfill_v1_metadata() From 7bac04295488922ad21e6be1f2717ffecd1c36b7 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 23 Oct 2015 15:49:31 -0400 Subject: [PATCH 10/19] Fix verbs for merged changes to image and image storage Fixes #698 --- endpoints/verbs.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/endpoints/verbs.py b/endpoints/verbs.py index 6201bd897..4e67733c7 100644 --- a/endpoints/verbs.py +++ b/endpoints/verbs.py @@ -22,17 +22,14 @@ verbs = Blueprint('verbs', __name__) logger = logging.getLogger(__name__) -def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json, - image_id_list): +def _open_stream(formatter, namespace, repository, tag, synthetic_image_id, image_json, repo_image): store = Storage(app) # For performance reasons, we load the full image list here, cache it, then disconnect from # the database. with database.UseThenDisconnect(app.config): - image_list = list(model.image.get_matching_repository_images(namespace, repository, - image_id_list)) - - image_list.sort(key=lambda image: image_id_list.index(image.docker_image_id)) + image_list = list(model.image.get_parent_images(namespace, repository, repo_image)) + image_list.append(repo_image) def get_next_image(): for current_image in image_list: @@ -114,7 +111,7 @@ def _verify_repo_verb(store, namespace, repository, tag, verb, checker=None): abort(404) # Lookup the tag's image and storage. 
- repo_image = model.image.get_repo_image(namespace, repository, tag_image.docker_image_id) + repo_image = model.image.get_repo_image_extended(namespace, repository, tag_image.docker_image_id) if not repo_image: abort(404) @@ -186,8 +183,6 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= uuid = repo_image.storage.uuid logger.debug('Building and returning derived %s image %s', verb, derived.uuid) - ancestry_data = store.get_content(repo_image.storage.locations, store.image_ancestry_path(uuid)) - full_image_list = json.loads(ancestry_data) # Load the image's JSON layer. if not image_json: @@ -202,7 +197,7 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= # Create a queue process to generate the data. The queue files will read from the process # and send the results to the client and storage. - args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, full_image_list) + args = (formatter, namespace, repository, tag, synthetic_image_id, image_json, repo_image) queue_process = QueueProcess(_open_stream, 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max args, finished=_cleanup) From e0da666b5bdb4742c38a1a6406fd1ed1d1feebb6 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Fri, 23 Oct 2015 16:05:43 -0400 Subject: [PATCH 11/19] update apt-get dependencies again --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 90a17c9fd..4c5ad98b9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ ENV DEBIAN_FRONTEND noninteractive ENV HOME /root # Install the dependencies. -RUN apt-get update # 22OCT2015 +RUN apt-get update # 23OCT2015 # New ubuntu packages should be added as their own apt-get install lines below the existing install commands RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev From e0d715024c6597178259d5a3cb04eb49cdd28d9b Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Fri, 23 Oct 2015 16:39:40 -0400 Subject: [PATCH 12/19] Fix typo in test --- test/test_buildman.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_buildman.py b/test/test_buildman.py index b58eddb45..ffcbc7071 100644 --- a/test/test_buildman.py +++ b/test/test_buildman.py @@ -224,7 +224,7 @@ class TestEphemeral(unittest.TestCase): @async_test def test_change_worker(self): # Send a signal to the callback that a worker key has been changed - set_result = Mock(sepc=etcd.EtcdResult) + set_result = Mock(spec=etcd.EtcdResult) set_result.action = 'set' set_result.key = self.mock_job_key From 9da64f3abaa66b468d95bf1c107747a038a30269 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Thu, 17 Sep 2015 15:23:33 -0400 Subject: [PATCH 13/19] Stop writing to deprecated columns for image data. 
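What is being retired here is the transitional dual write: while readers
migrated from the deprecated ImageStorage columns to the new Image columns,
every metadata write went to both rows. A minimal sketch of the before and
after, using the aggregate_size field from the diff below (illustrative, not
code from the patch):

    def record_aggregate_size(image, total_size):
        # Before this patch, a dual write kept the deprecated column in sync:
        #   image.storage.aggregate_size = total_size
        #   image.storage.save()
        # After this patch, only the canonical Image column is written:
        image.aggregate_size = total_size
        image.save()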
--- data/database.py | 4 ---- data/model/image.py | 21 +++------------------ endpoints/v1/registry.py | 9 ++------- initdb.py | 3 +-- storage/basestorage.py | 4 ---- test/data/test.db | Bin 847872 -> 831488 bytes tools/migrateimage.py | 1 - util/migrate/backfill_v1_metadata.py | 7 ++++++- workers/storagereplication.py | 3 +-- 9 files changed, 13 insertions(+), 39 deletions(-) diff --git a/data/database.py b/data/database.py index 771cc57d7..765a4948e 100644 --- a/data/database.py +++ b/data/database.py @@ -487,12 +487,8 @@ class EmailConfirmation(BaseModel): class ImageStorage(BaseModel): uuid = CharField(default=uuid_generator, index=True, unique=True) checksum = CharField(null=True) - created = DateTimeField(null=True) - comment = TextField(null=True) - command = TextField(null=True) image_size = BigIntegerField(null=True) uncompressed_size = BigIntegerField(null=True) - aggregate_size = BigIntegerField(null=True) uploading = BooleanField(default=True, null=True) diff --git a/data/model/image.py b/data/model/image.py index 82953cd86..078875417 100644 --- a/data/model/image.py +++ b/data/model/image.py @@ -286,24 +286,15 @@ def set_image_metadata(docker_image_id, namespace_name, repository_name, created # We cleanup any old checksum in case it's a retry after a fail fetched.storage.checksum = None - now = datetime.now() - # TODO stop writing to storage when all readers are removed - fetched.storage.created = now - fetched.created = now + fetched.created = datetime.now() if created_date_str is not None: try: - # TODO stop writing to storage fields when all readers are removed - parsed_created_time = dateutil.parser.parse(created_date_str).replace(tzinfo=None) - fetched.created = parsed_created_time - fetched.storage.created = parsed_created_time + fetched.created = dateutil.parser.parse(created_date_str).replace(tzinfo=None) except: # parse raises different exceptions, so we cannot use a specific kind of handler here. 
pass - # TODO stop writing to storage fields when all readers are removed - fetched.storage.comment = comment - fetched.storage.command = command fetched.comment = comment fetched.command = command fetched.v1_json_metadata = v1_json_metadata @@ -347,17 +338,11 @@ def set_image_size(docker_image_id, namespace_name, repository_name, image_size, .where(Image.id << ancestors) .scalar()) - # TODO stop writing to storage when all readers are removed if ancestor_size is not None: - # total_size = image_size + parent_image.storage.aggregate_size - total_size = ancestor_size + image_size - image.storage.aggregate_size = total_size - image.aggregate_size = total_size + image.aggregate_size = ancestor_size + image_size except Image.DoesNotExist: pass else: - # TODO stop writing to storage when all readers are removed - image.storage.aggregate_size = image_size image.aggregate_size = image_size image.storage.save() diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py index 636da6f30..3d049c757 100644 --- a/endpoints/v1/registry.py +++ b/endpoints/v1/registry.py @@ -521,16 +521,11 @@ def put_image_json(namespace, repository, image_id): model.image.set_image_metadata(image_id, namespace, repository, data.get('created'), data.get('comment'), command, v1_metadata, parent_image) - logger.debug('Putting json path') - uuid = repo_image.storage.uuid - json_path = store.image_json_path(uuid) - store.put_content(repo_image.storage.locations, json_path, request.data) - logger.debug('Generating image ancestry') try: - generate_ancestry(image_id, uuid, repo_image.storage.locations, parent_id, parent_uuid, - parent_locations) + generate_ancestry(image_id, repo_image.storage.uuid, repo_image.storage.locations, parent_id, + parent_uuid, parent_locations) except IOError as ioe: logger.debug('Error when generating ancestry: %s', ioe.message) abort(404) diff --git a/initdb.py b/initdb.py index 29b151601..33b8e2b5a 100644 --- a/initdb.py +++ b/initdb.py @@ -88,8 +88,7 @@ def __create_subtree(repo, structure, creator_username, parent, tag_map): # Write some data for the storage. 
   if os.environ.get('WRITE_STORAGE_FILES'):
     storage_paths = StoragePaths()
-    paths = [storage_paths.image_json_path,
-             storage_paths.image_ancestry_path,
+    paths = [storage_paths.image_ancestry_path,
              storage_paths.image_layer_path]
 
     for path_builder in paths:
diff --git a/storage/basestorage.py b/storage/basestorage.py
index 9406fffec..0b822b054 100644
--- a/storage/basestorage.py
+++ b/storage/basestorage.py
@@ -19,10 +19,6 @@ class StoragePaths(object):
   def image_path(self, storage_uuid):
     return '{0}/{1}/'.format(self.shared_images, storage_uuid)
 
-  def image_json_path(self, storage_uuid):
-    base_path = self.image_path(storage_uuid)
-    return '{0}json'.format(base_path)
-
   def image_layer_path(self, storage_uuid):
     base_path = self.image_path(storage_uuid)
     return '{0}layer'.format(base_path)
diff --git a/test/data/test.db b/test/data/test.db
index 762ad4378cd754cf9f9eac84483c7c46ca30d4df..5e5246bbaaf99199d204c2e60c712e38757bbaf6 100644
GIT binary patch
delta 21368
[base85-encoded binary delta elided]

delta 27152
[base85-encoded binary delta elided]

Date: Thu, 17 Sep 2015 15:51:06 -0400
Subject: [PATCH 14/19] Remove the unused imagestorage columns from the db.
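These columns are dead weight at this point: earlier patches in this series moved
the readers and writers of comment, command, created, and aggregate_size over to
the image table, so this migration only drops data that nothing consults. A quick,
hypothetical way to sanity-check that the new revision keeps the Alembic history
linear (the data/migrations/alembic.ini path is an assumption about the repo
layout, not part of this patch):

    # Hypothetical check, not part of this patch: walk the Alembic revision
    # graph and confirm it is still a single chain after adding 127905a52fdd.
    from alembic.config import Config
    from alembic.script import ScriptDirectory

    config = Config('data/migrations/alembic.ini')  # path is an assumption
    scripts = ScriptDirectory.from_config(config)

    # walk_revisions() iterates from the head down to the base; a fork in
    # the history surfaces as a multiple-heads error instead of a full walk.
    for script in scripts.walk_revisions():
        print('%s -> %s' % (script.revision, script.down_revision))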
--- ...fdd_remove_the_deprecated_imagestorage_.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py diff --git a/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py b/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py new file mode 100644 index 000000000..06c20c015 --- /dev/null +++ b/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py @@ -0,0 +1,32 @@ +"""Remove the deprecated imagestorage columns. + +Revision ID: 127905a52fdd +Revises: 2e0380215d01 +Create Date: 2015-09-17 15:48:56.667823 + +""" + +# revision identifiers, used by Alembic. +revision = '127905a52fdd' +down_revision = '2e0380215d01' + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import mysql + +def upgrade(tables): + ### commands auto generated by Alembic - please adjust! ### + op.drop_column('imagestorage', 'comment') + op.drop_column('imagestorage', 'aggregate_size') + op.drop_column('imagestorage', 'command') + op.drop_column('imagestorage', 'created') + ### end Alembic commands ### + + +def downgrade(tables): + ### commands auto generated by Alembic - please adjust! ### + op.add_column('imagestorage', sa.Column('created', mysql.DATETIME(), nullable=True)) + op.add_column('imagestorage', sa.Column('command', mysql.TEXT(), nullable=True)) + op.add_column('imagestorage', sa.Column('aggregate_size', mysql.BIGINT(display_width=20), autoincrement=False, nullable=True)) + op.add_column('imagestorage', sa.Column('comment', mysql.TEXT(), nullable=True)) + ### end Alembic commands ### From cb7ec2f2394d2b381ceca67f1980eddeea5b6a04 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Fri, 23 Oct 2015 13:49:23 -0400 Subject: [PATCH 15/19] Backport remaining v2 changes to phase4 --- data/database.py | 26 +++++++++-- data/fields.py | 38 ++++++++++++++++ .../33bd39ef5ed6_backport_v2_db_changes.py | 43 +++++++++++++++++++ data/model/storage.py | 2 +- 4 files changed, 104 insertions(+), 5 deletions(-) create mode 100644 data/fields.py create mode 100644 data/migrations/versions/33bd39ef5ed6_backport_v2_db_changes.py diff --git a/data/database.py b/data/database.py index 765a4948e..cc0beafa6 100644 --- a/data/database.py +++ b/data/database.py @@ -3,15 +3,16 @@ import logging import uuid import time import toposort +import resumablehashlib from random import SystemRandom from datetime import datetime from peewee import * from data.read_slave import ReadSlaveModel +from data.fields import ResumableSHAField, JSONField from sqlalchemy.engine.url import make_url from collections import defaultdict -from data.read_slave import ReadSlaveModel from util.names import urn_generator @@ -348,7 +349,7 @@ class Repository(BaseModel): # These models don't need to use transitive deletes, because the referenced objects # are cleaned up directly - skip_transitive_deletes = {RepositoryTag, RepositoryBuild, RepositoryBuildTrigger} + skip_transitive_deletes = {RepositoryTag, RepositoryBuild, RepositoryBuildTrigger, BlobUpload} # We need to sort the ops so that models get cleaned in order of their dependencies ops = reversed(list(self.dependencies(delete_nullable))) @@ -490,6 +491,7 @@ class ImageStorage(BaseModel): image_size = BigIntegerField(null=True) uncompressed_size = BigIntegerField(null=True) uploading = BooleanField(default=True, null=True) + cas_path = BooleanField(default=True) class ImageStorageTransformation(BaseModel): @@ -761,6 +763,23 @@ 
class RepositoryAuthorizedEmail(BaseModel):
   )
 
 
+class BlobUpload(BaseModel):
+  repository = ForeignKeyField(Repository, index=True)
+  uuid = CharField(index=True, unique=True)
+  byte_count = IntegerField(default=0)
+  sha_state = ResumableSHAField(null=True, default=resumablehashlib.sha256)
+  location = ForeignKeyField(ImageStorageLocation)
+  storage_metadata = JSONField(null=True, default={})
+
+  class Meta:
+    database = db
+    read_slaves = (read_slave,)
+    indexes = (
+      # create a unique index on repository and uuid
+      (('repository', 'uuid'), True),
+    )
+
+
 class QuayService(BaseModel):
   name = CharField(index=True, unique=True)
 
@@ -788,7 +807,6 @@ class QuayRelease(BaseModel):
     )
 
 
-
 all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission, Visibility,
               RepositoryTag, EmailConfirmation, FederatedLogin, LoginService, QueueItem,
               RepositoryBuild, Team, TeamMember, TeamRole, LogEntryKind, LogEntry,
@@ -799,4 +817,4 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
               RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
               TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind,
               AccessTokenKind, Star, RepositoryActionCount, TagManifest, UserRegion,
-              QuayService, QuayRegion, QuayRelease]
+              QuayService, QuayRegion, QuayRelease, BlobUpload]
diff --git a/data/fields.py b/data/fields.py
new file mode 100644
index 000000000..123811ccd
--- /dev/null
+++ b/data/fields.py
@@ -0,0 +1,38 @@
+import base64
+import resumablehashlib
+import json
+
+from peewee import TextField
+
+
+class ResumableSHAField(TextField):
+  def db_value(self, value):
+    sha_state = value.state()
+
+    # One of the fields is a byte string, let's base64 encode it to make sure
+    # we can store and fetch it regardless of default collation.
+    sha_state[3] = base64.b64encode(sha_state[3])
+
+    return json.dumps(sha_state)
+
+  def python_value(self, value):
+    to_resume = resumablehashlib.sha256()
+    if value is None:
+      return to_resume
+
+    sha_state = json.loads(value)
+
+    # We need to base64 decode the data bytestring.
+    sha_state[3] = base64.b64decode(sha_state[3])
+    to_resume.set_state(sha_state)
+    return to_resume
+
+
+class JSONField(TextField):
+  def db_value(self, value):
+    return json.dumps(value)
+
+  def python_value(self, value):
+    if value is None or value == "":
+      return {}
+    return json.loads(value)
diff --git a/data/migrations/versions/33bd39ef5ed6_backport_v2_db_changes.py b/data/migrations/versions/33bd39ef5ed6_backport_v2_db_changes.py
new file mode 100644
index 000000000..c63924c93
--- /dev/null
+++ b/data/migrations/versions/33bd39ef5ed6_backport_v2_db_changes.py
@@ -0,0 +1,43 @@
+"""Backport v2 db changes.
+
+Revision ID: 33bd39ef5ed6
+Revises: 127905a52fdd
+Create Date: 2015-10-23 12:34:22.776542
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '33bd39ef5ed6'
+down_revision = '127905a52fdd'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+    ### commands auto generated by Alembic - please adjust!
### + op.create_table('blobupload', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('repository_id', sa.Integer(), nullable=False), + sa.Column('uuid', sa.String(length=255), nullable=False), + sa.Column('byte_count', sa.Integer(), nullable=False), + sa.Column('sha_state', sa.Text(), nullable=True), + sa.Column('location_id', sa.Integer(), nullable=False), + sa.Column('storage_metadata', sa.Text(), nullable=True), + sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_blobupload_location_id_imagestoragelocation')), + sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobupload_repository_id_repository')), + sa.PrimaryKeyConstraint('id', name=op.f('pk_blobupload')) + ) + op.create_index('blobupload_location_id', 'blobupload', ['location_id'], unique=False) + op.create_index('blobupload_repository_id', 'blobupload', ['repository_id'], unique=False) + op.create_index('blobupload_repository_id_uuid', 'blobupload', ['repository_id', 'uuid'], unique=True) + op.create_index('blobupload_uuid', 'blobupload', ['uuid'], unique=True) + op.add_column(u'imagestorage', sa.Column('cas_path', sa.Boolean(), nullable=False, server_default="0")) + ### end Alembic commands ### + + +def downgrade(tables): + ### commands auto generated by Alembic - please adjust! ### + op.drop_column(u'imagestorage', 'cas_path') + op.drop_table('blobupload') + ### end Alembic commands ### diff --git a/data/model/storage.py b/data/model/storage.py index 97b94ed4e..0ddbc8ac8 100644 --- a/data/model/storage.py +++ b/data/model/storage.py @@ -124,7 +124,7 @@ def garbage_collect_storage(storage_id_whitelist): def create_storage(location_name): - storage = ImageStorage.create() + storage = ImageStorage.create(cas_path=False) location = ImageStorageLocation.get(name=location_name) ImageStoragePlacement.create(location=location, storage=storage) storage.locations = {location_name} From ddbe33e2ce31726f2561019beb9a17bd27598427 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Fri, 23 Oct 2015 15:50:31 -0400 Subject: [PATCH 16/19] Switch Text to LongText for MySQL manifests --- ...da62_switch_manifest_text_to_a_longtext.py | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 data/migrations/versions/35f538da62_switch_manifest_text_to_a_longtext.py diff --git a/data/migrations/versions/35f538da62_switch_manifest_text_to_a_longtext.py b/data/migrations/versions/35f538da62_switch_manifest_text_to_a_longtext.py new file mode 100644 index 000000000..f11b5336a --- /dev/null +++ b/data/migrations/versions/35f538da62_switch_manifest_text_to_a_longtext.py @@ -0,0 +1,47 @@ +"""Switch manifest text to a longtext. + +Revision ID: 35f538da62 +Revises: 33bd39ef5ed6 +Create Date: 2015-10-23 15:31:27.353995 + +""" + +# revision identifiers, used by Alembic. +revision = '35f538da62' +down_revision = '33bd39ef5ed6' + +from alembic import op +import sqlalchemy as sa + +from sqlalchemy.types import TypeDecorator, Text +from sqlalchemy.dialects.mysql import LONGTEXT +import uuid + +class EngineLongText(TypeDecorator): + """Platform-independent LongText type. + + Uses MySQL's LONGTEXT type, otherwise uses + Text, because other engines are not as limited + as MySQL. + + """ + impl = Text + + def load_dialect_impl(self, dialect): + if dialect.name == 'mysql': + return dialect.type_descriptor(LONGTEXT()) + else: + return dialect.type_descriptor(Text()) + +def upgrade(tables): + ### commands auto generated by Alembic - please adjust! 
### + op.drop_column(u'tagmanifest', 'json_data') + op.add_column(u'tagmanifest', sa.Column('json_data', EngineLongText(), nullable=False)) + ### end Alembic commands ### + + +def downgrade(tables): + ### commands auto generated by Alembic - please adjust! ### + op.drop_column(u'tagmanifest', 'json_data') + op.add_column(u'tagmanifest', sa.Column('json_data', sa.Text(), nullable=False)) + ### end Alembic commands ### From b920bf64362498ab89abe4d9395dbc0c5dfa16c6 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Fri, 23 Oct 2015 16:15:09 -0400 Subject: [PATCH 17/19] Fix references to mysql in migrations --- .../127905a52fdd_remove_the_deprecated_imagestorage_.py | 9 ++++----- ...4fe12ade3df_add_build_queue_item_reference_to_the_.py | 1 - ...1594a74a74ca_add_metadata_field_to_external_logins.py | 1 - .../versions/17f11e265e13_add_uuid_field_to_user.py | 1 - .../versions/1c5b738283a5_backfill_user_uuids.py | 1 - .../versions/1d2d86d09fcd_actually_remove_the_column.py | 3 +-- .../201d55b38649_remove_fields_from_image_table_that_.py | 1 - data/migrations/versions/2088f2b81010_add_stars.py | 1 - .../versions/228d1af6af1c_mysql_max_index_lengths.py | 1 - .../versions/31288f79df53_make_resource_key_nullable.py | 5 ++--- .../313d297811c4_add_an_index_to_the_docker_image_id_.py | 1 - .../325a4d7c79d9_prepare_the_database_for_the_new_.py | 1 - .../versions/3fee6f979c2a_make_auth_token_nullable.py | 5 ++--- .../versions/4a0c94399f38_add_new_notification_kinds.py | 1 - ...db65816b8d_add_brute_force_prevention_metadata_to_.py | 1 - .../versions/5ad999136045_add_signature_storage.py | 1 - .../707d5191eda_change_build_queue_reference_from_.py | 3 +-- .../versions/82297d834ad_add_us_west_location.py | 1 - .../f42b0ea7a4d_remove_the_old_webhooks_table.py | 1 - 19 files changed, 10 insertions(+), 29 deletions(-) diff --git a/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py b/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py index 06c20c015..6ab6d79b7 100644 --- a/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py +++ b/data/migrations/versions/127905a52fdd_remove_the_deprecated_imagestorage_.py @@ -12,7 +12,6 @@ down_revision = '2e0380215d01' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### @@ -25,8 +24,8 @@ def upgrade(tables): def downgrade(tables): ### commands auto generated by Alembic - please adjust! 
### - op.add_column('imagestorage', sa.Column('created', mysql.DATETIME(), nullable=True)) - op.add_column('imagestorage', sa.Column('command', mysql.TEXT(), nullable=True)) - op.add_column('imagestorage', sa.Column('aggregate_size', mysql.BIGINT(display_width=20), autoincrement=False, nullable=True)) - op.add_column('imagestorage', sa.Column('comment', mysql.TEXT(), nullable=True)) + op.add_column('imagestorage', sa.Column('created', sa.DateTime(), nullable=True)) + op.add_column('imagestorage', sa.Column('command', sa.Text(), nullable=True)) + op.add_column('imagestorage', sa.Column('aggregate_size', sa.BigInteger(), nullable=True)) + op.add_column('imagestorage', sa.Column('comment', sa.Text(), nullable=True)) ### end Alembic commands ### diff --git a/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py b/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py index 5e8d21211..561a32dca 100644 --- a/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py +++ b/data/migrations/versions/14fe12ade3df_add_build_queue_item_reference_to_the_.py @@ -12,7 +12,6 @@ down_revision = '5ad999136045' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### diff --git a/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py b/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py index 2f6c60706..e4276effe 100644 --- a/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py +++ b/data/migrations/versions/1594a74a74ca_add_metadata_field_to_external_logins.py @@ -12,7 +12,6 @@ down_revision = 'f42b0ea7a4d' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! 
### diff --git a/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py b/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py index 9371941f8..3bf692fe6 100644 --- a/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py +++ b/data/migrations/versions/17f11e265e13_add_uuid_field_to_user.py @@ -12,7 +12,6 @@ down_revision = '313d297811c4' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): diff --git a/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py b/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py index 44ea6f5ec..baa78465b 100644 --- a/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py +++ b/data/migrations/versions/1c5b738283a5_backfill_user_uuids.py @@ -12,7 +12,6 @@ down_revision = '2fb36d4be80d' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql from util.migrate.backfill_user_uuids import backfill_user_uuids def upgrade(tables): diff --git a/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py b/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py index a7942b7d4..460296f17 100644 --- a/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py +++ b/data/migrations/versions/1d2d86d09fcd_actually_remove_the_column.py @@ -12,7 +12,6 @@ down_revision = '14fe12ade3df' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql from sqlalchemy.exc import InternalError def upgrade(tables): @@ -29,7 +28,7 @@ def upgrade(tables): def downgrade(tables): ### commands auto generated by Alembic - please adjust! ### try: - op.add_column('logentry', sa.Column('access_token_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True)) + op.add_column('logentry', sa.Column('access_token_id', sa.Integer(), nullable=True)) op.create_foreign_key(u'fk_logentry_access_token_id_accesstoken', 'logentry', 'accesstoken', ['access_token_id'], ['id']) op.create_index('logentry_access_token_id', 'logentry', ['access_token_id'], unique=False) except InternalError: diff --git a/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py b/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py index 8185c1118..02a119074 100644 --- a/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py +++ b/data/migrations/versions/201d55b38649_remove_fields_from_image_table_that_.py @@ -12,7 +12,6 @@ down_revision = '5a07499ce53f' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! 
### diff --git a/data/migrations/versions/2088f2b81010_add_stars.py b/data/migrations/versions/2088f2b81010_add_stars.py index ad4ccdf2b..af27da83e 100644 --- a/data/migrations/versions/2088f2b81010_add_stars.py +++ b/data/migrations/versions/2088f2b81010_add_stars.py @@ -12,7 +12,6 @@ down_revision = '707d5191eda' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): op.create_table('star', diff --git a/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py b/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py index 2f6ff722b..ed7fdc8be 100644 --- a/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py +++ b/data/migrations/versions/228d1af6af1c_mysql_max_index_lengths.py @@ -12,7 +12,6 @@ down_revision = '5b84373e5db' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): op.drop_index('queueitem_queue_name', table_name='queueitem') diff --git a/data/migrations/versions/31288f79df53_make_resource_key_nullable.py b/data/migrations/versions/31288f79df53_make_resource_key_nullable.py index e52795ce6..e14dfaca1 100644 --- a/data/migrations/versions/31288f79df53_make_resource_key_nullable.py +++ b/data/migrations/versions/31288f79df53_make_resource_key_nullable.py @@ -12,12 +12,11 @@ down_revision = '214350b6a8b1' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### op.alter_column('repositorybuild', 'resource_key', - existing_type=mysql.VARCHAR(length=255), + existing_type=sa.String(length=255), nullable=True) ### end Alembic commands ### @@ -25,6 +24,6 @@ def upgrade(tables): def downgrade(tables): ### commands auto generated by Alembic - please adjust! ### op.alter_column('repositorybuild', 'resource_key', - existing_type=mysql.VARCHAR(length=255), + existing_type=sa.String(length=255), nullable=False) ### end Alembic commands ### diff --git a/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py b/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py index 2ed6bd2f5..3987fe2cc 100644 --- a/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py +++ b/data/migrations/versions/313d297811c4_add_an_index_to_the_docker_image_id_.py @@ -12,7 +12,6 @@ down_revision = '204abf14783d' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### diff --git a/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py b/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py index d6bdcb35e..c11199a67 100644 --- a/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py +++ b/data/migrations/versions/325a4d7c79d9_prepare_the_database_for_the_new_.py @@ -12,7 +12,6 @@ down_revision = '4b7ef0c7bdb2' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! 
### diff --git a/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py b/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py index 2574271ef..04379eb60 100644 --- a/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py +++ b/data/migrations/versions/3fee6f979c2a_make_auth_token_nullable.py @@ -12,12 +12,11 @@ down_revision = '31288f79df53' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### op.alter_column('repositorybuildtrigger', 'auth_token', - existing_type=mysql.VARCHAR(length=255), + existing_type=sa.String(length=255), nullable=True) ### end Alembic commands ### @@ -25,6 +24,6 @@ def upgrade(tables): def downgrade(tables): ### commands auto generated by Alembic - please adjust! ### op.alter_column('repositorybuildtrigger', 'auth_token', - existing_type=mysql.VARCHAR(length=255), + existing_type=sa.String(length=255), nullable=False) ### end Alembic commands ### diff --git a/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py b/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py index 6b4160b19..efd3d1c60 100644 --- a/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py +++ b/data/migrations/versions/4a0c94399f38_add_new_notification_kinds.py @@ -12,7 +12,6 @@ down_revision = '1594a74a74ca' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): op.bulk_insert(tables.externalnotificationmethod, diff --git a/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py b/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py index bc8373655..42afef28f 100644 --- a/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py +++ b/data/migrations/versions/4fdb65816b8d_add_brute_force_prevention_metadata_to_.py @@ -12,7 +12,6 @@ down_revision = '43e943c0639f' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### diff --git a/data/migrations/versions/5ad999136045_add_signature_storage.py b/data/migrations/versions/5ad999136045_add_signature_storage.py index f306c58b8..210b91175 100644 --- a/data/migrations/versions/5ad999136045_add_signature_storage.py +++ b/data/migrations/versions/5ad999136045_add_signature_storage.py @@ -12,7 +12,6 @@ down_revision = '228d1af6af1c' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### diff --git a/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py b/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py index 9b2110df7..dc8f88087 100644 --- a/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py +++ b/data/migrations/versions/707d5191eda_change_build_queue_reference_from_.py @@ -12,7 +12,6 @@ down_revision = '4ef04c61fcf9' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### @@ -26,7 +25,7 @@ def upgrade(tables): def downgrade(tables): ### commands auto generated by Alembic - please adjust! 
### - op.add_column('repositorybuild', sa.Column('queue_item_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True)) + op.add_column('repositorybuild', sa.Column('queue_item_id', sa.Integer(), autoincrement=False, nullable=True)) op.create_foreign_key(u'fk_repositorybuild_queue_item_id_queueitem', 'repositorybuild', 'queueitem', ['queue_item_id'], ['id']) op.create_index('repositorybuild_queue_item_id', 'repositorybuild', ['queue_item_id'], unique=False) op.drop_index('repositorybuild_queue_id', table_name='repositorybuild') diff --git a/data/migrations/versions/82297d834ad_add_us_west_location.py b/data/migrations/versions/82297d834ad_add_us_west_location.py index b939a939e..33a543062 100644 --- a/data/migrations/versions/82297d834ad_add_us_west_location.py +++ b/data/migrations/versions/82297d834ad_add_us_west_location.py @@ -12,7 +12,6 @@ down_revision = '47670cbeced' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): op.bulk_insert(tables.imagestoragelocation, diff --git a/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py b/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py index 5b3f6c812..e36586a09 100644 --- a/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py +++ b/data/migrations/versions/f42b0ea7a4d_remove_the_old_webhooks_table.py @@ -12,7 +12,6 @@ down_revision = '4fdb65816b8d' from alembic import op import sqlalchemy as sa -from sqlalchemy.dialects import mysql def upgrade(tables): ### commands auto generated by Alembic - please adjust! ### From 3186311669daf784abf4d1b417802d33b3c084e6 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Fri, 23 Oct 2015 16:24:40 -0400 Subject: [PATCH 18/19] Test postgres before mysql variations in migration --- data/migrations/migration.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/data/migrations/migration.sh b/data/migrations/migration.sh index 9d20c5a6a..65521f6a6 100755 --- a/data/migrations/migration.sh +++ b/data/migrations/migration.sh @@ -108,6 +108,16 @@ test_migrate $MYSQL_CONFIG_OVERRIDE set -e down_mysql +# Test via Postgres. +echo '> Starting Postgres' +up_postgres + +echo '> Testing Migration (postgres)' +set +e +test_migrate $PGSQL_CONFIG_OVERRIDE +set -e +down_postgres + # Test via MariaDB. echo '> Starting MariaDB' up_mariadb @@ -127,13 +137,3 @@ set +e test_migrate $PERCONA_CONFIG_OVERRIDE set -e down_percona - -# Test via Postgres. 
-echo '> Starting Postgres'
-up_postgres
-
-echo '> Testing Migration (postgres)'
-set +e
-test_migrate $PGSQL_CONFIG_OVERRIDE
-set -e
-down_postgres

From f0d19dbbc87c5d983f143b9177f46fd774ded112 Mon Sep 17 00:00:00 2001
From: Jake Moshenko
Date: Sat, 24 Oct 2015 15:31:32 -0400
Subject: [PATCH 19/19] Add the resumablehashlib requirement

---
 requirements-nover.txt | 1 +
 requirements.txt       | 1 +
 2 files changed, 2 insertions(+)

diff --git a/requirements-nover.txt b/requirements-nover.txt
index 452f54b63..0ebe5534d 100644
--- a/requirements-nover.txt
+++ b/requirements-nover.txt
@@ -39,6 +39,7 @@ git+https://github.com/DevTable/container-cloud-config.git
 git+https://github.com/coreos/py-bitbucket.git
 git+https://github.com/coreos/pyapi-gitlab.git@timeout
 git+https://github.com/coreos/mockldap.git
+git+https://github.com/coreos/resumablehashlib.git
 git+https://github.com/DevTable/python-etcd.git@sslfix
 gipc
 pyOpenSSL
diff --git a/requirements.txt b/requirements.txt
index c1eec4fbe..a6f4c28a3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -99,5 +99,6 @@ git+https://github.com/DevTable/container-cloud-config.git
 git+https://github.com/coreos/py-bitbucket.git
 git+https://github.com/coreos/pyapi-gitlab.git@timeout
 git+https://github.com/coreos/mockldap.git
+git+https://github.com/coreos/resumablehashlib.git
 git+https://github.com/DevTable/python-etcd.git@sslfix
 git+https://github.com/NateFerrero/oauth2lib.git
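A closing note on the new dependency: resumablehashlib is what lets the
BlobUpload.sha_state column added in patch 15 checkpoint a SHA-256 digest across
HTTP requests. A minimal sketch of that round trip, mirroring ResumableSHAField
from data/fields.py; the update() and hexdigest() calls assume the library keeps
hashlib's interface, which these patches themselves do not exercise:

    # Sketch: checkpoint a SHA-256 digest across two requests, the way
    # BlobUpload.sha_state persists it between blob-upload chunks.
    import base64
    import json

    import resumablehashlib

    hasher = resumablehashlib.sha256()
    hasher.update('first chunk of a blob upload')

    # Freeze the state exactly as ResumableSHAField.db_value does.
    state = hasher.state()
    state[3] = base64.b64encode(state[3])
    frozen = json.dumps(state)

    # ...later, in a different request handler, thaw and keep hashing.
    state = json.loads(frozen)
    state[3] = base64.b64decode(state[3])
    resumed = resumablehashlib.sha256()
    resumed.set_state(state)
    resumed.update('second chunk of the same blob')
    print(resumed.hexdigest())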