From 78c5aec5b945f670188026943377fc21b2cff369 Mon Sep 17 00:00:00 2001 From: Jake Moshenko Date: Wed, 11 Jun 2014 15:37:45 -0400 Subject: [PATCH] Switch the checksums to use the registry computed value, remove all assumptions of namespaced paths for legacy storage, fix an upload race condition in the registry code. --- data/database.py | 7 +- data/model/legacy.py | 23 +----- endpoints/api/image.py | 3 +- endpoints/index.py | 8 +- endpoints/registry.py | 176 ++++++++++++++++++---------------------- initdb.py | 8 +- storage/basestorage.py | 38 ++++----- test/data/test.db | Bin 200704 -> 202752 bytes test/specs.py | 6 +- tools/auditancestry.py | 62 +------------- tools/audittagimages.py | 45 ---------- 11 files changed, 112 insertions(+), 264 deletions(-) delete mode 100644 tools/audittagimages.py diff --git a/data/database.py b/data/database.py index ffa8c909e..9ec53435b 100644 --- a/data/database.py +++ b/data/database.py @@ -228,12 +228,7 @@ class Image(BaseModel): # security reasons. So rather than Repository <-> Image being many to many # each image now belongs to exactly one repository. docker_image_id = CharField() - checksum = CharField(null=True) - created = DateTimeField(null=True) - comment = TextField(null=True) - command = TextField(null=True) repository = ForeignKeyField(Repository) - image_size = BigIntegerField(null=True) # '/' separated list of ancestory ids, e.g. /1/2/6/7/10/ ancestors = CharField(index=True, default='/', max_length=64535, null=True) @@ -244,7 +239,7 @@ class Image(BaseModel): database = db indexes = ( # we don't really want duplicates - (('repository', 'docker_image_id'), False), + (('repository', 'docker_image_id'), True), ) diff --git a/data/model/legacy.py b/data/model/legacy.py index f27e29170..c7fd49b4c 100644 --- a/data/model/legacy.py +++ b/data/model/legacy.py @@ -1011,14 +1011,6 @@ def find_create_or_link_image(docker_image_id, repository, username, return new_image -def set_image_checksum(docker_image_id, repository, checksum): - fetched = Image.get(Image.docker_image_id == docker_image_id, - Image.repository == repository) - fetched.checksum = checksum - fetched.save() - return fetched - - def set_image_size(docker_image_id, namespace_name, repository_name, image_size): try: @@ -1122,15 +1114,9 @@ def garbage_collect_repository(namespace_name, repository_name): for image_id_to_remove in to_remove: image_to_remove = all_images[image_id_to_remove] - if image_to_remove.storage and image_to_remove.storage.id: - logger.debug('Adding image storage to the gc list: %s', - image_to_remove.storage.uuid) - uuids_to_check_for_gc.add(image_to_remove.storage.uuid) - else: - image_path = config.store.image_path(namespace_name, repository_name, - image_to_remove.docker_image_id, None) - logger.debug('Deleting image storage: %s', image_path) - config.store.remove(image_path) + logger.debug('Adding image storage to the gc list: %s', + image_to_remove.storage.uuid) + uuids_to_check_for_gc.add(image_to_remove.storage.uuid) image_to_remove.delete_instance() @@ -1145,8 +1131,7 @@ def garbage_collect_repository(namespace_name, repository_name): for storage in storage_to_remove: logger.debug('Garbage collecting image storage: %s', storage.uuid) storage.delete_instance() - image_path = config.store.image_path(namespace_name, repository_name, - image_to_remove.docker_image_id, storage.uuid) + image_path = config.store.image_path(storage.uuid) config.store.remove(image_path) return len(to_remove) diff --git a/endpoints/api/image.py b/endpoints/api/image.py index 
4571b140b..6593d18df 100644 --- a/endpoints/api/image.py +++ b/endpoints/api/image.py @@ -79,8 +79,7 @@ class RepositoryImageChanges(RepositoryParamResource): if not image: raise NotFound() - uuid = image.storage and image.storage.uuid - diffs_path = store.image_file_diffs_path(namespace, repository, image_id, uuid) + diffs_path = store.image_file_diffs_path(image.storage.uuid) try: response_json = json.loads(store.get_content(diffs_path)) diff --git a/endpoints/index.py b/endpoints/index.py index b1a22f09e..f1d6075f7 100644 --- a/endpoints/index.py +++ b/endpoints/index.py @@ -181,7 +181,7 @@ def update_user(username): @generate_headers(role='write') def create_repository(namespace, repository): profile.debug('Parsing image descriptions') - image_descriptions = json.loads(request.data) + image_descriptions = json.loads(request.data.decode('utf8')) profile.debug('Looking up repository') repo = model.get_repository(namespace, repository) @@ -292,13 +292,11 @@ def update_images(namespace, repository): abort(404, message='Unknown repository', issue='unknown-repo') profile.debug('Parsing image data') - image_with_checksums = json.loads(request.data) + image_with_checksums = json.loads(request.data.decode('utf8')) updated_tags = {} for image in image_with_checksums: - profile.debug('Setting checksum for image id: %s to %s', image['id'], image['checksum']) updated_tags[image['Tag']] = image['id'] - model.set_image_checksum(image['id'], repo, image['checksum']) if get_authenticated_user(): profile.debug('Publishing push event') @@ -366,7 +364,7 @@ def get_repository_images(namespace, repository): for image in model.get_repository_images(namespace, repository): new_image_view = { 'id': image.docker_image_id, - 'checksum': image.checksum, + 'checksum': image.storage.checksum, } all_images.append(new_image_view) diff --git a/endpoints/registry.py b/endpoints/registry.py index 8af9fcb03..76c7d56cd 100644 --- a/endpoints/registry.py +++ b/endpoints/registry.py @@ -38,27 +38,29 @@ class SocketReader(object): return buf -def image_is_uploading(namespace, repository, image_id, repo_image): - if repo_image and repo_image.storage and repo_image.storage.uploading is not None: +def image_is_uploading(repo_image): + if repo_image is None: + return False + + if repo_image.storage.uploading is not None: return repo_image.storage.uploading - logger.warning('Setting legacy upload flag') - uuid = repo_image and repo_image.storage and repo_image.storage.uuid - mark_path = store.image_mark_path(namespace, repository, image_id, uuid) + logger.warning('Checking legacy upload flag') + mark_path = store.image_mark_path(repo_image.storage.uuid) return store.exists(mark_path) -def mark_upload_complete(namespace, repository, image_id, repo_image): - if repo_image and repo_image.storage and repo_image.storage.uploading is not None: - repo_image.storage.uploading = False - repo_image.storage.save() - else: +def set_uploading_flag(repo_image, is_image_uploading): + if repo_image.storage.uploading is None and not is_image_uploading: logger.warning('Removing legacy upload flag') - uuid = repo_image and repo_image.storage and repo_image.storage.uuid - mark_path = store.image_mark_path(namespace, repository, image_id, uuid) + uuid = repo_image.storage.uuid + mark_path = store.image_mark_path(uuid) if store.exists(mark_path): store.remove(mark_path) + repo_image.storage.uploading = is_image_uploading + repo_image.storage.save() + def require_completion(f): """This make sure that the image push correctly finished.""" @@ -66,7 +68,7 
@@ def require_completion(f): def wrapper(namespace, repository, *args, **kwargs): image_id = kwargs['image_id'] repo_image = model.get_repo_image(namespace, repository, image_id) - if image_is_uploading(namespace, repository, image_id, repo_image): + if image_is_uploading(repo_image): abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress', image_id=kwargs['image_id']) @@ -111,21 +113,20 @@ def get_image_layer(namespace, repository, image_id, headers): profile.debug('Looking up repo image') repo_image = model.get_repo_image(namespace, repository, image_id) - uuid = repo_image and repo_image.storage and repo_image.storage.uuid - profile.debug('Looking up the layer path') - path = store.image_layer_path(namespace, repository, image_id, uuid) - - profile.debug('Looking up the direct download URL') - direct_download_url = store.get_direct_download_url(path) - - if direct_download_url: - profile.debug('Returning direct download URL') - return redirect(direct_download_url) try: + path = store.image_layer_path(repo_image.storage.uuid) + + profile.debug('Looking up the direct download URL') + direct_download_url = store.get_direct_download_url(path) + + if direct_download_url: + profile.debug('Returning direct download URL') + return redirect(direct_download_url) + profile.debug('Streaming layer data') return Response(store.stream_read(path), headers=headers) - except IOError: + except (IOError, AttributeError): profile.debug('Image not found') abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) @@ -144,21 +145,19 @@ def put_image_layer(namespace, repository, image_id): profile.debug('Retrieving image') repo_image = model.get_repo_image(namespace, repository, image_id) - - uuid = repo_image and repo_image.storage and repo_image.storage.uuid try: profile.debug('Retrieving image data') - json_data = store.get_content(store.image_json_path(namespace, repository, - image_id, uuid)) - except IOError: + uuid = repo_image.storage.uuid + json_data = store.get_content(store.image_json_path(uuid)) + except (IOError, AttributeError): abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) profile.debug('Retrieving image path info') - layer_path = store.image_layer_path(namespace, repository, image_id, uuid) + layer_path = store.image_layer_path(uuid) if (store.exists(layer_path) and not - image_is_uploading(namespace, repository, image_id, repo_image)): + image_is_uploading(repo_image)): abort(409, 'Image already exists', issue='image-exists', image_id=image_id) profile.debug('Storing layer data') @@ -193,9 +192,7 @@ def put_image_layer(namespace, repository, image_id): '{0}'.format(e)) try: - checksum = store.get_content(store.image_checksum_path(namespace, - repository, - image_id, uuid)) + checksum = store.get_content(store.image_checksum_path(uuid)) except IOError: # We don't have a checksum stored yet, that's fine skipping the check. # Not removing the mark though, image is not downloadable yet. @@ -209,7 +206,7 @@ def put_image_layer(namespace, repository, image_id): issue='checksum-mismatch', image_id=image_id) # Checksum is ok, we remove the marker - mark_upload_complete(namespace, repository, image_id, repo_image) + set_uploading_flag(repo_image, False) # The layer is ready for download, send a job to the work queue to # process it. 
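The hunks above replace the old filesystem `_inprogress` mark file with an `uploading` column on the ImageStorage row, falling back to the mark file only for legacy rows whose column is still NULL. A minimal, self-contained sketch of that logic follows; the StorageRecord and FakeStore classes are illustrative stand-ins, not the actual Quay models or storage driver.

class StorageRecord(object):
    # Stand-in for the ImageStorage row; `uploading` is None on legacy rows.
    def __init__(self, uuid, uploading=None):
        self.uuid = uuid
        self.uploading = uploading

    def save(self):
        pass  # a real row would persist the column change here


class FakeStore(object):
    # Stand-in for the blob store; only the legacy mark-file path matters here.
    def __init__(self):
        self.files = set()

    def image_mark_path(self, uuid):
        return 'sharedimages/{0}/_inprogress'.format(uuid)

    def exists(self, path):
        return path in self.files

    def remove(self, path):
        self.files.discard(path)


def image_is_uploading(store, record):
    if record is None:
        return False
    if record.uploading is not None:
        return record.uploading          # new rows: trust the database column
    return store.exists(store.image_mark_path(record.uuid))  # legacy fallback


def set_uploading_flag(store, record, is_uploading):
    # Clearing the flag on a legacy row also deletes the old mark file, so later
    # checks never fall back to stale filesystem state.
    if record.uploading is None and not is_uploading:
        mark = store.image_mark_path(record.uuid)
        if store.exists(mark):
            store.remove(mark)
    record.uploading = is_uploading
    record.save()


store = FakeStore()
legacy = StorageRecord('0123-4567')
store.files.add(store.image_mark_path(legacy.uuid))
print(image_is_uploading(store, legacy))   # True, via the legacy mark file
set_uploading_flag(store, legacy, False)
print(image_is_uploading(store, legacy))   # False, now tracked in the column

Later in the patch, put_image_json calls the same helper with True before any metadata is written, so a partially pushed image is never mistaken for a complete one until its checksum has been verified and the flag is cleared.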
@@ -232,9 +229,11 @@ def put_image_checksum(namespace, repository, image_id): if not permission.can(): abort(403) - checksum = request.headers.get('X-Docker-Checksum') + checksum = (request.headers.get('X-Docker-Checksum-Payload', None) or + request.headers.get('X-Docker-Checksum')) if not checksum: - abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum', image_id=image_id) + abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum', + image_id=image_id) if not session.get('checksum'): abort(400, 'Checksum not found in Cookie for image %(image_id)s', @@ -242,21 +241,19 @@ def put_image_checksum(namespace, repository, image_id): profile.debug('Looking up repo image') repo_image = model.get_repo_image(namespace, repository, image_id) - - uuid = repo_image and repo_image.storage and repo_image.storage.uuid + uuid = repo_image.storage.uuid profile.debug('Looking up repo layer data') - if not store.exists(store.image_json_path(namespace, repository, image_id, - uuid)): + if not store.exists(store.image_json_path(uuid)): abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id) profile.debug('Marking image path') - if not image_is_uploading(namespace, repository, image_id, repo_image): + if not image_is_uploading(repo_image): abort(409, 'Cannot set checksum for image %(image_id)s', issue='image-write-error', image_id=image_id) profile.debug('Storing image checksum') - err = store_checksum(namespace, repository, image_id, uuid, checksum) + err = store_checksum(repo_image.storage, checksum) if err: abort(400, err) @@ -268,7 +265,7 @@ def put_image_checksum(namespace, repository, image_id): issue='checksum-mismatch', image_id=image_id) # Checksum is ok, we remove the marker - mark_upload_complete(namespace, repository, image_id, repo_image) + set_uploading_flag(repo_image, False) # The layer is ready for download, send a job to the work queue to # process it. 
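put_image_checksum now prefers the X-Docker-Checksum-Payload header sent by newer Docker clients and only falls back to the older X-Docker-Checksum, and store_checksum (shown later in this patch) rejects anything that does not split into exactly an algorithm and a digest on ':'. A small illustrative sketch of that handling, with a plain dict standing in for Flask's request.headers:

def extract_checksum(headers):
    # Prefer the payload checksum from newer clients, then fall back.
    checksum = (headers.get('X-Docker-Checksum-Payload') or
                headers.get('X-Docker-Checksum'))
    if not checksum:
        raise ValueError('Missing checksum header')

    # Same shape test as store_checksum: "<algorithm>:<hexdigest>".
    if len(checksum.split(':')) != 2:
        raise ValueError('Invalid checksum format: %s' % checksum)
    return checksum


print(extract_checksum({'X-Docker-Checksum-Payload': 'sha256:9f86d081884c'}))
print(extract_checksum({'X-Docker-Checksum': 'sha256:2c26b46b68ff'}))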
@@ -296,13 +293,11 @@ def get_image_json(namespace, repository, image_id, headers): profile.debug('Looking up repo image') repo_image = model.get_repo_image(namespace, repository, image_id) - uuid = repo_image and repo_image.storage and repo_image.storage.uuid profile.debug('Looking up repo layer data') try: - data = store.get_content(store.image_json_path(namespace, repository, - image_id, uuid)) - except IOError: + data = store.get_content(store.image_json_path(repo_image.storage.uuid)) + except (IOError, AttributeError): flask_abort(404) profile.debug('Looking up repo layer size') @@ -312,12 +307,6 @@ def get_image_json(namespace, repository, image_id, headers): except OSError: pass - profile.debug('Retrieving checksum') - checksum_path = store.image_checksum_path(namespace, repository, image_id, - uuid) - if store.exists(checksum_path): - headers['X-Docker-Checksum'] = store.get_content(checksum_path) - response = make_response(data, 200) response.headers.extend(headers) return response @@ -337,13 +326,11 @@ def get_image_ancestry(namespace, repository, image_id, headers): profile.debug('Looking up repo image') repo_image = model.get_repo_image(namespace, repository, image_id) - uuid = repo_image and repo_image.storage and repo_image.storage.uuid profile.debug('Looking up image data') try: - data = store.get_content(store.image_ancestry_path(namespace, repository, - image_id, uuid)) - except IOError: + data = store.get_content(store.image_ancestry_path(repo_image.storage.uuid)) + except (IOError, AttributeError): abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) @@ -355,32 +342,32 @@ def get_image_ancestry(namespace, repository, image_id, headers): return response -def generate_ancestry(namespace, repository, image_id, uuid, parent_id=None, +def generate_ancestry(image_id, uuid, parent_id=None, parent_uuid=None): if not parent_id: - store.put_content(store.image_ancestry_path(namespace, repository, - image_id, uuid), + store.put_content(store.image_ancestry_path(uuid), json.dumps([image_id])) return - data = store.get_content(store.image_ancestry_path(namespace, repository, - parent_id, parent_uuid)) + data = store.get_content(store.image_ancestry_path(parent_uuid)) data = json.loads(data) data.insert(0, image_id) - store.put_content(store.image_ancestry_path(namespace, repository, - image_id, uuid), + store.put_content(store.image_ancestry_path(uuid), json.dumps(data)) -def store_checksum(namespace, repository, image_id, uuid, checksum): +def store_checksum(image_storage, checksum): checksum_parts = checksum.split(':') if len(checksum_parts) != 2: return 'Invalid checksum format' # We store the checksum - checksum_path = store.image_checksum_path(namespace, repository, image_id, - uuid) + checksum_path = store.image_checksum_path(image_storage.uuid) store.put_content(checksum_path, checksum) + # And store it in the db + image_storage.checksum = checksum + image_storage.save() + @registry.route('/images//json', methods=['PUT']) @process_auth @@ -393,9 +380,10 @@ def put_image_json(namespace, repository, image_id): profile.debug('Parsing image JSON') try: - data = json.loads(request.data) - except json.JSONDecodeError: + data = json.loads(request.data.decode('utf8')) + except ValueError: pass + if not data or not isinstance(data, dict): abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s', issue='invalid-request', image_id=image_id, json=request.data) @@ -406,22 +394,8 @@ def put_image_json(namespace, repository, image_id): 
profile.debug('Looking up repo image') repo_image = model.get_repo_image(namespace, repository, image_id) - uuid = repo_image and repo_image.storage and repo_image.storage.uuid + uuid = repo_image.storage.uuid - # Read the checksum - checksum = request.headers.get('X-Docker-Checksum') - if checksum: - # Storing the checksum is optional at this stage - profile.debug('Storing image checksum') - err = store_checksum(namespace, repository, image_id, uuid, checksum) - if err: - abort(400, err, issue='write-error') - - else: - # We cleanup any old checksum in case it's a retry after a fail - profile.debug('Cleanup old checksum') - store.remove(store.image_checksum_path(namespace, repository, image_id, - uuid)) if image_id != data['id']: abort(400, 'JSON data contains invalid id for image: %(image_id)s', issue='invalid-request', image_id=image_id) @@ -433,26 +407,33 @@ def put_image_json(namespace, repository, image_id): profile.debug('Looking up parent image') parent_image = model.get_repo_image(namespace, repository, parent_id) - parent_uuid = (parent_image and parent_image.storage and - parent_image.storage.uuid) + parent_uuid = parent_image and parent_image.storage.uuid if parent_id: profile.debug('Looking up parent image data') if (parent_id and not - store.exists(store.image_json_path(namespace, repository, parent_id, - parent_uuid))): + store.exists(store.image_json_path(parent_uuid))): abort(400, 'Image %(image_id)s depends on non existing parent image %(parent_id)s', issue='invalid-request', image_id=image_id, parent_id=parent_id) profile.debug('Looking up image storage paths') - json_path = store.image_json_path(namespace, repository, image_id, uuid) + json_path = store.image_json_path(uuid) profile.debug('Checking if image already exists') if (store.exists(json_path) and not - image_is_uploading(namespace, repository, image_id, repo_image)): + image_is_uploading(repo_image)): abort(409, 'Image already exists', issue='image-exists', image_id=image_id) + set_uploading_flag(repo_image, True) + + # We cleanup any old checksum in case it's a retry after a fail + profile.debug('Cleanup old checksum') + try: + store.remove(store.image_checksum_path(uuid)) + except Exception: + pass + # If we reach that point, it means that this is a new image or a retry # on a failed push # save the metadata @@ -468,8 +449,7 @@ def put_image_json(namespace, repository, image_id): store.put_content(json_path, request.data) profile.debug('Generating image ancestry') - generate_ancestry(namespace, repository, image_id, uuid, parent_id, - parent_uuid) + generate_ancestry(image_id, uuid, parent_id, parent_uuid) profile.debug('Done') return make_response('true', 200) @@ -479,12 +459,10 @@ def process_image_changes(namespace, repository, image_id): logger.debug('Generating diffs for image: %s' % image_id) repo_image = model.get_repo_image(namespace, repository, image_id) - uuid = repo_image and repo_image.storage and repo_image.storage.uuid + uuid = repo_image.storage.uuid - image_diffs_path = store.image_file_diffs_path(namespace, repository, - image_id, uuid) - image_trie_path = store.image_file_trie_path(namespace, repository, - image_id, uuid) + image_diffs_path = store.image_file_diffs_path(uuid) + image_trie_path = store.image_file_trie_path(uuid) if store.exists(image_diffs_path): logger.debug('Diffs already exist for image: %s' % image_id) @@ -506,7 +484,7 @@ def process_image_changes(namespace, repository, image_id): parent_trie.frombytes(parent_trie_bytes) # Read in the file entries from the layer tar file 
- layer_path = store.image_layer_path(namespace, repository, image_id, uuid) + layer_path = store.image_layer_path(uuid) with store.stream_read_file(layer_path) as layer_tar_stream: removed_files = set() layer_files = changes.files_and_dirs_from_tar(layer_tar_stream, diff --git a/initdb.py b/initdb.py index 33d2f048e..a48cac4e3 100644 --- a/initdb.py +++ b/initdb.py @@ -70,10 +70,10 @@ def __create_subtree(repo, structure, creator_username, parent): new_image = model.find_create_or_link_image(docker_image_id, repo, None, {}) new_image.storage.uuid = IMAGE_UUIDS[image_num % len(IMAGE_UUIDS)] + new_image.storage.uploading = False + new_image.storage.checksum = checksum new_image.storage.save() - model.set_image_checksum(docker_image_id, repo, checksum) - creation_time = REFERENCE_DATE + timedelta(days=image_num) command_list = SAMPLE_CMDS[image_num % len(SAMPLE_CMDS)] command = json.dumps(command_list) if command_list else None @@ -85,9 +85,7 @@ def __create_subtree(repo, structure, creator_username, parent): random.randrange(1, 1024 * 1024 * 1024)) # Populate the diff file - diff_path = store.image_file_diffs_path(repo.namespace, repo.name, - docker_image_id, - new_image.storage.uuid) + diff_path = store.image_file_diffs_path(new_image.storage.uuid) source_diff = SAMPLE_DIFFS[image_num % len(SAMPLE_DIFFS)] with open(source_diff, 'r') as source_file: diff --git a/storage/basestorage.py b/storage/basestorage.py index 1e924ed1a..93b2b64e8 100644 --- a/storage/basestorage.py +++ b/storage/basestorage.py @@ -29,41 +29,35 @@ class BaseStorage(object): return tmpf, fn - def image_path(self, namespace, repository, image_id, storage_uuid): - if storage_uuid: - return '{0}/{1}/'.format(self.shared_images, storage_uuid) - else: - return '{0}/{1}/{2}/{3}/'.format(self.images, namespace, repository, - image_id) + def image_path(self, storage_uuid): + return '{0}/{1}/'.format(self.shared_images, storage_uuid) - def image_json_path(self, namespace, repository, image_id, storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_json_path(self, storage_uuid): + base_path = self.image_path(storage_uuid) return '{0}json'.format(base_path) - def image_mark_path(self, namespace, repository, image_id, storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_mark_path(self, storage_uuid): + base_path = self.image_path(storage_uuid) return '{0}_inprogress'.format(base_path) - def image_checksum_path(self, namespace, repository, image_id, storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_checksum_path(self, storage_uuid): + base_path = self.image_path(storage_uuid) return '{0}_checksum'.format(base_path) - def image_layer_path(self, namespace, repository, image_id, storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_layer_path(self, storage_uuid): + base_path = self.image_path(storage_uuid) return '{0}layer'.format(base_path) - def image_ancestry_path(self, namespace, repository, image_id, storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_ancestry_path(self, storage_uuid): + base_path = self.image_path(storage_uuid) return '{0}ancestry'.format(base_path) - def image_file_trie_path(self, namespace, repository, image_id, - storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_file_trie_path(self, storage_uuid): + 
base_path = self.image_path(storage_uuid) return '{0}files.trie'.format(base_path) - def image_file_diffs_path(self, namespace, repository, image_id, - storage_uuid): - base_path = self.image_path(namespace, repository, image_id, storage_uuid) + def image_file_diffs_path(self, storage_uuid): + base_path = self.image_path(storage_uuid) return '{0}diffs.json'.format(base_path) def get_direct_download_url(self, path, expires_in=60):
diff --git a/test/data/test.db b/test/data/test.db index 801d3a28a5c7daae5d8ee678ae00d991b218c1ba..8c227a1ca31e2b9d9520bd068017f71610338b56 100644 GIT binary patch delta 13025 [base85-encoded binary delta for test/data/test.db omitted]
diff --git a/test/specs.py b/test/specs.py index c8f53a376..33db0493e 100644 --- a/test/specs.py +++ b/test/specs.py @@ -113,11 +113,11 @@ class IndexTestSpec(object): def build_index_specs(): return [ IndexTestSpec(url_for('registry.get_image_layer', image_id=FAKE_IMAGE_ID), - PUBLIC_REPO, 200, 200, 200, 200), + PUBLIC_REPO, 404, 404, 404, 404), IndexTestSpec(url_for('registry.get_image_layer', image_id=FAKE_IMAGE_ID), - PRIVATE_REPO), + PRIVATE_REPO, 403, 403, 404, 404), IndexTestSpec(url_for('registry.get_image_layer', image_id=FAKE_IMAGE_ID), - ORG_REPO), + ORG_REPO, 403, 403, 404, 404), IndexTestSpec(url_for('registry.put_image_layer', image_id=FAKE_IMAGE_ID), PUBLIC_REPO, 403, 403, 403, 403).set_method('PUT'),
diff --git a/tools/auditancestry.py b/tools/auditancestry.py index c9c350a79..3bcd8aa78 100644 --- a/tools/auditancestry.py +++ b/tools/auditancestry.py @@ -5,9 +5,6 @@ from data.database import Image, ImageStorage, Repository from data import model from app import app, storage as store -import boto.s3.connection -import boto.s3.key - logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG) @@ -22,23
+19,9 @@ query = (Image .switch(Image) .join(Repository)) - bad_count = 0 good_count = 0 -s3_conn = boto.s3.connection.S3Connection(app.config['AWS_ACCESS_KEY'], - app.config['AWS_SECRET_KEY']) -s3_bucket = s3_conn.get_bucket('quay-registry') - -PATHS = [ - store.image_json_path, - store.image_checksum_path, - store.image_layer_path, - store.image_ancestry_path, - store.image_file_trie_path, - store.image_file_diffs_path, -] - def resolve_or_create(repo, docker_image_id, new_ancestry): existing = model.get_repo_image(repo.namespace, repo.name, docker_image_id) if existing: @@ -58,42 +41,9 @@ def resolve_or_create(repo, docker_image_id, new_ancestry): logger.debug('Created image: %s' % created) return created except ImageStorage.DoesNotExist: - logger.warning('No storage for ancestor, tring to find it anywhere: %s', - docker_image_id) - try: - found = Image.get(docker_image_id=docker_image_id) - logger.debug('Found some legacy storage for docker_image_id: %s', - docker_image_id) - new_storage = ImageStorage.create(checksum=found.checksum, - created=found.created, - comment=found.comment, - command=found.command, - image_size=found.image_size) - - logger.debug('Migrating data to new storage: %s' % new_storage.uuid) - - for path in PATHS: - old_path = path(found.repository.namespace, found.repository.name, - docker_image_id, None) - new_path = path(None, None, None, new_storage.uuid) - logger.debug('Copying %s -> %s', old_path, new_path) - - old_path_key = s3_bucket.get_key(old_path) - old_path_key.copy('quay-registry', new_path, encrypt_key=True, - validate_dst_bucket=False) - - logger.debug('Creating new image from copied legacy storage: %s', - new_storage.uuid) - created = Image.create(docker_image_id=docker_image_id, - repository=repo, - storage=new_storage, ancestors=new_ancestry) - logger.debug('Created image: %s' % created) - return created - - except Image.DoesNotExist: - msg = 'No image available anywhere for storage: %s in namespace: %s' - logger.error(msg, docker_image_id, repo.namespace) - raise RuntimeError() + msg = 'No image available anywhere for storage: %s in namespace: %s' + logger.error(msg, docker_image_id, repo.namespace) + raise RuntimeError() def all_ancestors_exist(ancestors): @@ -109,11 +59,7 @@ def all_ancestors_exist(ancestors): cant_fix = [] for img in query: try: - uuid = img.storage.uuid - ancestry_storage = store.image_ancestry_path(img.repository.namespace, - img.repository.name, - img.docker_image_id, - uuid) + ancestry_storage = store.image_ancestry_path(img.storage.uuid) if store.exists(ancestry_storage): full_ancestry = json.loads(store.get_content(ancestry_storage))[1:] full_ancestry.reverse() diff --git a/tools/audittagimages.py b/tools/audittagimages.py deleted file mode 100644 index 7ae223146..000000000 --- a/tools/audittagimages.py +++ /dev/null @@ -1,45 +0,0 @@ -from data.database import Image, RepositoryTag, Repository - -from app import storage as store - - -tag_query = (RepositoryTag - .select(RepositoryTag, Image, Repository) - .join(Repository) - .switch(RepositoryTag) - .join(Image)) - -for tag in tag_query: - if tag.image.repository.id != tag.repository.id: - print('Repository tag pointing to external image: %s/%s:%s' % - (tag.repository.namespace, tag.repository.name, tag.name)) - - proper_image_layer_path = store.image_layer_path(tag.repository.namespace, - tag.repository.name, - tag.image.docker_image_id) - - has_storage = False - if store.exists(proper_image_layer_path): - print('Storage already in place: %s' % proper_image_layer_path) - 
has_storage = True - else: - print('Storage missing: %s' % proper_image_layer_path) - - has_db_entry = False - new_image = None - try: - new_image = Image.get(Image.docker_image_id == tag.image.docker_image_id, - Image.repository == tag.repository) - has_db_entry = True - print('DB image in place: %s invalid image id: %s' % (new_image.id, - tag.image.id)) - except Image.DoesNotExist: - print('DB image missing: %s' % tag.image.docker_image_id) - - if has_storage and has_db_entry: - print('Switching tag to proper image %s/%s/%s -> %s' % - (tag.repository.namespace, tag.repository.name, tag.name, - new_image.id)) - tag.image = new_image - tag.save() - print('Done')
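With the tooling changes above, nothing in the tree builds namespaced image paths any more. For reference, a condensed sketch of the layout that storage/basestorage.py settles on after this patch: every per-image file is keyed purely by the ImageStorage UUID under one shared prefix. The 'sharedimages' value below is a placeholder; the real prefix comes from the storage driver's configuration, which this patch does not show.

class SharedImagePaths(object):
    def __init__(self, shared_images='sharedimages'):  # placeholder prefix
        self.shared_images = shared_images

    def image_path(self, storage_uuid):
        return '{0}/{1}/'.format(self.shared_images, storage_uuid)

    def image_json_path(self, storage_uuid):
        return self.image_path(storage_uuid) + 'json'

    def image_mark_path(self, storage_uuid):
        return self.image_path(storage_uuid) + '_inprogress'

    def image_checksum_path(self, storage_uuid):
        return self.image_path(storage_uuid) + '_checksum'

    def image_layer_path(self, storage_uuid):
        return self.image_path(storage_uuid) + 'layer'

    def image_ancestry_path(self, storage_uuid):
        return self.image_path(storage_uuid) + 'ancestry'

    def image_file_trie_path(self, storage_uuid):
        return self.image_path(storage_uuid) + 'files.trie'

    def image_file_diffs_path(self, storage_uuid):
        return self.image_path(storage_uuid) + 'diffs.json'


paths = SharedImagePaths()
print(paths.image_layer_path('0123-4567'))      # sharedimages/0123-4567/layer
print(paths.image_checksum_path('0123-4567'))   # sharedimages/0123-4567/_checksum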