From 6c5c2f1a752af365eaca51ee226da4ec12af62af Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 28 Aug 2018 13:02:26 -0400 Subject: [PATCH] Add new methods to registry data model interface in prep for moving verbs to using it --- data/model/blob.py | 17 ++ data/model/image.py | 29 +-- data/model/storage.py | 2 +- data/registry_model/datatype.py | 3 + data/registry_model/datatypes.py | 117 +++++++++- data/registry_model/interface.py | 75 ++++++- data/registry_model/registry_pre_oci_model.py | 210 +++++++++++++++++- .../registry_model/test/test_pre_oci_model.py | 160 ++++++++++++- endpoints/verbs/MAINTAINERS | 1 - 9 files changed, 585 insertions(+), 29 deletions(-) delete mode 100644 endpoints/verbs/MAINTAINERS diff --git a/data/model/blob.py b/data/model/blob.py index 6c74f6bde..6f6ff1f14 100644 --- a/data/model/blob.py +++ b/data/model/blob.py @@ -7,6 +7,23 @@ from data.database import (Repository, Namespace, ImageStorage, Image, ImageStor BlobUpload, ImageStorageLocation, db_random_func) +def get_repository_blob_by_digest(repository, blob_digest): + """ Find the content-addressable blob linked to the specified repository. + """ + try: + storage_id_query = (ImageStorage + .select(ImageStorage.id) + .join(Image) + .where(Image.repository == repository, + ImageStorage.content_checksum == blob_digest, + ImageStorage.uploading == False) + .limit(1)) + + return storage_model.get_storage_by_subquery(storage_id_query) + except InvalidImageException: + raise BlobDoesNotExist('Blob does not exist with digest: {0}'.format(blob_digest)) + + def get_repo_blob_by_digest(namespace, repo_name, blob_digest): """ Find the content-addressable blob linked to the specified repository. """ diff --git a/data/model/image.py b/data/model/image.py index 527f1ccb7..1b29ee33d 100644 --- a/data/model/image.py +++ b/data/model/image.py @@ -489,44 +489,33 @@ def find_or_create_derived_storage(source_image, transformation_name, preferred_ new_storage = storage.create_v1_storage(preferred_location) try: - DerivedStorageForImage.create(source_image=source_image, derivative=new_storage, - transformation=trans, uniqueness_hash=uniqueness_hash) + derived = DerivedStorageForImage.create(source_image=source_image, derivative=new_storage, + transformation=trans, uniqueness_hash=uniqueness_hash) except IntegrityError: # Storage was created while this method executed. Just return the existing. 
new_storage.delete_instance(recursive=True) return find_derived_storage_for_image(source_image, transformation_name, varying_metadata) - return new_storage + return derived def find_derived_storage_for_image(source_image, transformation_name, varying_metadata=None): uniqueness_hash = _get_uniqueness_hash(varying_metadata) try: - found = (ImageStorage + found = (DerivedStorageForImage .select(ImageStorage, DerivedStorageForImage) - .join(DerivedStorageForImage) + .join(ImageStorage) + .switch(DerivedStorageForImage) .join(ImageStorageTransformation) .where(DerivedStorageForImage.source_image == source_image, ImageStorageTransformation.name == transformation_name, DerivedStorageForImage.uniqueness_hash == uniqueness_hash) .get()) - - found.locations = {placement.location.name for placement in found.imagestorageplacement_set} return found - except ImageStorage.DoesNotExist: + except DerivedStorageForImage.DoesNotExist: return None -def delete_derived_storage_by_uuid(storage_uuid): - try: - image_storage = storage.get_storage_by_uuid(storage_uuid) - except InvalidImageException: - return - - try: - DerivedStorageForImage.get(derivative=image_storage) - except DerivedStorageForImage.DoesNotExist: - return - - image_storage.delete_instance(recursive=True) +def delete_derived_storage(derived_storage): + derived_storage.derivative.delete_instance(recursive=True) diff --git a/data/model/storage.py b/data/model/storage.py index 8ec237406..a7cbf4517 100644 --- a/data/model/storage.py +++ b/data/model/storage.py @@ -336,7 +336,7 @@ def get_storage_locations(uuid): def save_torrent_info(storage_object, piece_length, pieces): try: - TorrentInfo.create(storage=storage_object, piece_length=piece_length, pieces=pieces) + return TorrentInfo.create(storage=storage_object, piece_length=piece_length, pieces=pieces) except IntegrityError: # TorrentInfo already exists for this storage. pass diff --git a/data/registry_model/datatype.py b/data/registry_model/datatype.py index f5ea0b5ae..1183fd1ea 100644 --- a/data/registry_model/datatype.py +++ b/data/registry_model/datatype.py @@ -30,6 +30,9 @@ def datatype(name, static_fields): raise AttributeError('Unknown field `%s`' % name) + def __repr__(self): + return '<%s> #%s' % (name, self._db_id) + return DataType diff --git a/data/registry_model/datatypes.py b/data/registry_model/datatypes.py index 03ccfc4cd..4701571b8 100644 --- a/data/registry_model/datatypes.py +++ b/data/registry_model/datatypes.py @@ -1,5 +1,11 @@ +import hashlib + +from collections import namedtuple from enum import Enum, unique +from cachetools import lru_cache + +from data import model from data.registry_model.datatype import datatype, requiresinput from image.docker.schema1 import DockerSchema1Manifest @@ -13,6 +19,34 @@ class RepositoryReference(datatype('Repository', [])): return RepositoryReference(db_id=repo_obj.id) + @classmethod + def for_id(cls, repo_id): + return RepositoryReference(db_id=repo_id) + + @property + @lru_cache(maxsize=1) + def _repository_obj(self): + return model.repository.lookup_repository(self._db_id) + + def namespace_name(self): + """ Returns the namespace name of this repository. + """ + repository = self._repository_obj + if repository is None: + return None + + return repository.namespace_user.username + + @property + def name(self): + """ Returns the name of this repository. 
+ """ + repository = self._repository_obj + if repository is None: + return None + + return repository.name + class Label(datatype('Label', ['key', 'value', 'uuid', 'source_type_name', 'media_type_name'])): """ Label represents a label on a manifest. """ @@ -40,7 +74,15 @@ class Tag(datatype('Tag', ['name', 'reversion', 'manifest_digest', 'lifetime_sta lifetime_start_ts=repository_tag.lifetime_start_ts, lifetime_end_ts=repository_tag.lifetime_end_ts, manifest_digest=manifest_digest, - inputs=dict(legacy_image=legacy_image)) + inputs=dict(legacy_image=legacy_image, + repository=RepositoryReference.for_id(repository_tag.repository_id))) + + @property + @requiresinput('repository') + def repository(self, repository): + """ Returns the repository under which this tag lives. + """ + return repository @property @requiresinput('legacy_image') @@ -124,3 +166,76 @@ class SecurityScanStatus(Enum): SCANNED = 'scanned' FAILED = 'failed' QUEUED = 'queued' + + +class ManifestLayer(namedtuple('ManifestLayer', ['layer_info', 'blob'])): + """ Represents a single layer in a manifest. The `layer_info` data will be manifest-type specific, + but will have a few expected fields (such as `digest`). The `blob` represents the associated + blob for this layer, optionally with placements. + """ + + def estimated_size(self, estimate_multiplier): + """ Returns the estimated size of this layer. If the layers' blob has an uncompressed size, + it is used. Otherwise, the compressed_size field in the layer is multiplied by the + multiplier. + """ + if self.blob.uncompressed_size: + return self.blob.uncompressed_size + + return (self.layer_info.compressed_size or 0) * estimate_multiplier + + +class Blob(datatype('Blob', ['uuid', 'digest', 'compressed_size', 'uncompressed_size', + 'uploading'])): + """ Blob represents a content-addressable piece of storage. """ + @classmethod + def for_image_storage(cls, image_storage, storage_path, placements=None): + if image_storage is None: + return None + + return Blob(db_id=image_storage.id, + uuid=image_storage.uuid, + inputs=dict(placements=placements, storage_path=storage_path), + digest=image_storage.content_checksum, + compressed_size=image_storage.image_size, + uncompressed_size=image_storage.uncompressed_size, + uploading=image_storage.uploading) + + @property + @requiresinput('storage_path') + def storage_path(self, storage_path): + """ Returns the path of this blob in storage. """ + # TODO: change this to take in the storage engine? + return storage_path + + @property + @requiresinput('placements') + def placements(self, placements): + """ Returns all the storage placements at which the Blob can be found. """ + return placements + + +class DerivedImage(datatype('DerivedImage', ['verb', 'varying_metadata', 'blob'])): + """ DerivedImage represents an image derived from a manifest via some form of verb. """ + @classmethod + def for_derived_storage(cls, derived, verb, varying_metadata, blob): + return DerivedImage(db_id=derived.id, + verb=verb, + varying_metadata=varying_metadata, + blob=blob) + + @property + def unique_id(self): + """ Returns a unique ID for this derived image. This call will consistently produce the same + unique ID across calls in the same code base. + """ + return hashlib.sha256('%s:%s' % (self.verb, self._db_id)).hexdigest() + + +class TorrentInfo(datatype('TorrentInfo', ['pieces', 'piece_length'])): + """ TorrentInfo represents information to pull a blob via torrent. 
""" + @classmethod + def for_torrent_info(cls, torrent_info): + return TorrentInfo(db_id=torrent_info.id, + pieces=torrent_info.pieces, + piece_length=torrent_info.piece_length) diff --git a/data/registry_model/interface.py b/data/registry_model/interface.py index 203c3f23b..c1014b4d4 100644 --- a/data/registry_model/interface.py +++ b/data/registry_model/interface.py @@ -26,11 +26,12 @@ class RegistryDataInterface(object): or None if none. """ @abstractmethod - def get_manifest_for_tag(self, tag): + def get_manifest_for_tag(self, tag, backfill_if_necessary=False): """ Returns the manifest associated with the given tag. """ @abstractmethod - def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False): + def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False, + include_legacy_image=False): """ Looks up the manifest with the given digest under the given repository and returns it or None if none. """ @@ -131,3 +132,73 @@ class RegistryDataInterface(object): NOTE: This method will only be necessary until we've completed the backfill, at which point it should be removed. """ + + @abstractmethod + def is_namespace_enabled(self, namespace_name): + """ Returns whether the given namespace exists and is enabled. """ + + @abstractmethod + def list_manifest_layers(self, manifest, include_placements=False): + """ Returns an *ordered list* of the layers found in the manifest, starting at the base and + working towards the leaf, including the associated Blob and its placements (if specified). + Returns None if the manifest could not be parsed and validated. + """ + + @abstractmethod + def lookup_derived_image(self, manifest, verb, varying_metadata=None, include_placements=False): + """ + Looks up the derived image for the given manifest, verb and optional varying metadata and + returns it or None if none. + """ + + @abstractmethod + def lookup_or_create_derived_image(self, manifest, verb, storage_location, varying_metadata=None, + include_placements=False): + """ + Looks up the derived image for the given maniest, verb and optional varying metadata + and returns it. If none exists, a new derived image is created. + """ + + @abstractmethod + def get_derived_image_signature(self, derived_image, signer_name): + """ + Returns the signature associated with the derived image and a specific signer or None if none. + """ + + @abstractmethod + def set_derived_image_signature(self, derived_image, signer_name, signature): + """ + Sets the calculated signature for the given derived image and signer to that specified. + """ + + @abstractmethod + def delete_derived_image(self, derived_image): + """ + Deletes a derived image and all of its storage. + """ + + @abstractmethod + def set_derived_image_size(self, derived_image, compressed_size): + """ + Sets the compressed size on the given derived image. + """ + + @abstractmethod + def get_torrent_info(self, blob): + """ + Returns the torrent information associated with the given blob or None if none. + """ + + @abstractmethod + def set_torrent_info(self, blob, piece_length, pieces): + """ + Sets the torrent infomation associated with the given blob to that specified. + """ + + @abstractmethod + def get_repo_blob_by_digest(self, repo_ref, blob_digest, include_placements=False): + """ + Returns the blob in the repository with the given digest, if any or None if none. Note that + there may be multiple records in the same repository for the same blob digest, so the return + value of this function may change. 
+ """ diff --git a/data/registry_model/registry_pre_oci_model.py b/data/registry_model/registry_pre_oci_model.py index 8096979f3..ba0944047 100644 --- a/data/registry_model/registry_pre_oci_model.py +++ b/data/registry_model/registry_pre_oci_model.py @@ -1,4 +1,5 @@ # pylint: disable=protected-access +import logging from collections import defaultdict @@ -8,8 +9,12 @@ from data import database from data import model from data.registry_model.interface import RegistryDataInterface from data.registry_model.datatypes import (Tag, RepositoryReference, Manifest, LegacyImage, Label, - SecurityScanStatus) -from image.docker.schema1 import DockerSchema1ManifestBuilder + SecurityScanStatus, ManifestLayer, Blob, DerivedImage, + TorrentInfo) +from image.docker.schema1 import DockerSchema1ManifestBuilder, ManifestException + + +logger = logging.getLogger(__name__) class PreOCIModel(RegistryDataInterface): @@ -38,11 +43,14 @@ class PreOCIModel(RegistryDataInterface): repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter) return RepositoryReference.for_repo_obj(repo) - def get_manifest_for_tag(self, tag): + def get_manifest_for_tag(self, tag, backfill_if_necessary=False): """ Returns the manifest associated with the given tag. """ try: tag_manifest = database.TagManifest.get(tag_id=tag._db_id) except database.TagManifest.DoesNotExist: + if backfill_if_necessary: + return self.backfill_manifest_for_tag(tag) + return return Manifest.for_tag_manifest(tag_manifest) @@ -171,6 +179,7 @@ class PreOCIModel(RegistryDataInterface): Returns the latest, *active* tag found in the repository, with the matching name or None if none. """ + assert isinstance(tag_name, basestring) tag = model.tag.get_active_tag_for_repo(repository_ref._db_id, tag_name) if tag is None: return None @@ -355,5 +364,200 @@ class PreOCIModel(RegistryDataInterface): return Manifest.for_tag_manifest(tag_manifest) + def is_namespace_enabled(self, namespace_name): + """ Returns whether the given namespace exists and is enabled. """ + namespace = model.user.get_namespace_user(namespace_name) + return namespace is not None and namespace.enabled + + def list_manifest_layers(self, manifest, include_placements=False): + """ Returns an *ordered list* of the layers found in the manifest, starting at the base and + working towards the leaf, including the associated Blob and its placements (if specified). + Returns None if the manifest could not be parsed and validated. 
+ """ + try: + parsed = manifest.get_parsed_manifest() + except ManifestException: + logger.exception('Could not parse and validate manifest `%s`', manifest._db_id) + return None + + try: + tag_manifest = database.TagManifest.get(id=manifest._db_id) + except database.TagManifest.DoesNotExist: + logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id) + return None + + repo = tag_manifest.tag.repository + blob_query = model.storage.lookup_repo_storages_by_content_checksum(repo, parsed.checksums) + storage_map = {blob.content_checksum: blob for blob in blob_query} + + manifest_layers = [] + for layer in parsed.layers: + digest_str = str(layer.digest) + if digest_str not in storage_map: + logger.error('Missing digest `%s` for manifest `%s`', layer.digest, manifest._db_id) + return None + + image_storage = storage_map[digest_str] + placements = None + if include_placements: + placements = list(model.storage.get_storage_locations(image_storage.uuid)) + + blob = Blob.for_image_storage(image_storage, + storage_path=model.storage.get_layer_path(image_storage), + placements=placements) + manifest_layers.append(ManifestLayer(layer, blob)) + + return manifest_layers + + def lookup_derived_image(self, manifest, verb, varying_metadata=None, include_placements=False): + """ + Looks up the derived image for the given manifest, verb and optional varying metadata and + returns it or None if none. + """ + try: + tag_manifest = database.TagManifest.get(id=manifest._db_id) + except database.TagManifest.DoesNotExist: + logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id) + return None + + repo_image = tag_manifest.tag.image + derived = model.image.find_derived_storage_for_image(repo_image, verb, varying_metadata) + return self._build_derived(derived, verb, varying_metadata, include_placements) + + def lookup_or_create_derived_image(self, manifest, verb, storage_location, varying_metadata=None, + include_placements=False): + """ + Looks up the derived image for the given maniest, verb and optional varying metadata + and returns it. If none exists, a new derived image is created. + """ + try: + tag_manifest = database.TagManifest.get(id=manifest._db_id) + except database.TagManifest.DoesNotExist: + logger.exception('Could not find tag manifest for manifest `%s`', manifest._db_id) + return None + + repo_image = tag_manifest.tag.image + derived = model.image.find_or_create_derived_storage(repo_image, verb, storage_location, + varying_metadata) + return self._build_derived(derived, verb, varying_metadata, include_placements) + + def _build_derived(self, derived, verb, varying_metadata, include_placements): + if derived is None: + return None + + derived_storage = derived.derivative + placements = None + if include_placements: + placements = list(model.storage.get_storage_locations(derived_storage.uuid)) + + blob = Blob.for_image_storage(derived_storage, + storage_path=model.storage.get_layer_path(derived_storage), + placements=placements) + + return DerivedImage.for_derived_storage(derived, verb, varying_metadata, blob) + + def get_derived_image_signature(self, derived_image, signer_name): + """ + Returns the signature associated with the derived image and a specific signer or None if none. 
+ """ + try: + derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) + except database.DerivedStorageForImage.DoesNotExist: + return None + + storage = derived_storage.derivative + signature_entry = model.storage.lookup_storage_signature(storage, signer_name) + if signature_entry is None: + return None + + return signature_entry.signature + + def set_derived_image_signature(self, derived_image, signer_name, signature): + """ + Sets the calculated signature for the given derived image and signer to that specified. + """ + try: + derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) + except database.DerivedStorageForImage.DoesNotExist: + return None + + storage = derived_storage.derivative + signature_entry = model.storage.find_or_create_storage_signature(storage, signer_name) + signature_entry.signature = signature + signature_entry.uploading = False + signature_entry.save() + + def delete_derived_image(self, derived_image): + """ + Deletes a derived image and all of its storage. + """ + try: + derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) + except database.DerivedStorageForImage.DoesNotExist: + return None + + model.image.delete_derived_storage(derived_storage) + + def set_derived_image_size(self, derived_image, compressed_size): + """ + Sets the compressed size on the given derived image. + """ + try: + derived_storage = database.DerivedStorageForImage.get(id=derived_image._db_id) + except database.DerivedStorageForImage.DoesNotExist: + return None + + storage_entry = derived_storage.derivative + storage_entry.image_size = compressed_size + storage_entry.uploading = False + storage_entry.save() + + def get_torrent_info(self, blob): + """ + Returns the torrent information associated with the given blob or None if none. + """ + try: + image_storage = database.ImageStorage.get(id=blob._db_id) + except database.ImageStorage.DoesNotExist: + return None + + try: + torrent_info = model.storage.get_torrent_info(image_storage) + except model.TorrentInfoDoesNotExist: + return None + + return TorrentInfo.for_torrent_info(torrent_info) + + def set_torrent_info(self, blob, piece_length, pieces): + """ + Sets the torrent infomation associated with the given blob to that specified. + """ + try: + image_storage = database.ImageStorage.get(id=blob._db_id) + except database.ImageStorage.DoesNotExist: + return None + + torrent_info = model.storage.save_torrent_info(image_storage, piece_length, pieces) + return TorrentInfo.for_torrent_info(torrent_info) + + def get_repo_blob_by_digest(self, repo_ref, blob_digest, include_placements=False): + """ + Returns the blob in the repository with the given digest, if any or None if none. Note that + there may be multiple records in the same repository for the same blob digest, so the return + value of this function may change. 
+ """ + try: + image_storage = model.blob.get_repository_blob_by_digest(repo_ref._db_id, blob_digest) + except model.BlobDoesNotExist: + return None + + placements = None + if include_placements: + placements = list(model.storage.get_storage_locations(image_storage.uuid)) + + return Blob.for_image_storage(image_storage, + storage_path=model.storage.get_layer_path(image_storage), + placements=placements) + pre_oci_model = PreOCIModel() diff --git a/data/registry_model/test/test_pre_oci_model.py b/data/registry_model/test/test_pre_oci_model.py index 9d1d530a6..bdd77637f 100644 --- a/data/registry_model/test/test_pre_oci_model.py +++ b/data/registry_model/test/test_pre_oci_model.py @@ -8,7 +8,8 @@ from app import docker_v2_signing_key from data import model from data.database import (TagManifestLabelMap, TagManifestToManifest, Manifest, ManifestBlob, ManifestLegacyImage, ManifestLabel, TagManifest, RepositoryTag, Image, - TagManifestLabel, TagManifest, TagManifestLabel) + TagManifestLabel, TagManifest, TagManifestLabel, DerivedStorageForImage, + TorrentInfo) from data.registry_model.registry_pre_oci_model import PreOCIModel from data.registry_model.datatypes import RepositoryReference @@ -363,3 +364,160 @@ def test_backfill_manifest_for_tag(repo_namespace, repo_name, clear_rows, pre_oc parsed_manifest = manifest.get_parsed_manifest() assert parsed_manifest.leaf_layer_v1_image_id == legacy_image.docker_image_id assert parsed_manifest.parent_image_ids == {p.docker_image_id for p in legacy_image.parents} + + +@pytest.mark.parametrize('repo_namespace, repo_name', [ + ('devtable', 'simple'), + ('devtable', 'complex'), + ('devtable', 'history'), + ('buynlarge', 'orgrepo'), +]) +def test_backfill_manifest_on_lookup(repo_namespace, repo_name, clear_rows, pre_oci_model): + repository_ref = pre_oci_model.lookup_repository(repo_namespace, repo_name) + tags = pre_oci_model.list_repository_tags(repository_ref) + assert tags + + for tag in tags: + assert not tag.manifest_digest + assert not pre_oci_model.get_manifest_for_tag(tag) + + manifest = pre_oci_model.get_manifest_for_tag(tag, backfill_if_necessary=True) + assert manifest + + updated_tag = pre_oci_model.get_repo_tag(repository_ref, tag.name) + assert updated_tag.manifest_digest == manifest.digest + + +@pytest.mark.parametrize('namespace, expect_enabled', [ + ('devtable', True), + ('buynlarge', True), + + ('disabled', False), +]) +def test_is_namespace_enabled(namespace, expect_enabled, pre_oci_model): + assert pre_oci_model.is_namespace_enabled(namespace) == expect_enabled + + +@pytest.mark.parametrize('repo_namespace, repo_name', [ + ('devtable', 'simple'), + ('devtable', 'complex'), + ('devtable', 'history'), + ('buynlarge', 'orgrepo'), +]) +def test_list_manifest_layers(repo_namespace, repo_name, pre_oci_model): + repository_ref = pre_oci_model.lookup_repository(repo_namespace, repo_name) + tags = pre_oci_model.list_repository_tags(repository_ref) + assert tags + + for tag in tags: + manifest = pre_oci_model.get_manifest_for_tag(tag) + assert manifest + + with assert_query_count(4): + layers = pre_oci_model.list_manifest_layers(manifest) + assert layers + + layers = pre_oci_model.list_manifest_layers(manifest, include_placements=True) + assert layers + + parsed_layers = list(manifest.get_parsed_manifest().layers) + assert len(layers) == len(parsed_layers) + + for index, manifest_layer in enumerate(layers): + assert manifest_layer.layer_info == parsed_layers[index] + assert manifest_layer.blob.digest == str(parsed_layers[index].digest) + assert 
manifest_layer.blob.storage_path + assert manifest_layer.blob.placements + + repo_blob = pre_oci_model.get_repo_blob_by_digest(repository_ref, manifest_layer.blob.digest) + assert repo_blob.digest == manifest_layer.blob.digest + + assert manifest_layer.estimated_size(1) is not None + + +def test_derived_image(pre_oci_model): + # Clear all existing derived storage. + DerivedStorageForImage.delete().execute() + + repository_ref = pre_oci_model.lookup_repository('devtable', 'simple') + tag = pre_oci_model.get_repo_tag(repository_ref, 'latest') + manifest = pre_oci_model.get_manifest_for_tag(tag) + + # Ensure the squashed image doesn't exist. + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {}) is None + + # Create a new one. + squashed = pre_oci_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', {}) + assert pre_oci_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', {}) == squashed + assert squashed.unique_id + + # Check and set the size. + assert squashed.blob.compressed_size is None + pre_oci_model.set_derived_image_size(squashed, 1234) + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {}).blob.compressed_size == 1234 + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {}).unique_id == squashed.unique_id + + # Ensure its returned now. + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {}) == squashed + + # Ensure different metadata results in a different derived image. + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {'foo': 'bar'}) is None + + squashed_foo = pre_oci_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', + {'foo': 'bar'}) + assert squashed_foo != squashed + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {'foo': 'bar'}) == squashed_foo + + assert squashed.unique_id != squashed_foo.unique_id + + # Lookup with placements. + squashed = pre_oci_model.lookup_or_create_derived_image(manifest, 'squash', 'local_us', {}, + include_placements=True) + assert squashed.blob.placements + + # Delete the derived image. + pre_oci_model.delete_derived_image(squashed) + assert pre_oci_model.lookup_derived_image(manifest, 'squash', {}) is None + + +def test_derived_image_signatures(pre_oci_model): + repository_ref = pre_oci_model.lookup_repository('devtable', 'simple') + tag = pre_oci_model.get_repo_tag(repository_ref, 'latest') + manifest = pre_oci_model.get_manifest_for_tag(tag) + + derived = pre_oci_model.lookup_derived_image(manifest, 'squash', {}) + assert derived + + signature = pre_oci_model.get_derived_image_signature(derived, 'gpg2') + assert signature is None + + pre_oci_model.set_derived_image_signature(derived, 'gpg2', 'foo') + assert pre_oci_model.get_derived_image_signature(derived, 'gpg2') == 'foo' + + +def test_torrent_info(pre_oci_model): + # Remove all existing info. + TorrentInfo.delete().execute() + + repository_ref = pre_oci_model.lookup_repository('devtable', 'simple') + tag = pre_oci_model.get_repo_tag(repository_ref, 'latest') + manifest = pre_oci_model.get_manifest_for_tag(tag) + + layers = pre_oci_model.list_manifest_layers(manifest) + assert layers + + assert pre_oci_model.get_torrent_info(layers[0].blob) is None + pre_oci_model.set_torrent_info(layers[0].blob, 2, 'foo') + + torrent_info = pre_oci_model.get_torrent_info(layers[0].blob) + assert torrent_info is not None + assert torrent_info.piece_length == 2 + assert torrent_info.pieces == 'foo' + + # Try setting it again. Nothing should happen. 
+ pre_oci_model.set_torrent_info(layers[0].blob, 3, 'bar') + + torrent_info = pre_oci_model.get_torrent_info(layers[0].blob) + assert torrent_info is not None + assert torrent_info.piece_length == 2 + assert torrent_info.pieces == 'foo' diff --git a/endpoints/verbs/MAINTAINERS b/endpoints/verbs/MAINTAINERS deleted file mode 100644 index e08d8e55b..000000000 --- a/endpoints/verbs/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Joseph Schorr (@josephschorr)
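
For context on how the verbs endpoints are expected to consume these new interface methods once they move off the legacy calls, a rough usage sketch follows. The function name, the varying-metadata contents, and the size multiplier are illustrative assumptions; only the pre_oci_model methods and their signatures come from this patch.

  # Hypothetical sketch (not part of this patch): driving a 'squash' verb purely
  # through the new registry data model interface.
  from data.registry_model.registry_pre_oci_model import pre_oci_model

  def start_squash_verb(namespace_name, repo_name, tag_name, storage_location='local_us'):
    # Reject disabled or missing namespaces before doing any work.
    if not pre_oci_model.is_namespace_enabled(namespace_name):
      return None

    repository_ref = pre_oci_model.lookup_repository(namespace_name, repo_name)
    if repository_ref is None:
      return None

    tag = pre_oci_model.get_repo_tag(repository_ref, tag_name)
    if tag is None:
      return None

    # Backfill the tag manifest if it predates the manifest tables.
    manifest = pre_oci_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
    if manifest is None:
      return None

    # Ordered base-to-leaf layers, with blob placements so the verb can stream
    # each layer out of storage.
    layers = pre_oci_model.list_manifest_layers(manifest, include_placements=True)
    if layers is None:
      return None

    # Find (or create) the derived image under which the squashed blob is stored.
    derived = pre_oci_model.lookup_or_create_derived_image(manifest, 'squash', storage_location,
                                                           varying_metadata={'tag': tag.name})

    # Estimated size drives chunked streaming of the result; the multiplier of 5
    # is an assumption for this sketch, not a value defined by the patch.
    estimated_size = sum(layer.estimated_size(5) for layer in layers)

    # ... build the squashed image, then record its final compressed size via
    # pre_oci_model.set_derived_image_size(derived, compressed_size).
    return derived, layers, estimated_size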