from peewee import IntegrityError

from data import model, database
from data.model import DataModelException
from endpoints.v2.models_interface import (
    Blob,
    BlobUpload,
    DockerRegistryV2DataInterface,
    ManifestJSON,
    Repository,
    RepositoryReference,
    Tag,
)
from image.docker.v1 import DockerV1Metadata
from image.docker.interfaces import ManifestInterface
from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE

_MEDIA_TYPE = DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE


class PreOCIModel(DockerRegistryV2DataInterface):
    """
    PreOCIModel implements the data model for the v2 Docker Registry protocol using a database
    schema before it was changed to support the OCI specification.
    """

    def create_repository(self, namespace_name, repo_name, creating_user=None):
        return model.repository.create_repository(namespace_name, repo_name, creating_user)

    def get_repository(self, namespace_name, repo_name):
        repo = model.repository.get_repository(namespace_name, repo_name)
        if repo is None:
            return None
        return _repository_for_repo(repo)

    def has_active_tag(self, namespace_name, repo_name, tag_name):
        try:
            model.tag.get_active_tag(namespace_name, repo_name, tag_name)
            return True
        except database.RepositoryTag.DoesNotExist:
            return False

    def has_tag(self, namespace_name, repo_name, tag_name):
        try:
            model.tag.get_possibly_expired_tag(namespace_name, repo_name, tag_name)
            return True
        except database.RepositoryTag.DoesNotExist:
            return False

    def get_manifest_by_tag(self, namespace_name, repo_name, tag_name):
        try:
            manifest = model.tag.load_tag_manifest(namespace_name, repo_name, tag_name)
            return ManifestJSON(digest=manifest.digest, json=manifest.json_data,
                                media_type=_MEDIA_TYPE)
        except model.InvalidManifestException:
            return None

    def get_manifest_by_digest(self, namespace_name, repo_name, digest):
        try:
            manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
            return ManifestJSON(digest=digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
        except model.InvalidManifestException:
            return None

    def delete_manifest_by_digest(self, namespace_name, repo_name, digest):
        def _tag_view(tag):
            return Tag(id=tag.id,
                       name=tag.name,
                       repository=RepositoryReference(
                           id=tag.repository_id,
                           name=repo_name,
                           namespace_name=namespace_name,
                       ))

        tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, digest)
        return [_tag_view(tag) for tag in tags]

    def get_docker_v1_metadata_by_tag(self, namespace_name, repo_name, tag_name):
        try:
            repo_img = model.tag.get_tag_image(namespace_name, repo_name, tag_name,
                                               include_storage=True)
            return _docker_v1_metadata(namespace_name, repo_name, repo_img)
        except DataModelException:
            return None

    def get_docker_v1_metadata_by_image_id(self, repository, docker_image_ids):
        images_query = model.image.lookup_repository_images(repository.id, docker_image_ids)
        return {
            image.docker_image_id: _docker_v1_metadata(repository.namespace_name, repository.name,
                                                       image)
            for image in images_query
        }

    def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id):
        repo_image = model.image.get_repo_image(namespace_name, repo_name, docker_image_id)
        if repo_image is None:
            return []

        parents = model.image.get_parent_images(namespace_name, repo_name, repo_image)
        return [_docker_v1_metadata(namespace_name, repo_name, image) for image in parents]
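    # The manifest-writing paths below (create_manifest_and_update_tag and save_manifest)
    # reduce a blob map of {digest: Blob} to a plain {digest: storage row id} dict before
    # handing it to the model layer, e.g. {'sha256:<hex>': 42} (placeholder values).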
    def create_manifest_and_update_tag(self, namespace_name, repo_name, tag_name, manifest):
        assert isinstance(manifest, ManifestInterface)
        repo = model.repository.get_repository(namespace_name, repo_name)
        if repo is None:
            return

        blob_map = self.lookup_blobs_by_digest(repo, manifest.checksums)
        storage_map = {blob.digest: blob.id for blob_digest, blob in blob_map.iteritems()}
        try:
            model.tag.associate_generated_tag_manifest(namespace_name, repo_name, tag_name,
                                                       manifest, storage_map)
        except IntegrityError:
            # It's already there!
            pass

    def synthesize_v1_image(self, repository, storage, image_id, created, comment, command,
                            compat_json, parent_image_id):
        parent_image = None
        if parent_image_id is not None:
            parent_image = model.image.get_image(repository.id, parent_image_id)
            if parent_image is None:
                raise DataModelException('Unknown parent image: %s' % parent_image_id)

        repo_image = model.image.synthesize_v1_image(repository.id, storage.id, storage.size,
                                                     image_id, created, comment, command,
                                                     compat_json, parent_image)
        return _docker_v1_metadata(repository.namespace_name, repository.name, repo_image)

    def save_manifest(self, repository, tag_name, manifest, leaf_layer_id, blob_map):
        assert isinstance(manifest, ManifestInterface)
        storage_map = {blob.digest: blob.id for blob_digest, blob in blob_map.iteritems()}
        (_, newly_created) = model.tag.store_tag_manifest_for_repo(repository.id, tag_name,
                                                                   manifest, leaf_layer_id,
                                                                   storage_map)
        return newly_created

    def repository_tags(self, namespace_name, repo_name, start_id, limit):
        def _tag_view(tag):
            return Tag(id=tag.id,
                       name=tag.name,
                       repository=RepositoryReference(
                           id=tag.repository_id,
                           name=repo_name,
                           namespace_name=namespace_name,
                       ))

        tags_query = model.tag.list_repository_tags(namespace_name, repo_name)

        # Fetch one extra row so the caller can tell whether another page exists.
        tags_query = (tags_query
                      .order_by(database.RepositoryTag.id)
                      .limit(limit + 1))

        if start_id is not None:
            tags_query = tags_query.where(database.RepositoryTag.id >= start_id)

        return [_tag_view(tag) for tag in tags_query]

    def get_visible_repositories(self, username, start_id, limit, include_public=None):
        if include_public is None:
            include_public = (username is None)

        # limit + 1: one extra row for pagination, as in repository_tags above.
        query = model.repository.get_visible_repositories(username,
                                                          kind_filter='image',
                                                          include_public=include_public,
                                                          start_id=start_id,
                                                          limit=limit + 1)
        return [_repository_for_repo(repo) for repo in query]

    def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name,
                           storage_metadata):
        try:
            model.blob.initiate_upload(namespace_name, repo_name, upload_uuid, location_name,
                                       storage_metadata)
            return True
        except database.Repository.DoesNotExist:
            return False

    def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
        try:
            found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
        except model.InvalidBlobUpload:
            return None

        return BlobUpload(
            repo_namespace_name=namespace_name,
            repo_name=repo_name,
            uuid=upload_uuid,
            byte_count=found.byte_count,
            uncompressed_byte_count=found.uncompressed_byte_count,
            chunk_count=found.chunk_count,
            sha_state=found.sha_state,
            piece_sha_state=found.piece_sha_state,
            piece_hashes=found.piece_hashes,
            location_name=found.location.name,
            storage_metadata=found.storage_metadata,
        )
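    # The remaining blob-upload methods update, discard, or commit the chunked-upload
    # state created above; update_blob_upload and delete_blob_upload treat an unknown
    # upload UUID as a silent no-op.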
    def update_blob_upload(self, blob_upload):
        # Lookup the blob upload object.
        try:
            blob_upload_record = model.blob.get_blob_upload(blob_upload.repo_namespace_name,
                                                            blob_upload.repo_name,
                                                            blob_upload.uuid)
        except model.InvalidBlobUpload:
            return

        # Copy the mutable upload-tracking fields back onto the database row.
        blob_upload_record.uncompressed_byte_count = blob_upload.uncompressed_byte_count
        blob_upload_record.piece_hashes = blob_upload.piece_hashes
        blob_upload_record.piece_sha_state = blob_upload.piece_sha_state
        blob_upload_record.storage_metadata = blob_upload.storage_metadata
        blob_upload_record.byte_count = blob_upload.byte_count
        blob_upload_record.chunk_count = blob_upload.chunk_count
        blob_upload_record.sha_state = blob_upload.sha_state
        blob_upload_record.save()

    def delete_blob_upload(self, namespace_name, repo_name, uuid):
        try:
            found = model.blob.get_blob_upload(namespace_name, repo_name, uuid)
            found.delete_instance()
        except model.InvalidBlobUpload:
            return

    def mount_blob_and_temp_tag(self, namespace_name, repo_name, existing_blob, expiration_sec):
        return model.blob.temp_link_blob(namespace_name, repo_name, existing_blob.digest,
                                         expiration_sec)

    def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload,
                                 expiration_sec):
        location_obj = model.storage.get_image_location_for_name(blob_upload.location_name)
        blob_record = model.blob.store_blob_record_and_temp_link(
            namespace_name, repo_name, blob_digest, location_obj.id, blob_upload.byte_count,
            expiration_sec, blob_upload.uncompressed_byte_count)
        return Blob(
            id=blob_record.id,
            uuid=blob_record.uuid,
            digest=blob_digest,
            size=blob_upload.byte_count,
            locations=[blob_upload.location_name],
            cas_path=blob_record.cas_path,
        )

    def lookup_blobs_by_digest(self, repository, digests):
        def _blob_view(blob_record):
            return Blob(
                id=blob_record.id,
                uuid=blob_record.uuid,
                digest=blob_record.content_checksum,
                size=blob_record.image_size,
                cas_path=blob_record.cas_path,
                locations=None,  # Note: Locations is None in this case.
            )

        query = model.storage.lookup_repo_storages_by_content_checksum(repository.id, digests)
        return {storage.content_checksum: _blob_view(storage) for storage in query}

    def get_blob_by_digest(self, namespace_name, repo_name, digest):
        try:
            blob_record = model.blob.get_repo_blob_by_digest(namespace_name, repo_name, digest)
            return Blob(
                id=blob_record.id,
                uuid=blob_record.uuid,
                digest=digest,
                size=blob_record.image_size,
                locations=list(blob_record.locations),
                cas_path=blob_record.cas_path,
            )
        except model.BlobDoesNotExist:
            return None

    def save_bittorrent_pieces(self, blob, piece_size, piece_bytes):
        blob_record = model.storage.get_storage_by_uuid(blob.uuid)
        model.storage.save_torrent_info(blob_record, piece_size, piece_bytes)
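    # Manifest labels are attached to the tag manifest row looked up by digest, so they
    # are scoped per manifest rather than per repository.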
    def create_manifest_labels(self, namespace_name, repo_name, manifest_digest, labels):
        if not labels:
            # No point in doing anything more.
            return

        tag_manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name,
                                                         manifest_digest)
        for label in labels:
            model.label.create_manifest_label(tag_manifest, label.key, label.value,
                                              label.source_type, label.media_type)

    def get_blob_path(self, blob):
        return model.storage.get_layer_path_for_storage(blob.uuid, blob.cas_path, blob.digest)

    def set_manifest_expires_after(self, namespace_name, repo_name, digest, expires_after_sec):
        try:
            manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
            manifest.tag.lifetime_end_ts = manifest.tag.lifetime_start_ts + expires_after_sec
            manifest.tag.save()
        except model.InvalidManifestException:
            return

    def is_namespace_enabled(self, namespace_name):
        namespace = model.user.get_namespace_user(namespace_name)
        return namespace is None or namespace.enabled

    def is_repository_public(self, namespace_name, repo_name):
        return model.repository.repository_is_public(namespace_name, repo_name)


def _docker_v1_metadata(namespace_name, repo_name, repo_image):
    """
    Returns a DockerV1Metadata object for the given Pre-OCI repo_image under the repository with
    the given namespace and name. Note that the namespace and name are passed here as an
    optimization, and are *not checked* against the image.
    """
    return DockerV1Metadata(
        namespace_name=namespace_name,
        repo_name=repo_name,
        image_id=repo_image.docker_image_id,
        checksum=repo_image.v1_checksum,
        content_checksum=repo_image.storage.content_checksum,
        compat_json=repo_image.v1_json_metadata,
        created=repo_image.created,
        comment=repo_image.comment,
        command=repo_image.command,

        # TODO: make sure this isn't needed anywhere, as it is expensive to lookup
        parent_image_id=None,
    )


def _repository_for_repo(repo):
    """ Returns a Repository object representing the Pre-OCI data model repo instance given. """
    return Repository(
        id=repo.id or repo.rid,
        name=repo.name,
        namespace_name=repo.namespace_user.username,
        description=repo.description,
        is_public=model.repository.is_repository_public(repo),
        kind=model.repository.get_repo_kind_name(repo),
        trust_enabled=repo.trust_enabled,
    )


data_model = PreOCIModel()
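
# Illustrative usage sketch, not part of the registry code paths: how a caller might
# page through tags via the interface above. The namespace/repository names and page
# size are placeholders, and a configured database connection is assumed.
if __name__ == '__main__':
    PAGE_SIZE = 10
    repo = data_model.get_repository('example-namespace', 'example-repo')
    if repo is not None:
        # repository_tags fetches PAGE_SIZE + 1 rows, so a longer result means another
        # page exists; the extra row's id can be passed as start_id for the next page.
        tags = data_model.repository_tags('example-namespace', 'example-repo',
                                          start_id=None, limit=PAGE_SIZE)
        has_more = len(tags) > PAGE_SIZE
        for tag in tags[:PAGE_SIZE]:
            manifest = data_model.get_manifest_by_tag('example-namespace', 'example-repo',
                                                      tag.name)
            print('%s -> %s' % (tag.name, manifest.digest if manifest else None))
        if has_more:
            print('more tags available; continue with start_id=%s' % tags[-1].id)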