Convert V2's catalog endpoint to use the new data model interface

Joseph Schorr 2018-10-08 14:26:39 +01:00
parent e91ba98e1b
commit 3a8a913ad3
4 changed files with 17 additions and 641 deletions


@@ -1,15 +1,20 @@
-import features
+from collections import namedtuple
 
 from flask import jsonify
 
+import features
+
 from app import model_cache
 from auth.auth_context import get_authenticated_user, get_authenticated_context
 from auth.registry_jwt_auth import process_registry_jwt_auth
+from data import model
 from data.cache import cache_key
 from endpoints.decorators import anon_protect
 from endpoints.v2 import v2_bp, paginate
-from endpoints.v2.models_interface import Repository
-from endpoints.v2.models_pre_oci import data_model as model
+
+
+class Repository(namedtuple('Repository', ['id', 'namespace_name', 'name'])):
+  pass
 
 
 @v2_bp.route('/_catalog', methods=['GET'])
@@ -26,8 +31,15 @@ def catalog_search(start_id, limit, pagination_callback):
     if username and not get_authenticated_user().enabled:
       return []
 
-    repos = model.get_visible_repositories(username, start_id, limit, include_public=include_public)
-    return [repo._asdict() for repo in repos]
+    query = model.repository.get_visible_repositories(username,
+                                                      kind_filter='image',
+                                                      include_public=include_public,
+                                                      start_id=start_id,
+                                                      limit=limit + 1)
+    # NOTE: The repository ID is in `rid` (not `id`) here, as per the requirements of
+    # the `get_visible_repositories` call.
+    return [Repository(repo.rid, repo.namespace_user.username, repo.name)._asdict()
+            for repo in query]
 
   context_key = get_authenticated_context().unique_key if get_authenticated_context() else None
   catalog_cache_key = cache_key.for_catalog_page(context_key, start_id, limit)
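An aside on the `limit + 1` fetch above: requesting one row beyond the page size is presumably how the paginating wrapper can tell whether another catalog page exists without a separate count query. A minimal sketch of that pattern, with illustrative names rather than this repository's actual `paginate` decorator:

# Illustrative sketch of limit-plus-one pagination; names here are hypothetical
# and this is not the project's `paginate` implementation.
def paginate_results(rows, limit):
  """Given up to `limit + 1` fetched rows, return one page plus the next start id."""
  page = rows[:limit]
  has_more = len(rows) > limit
  # The extra row's id becomes the `start_id` of the following request.
  next_start_id = rows[limit].id if has_more else None
  return page, next_start_id

class _Row(object):
  def __init__(self, id):
    self.id = id

# With limit=2 and three fetched rows, two are returned and the caller learns
# where the next page begins.
page, next_start = paginate_results([_Row(1), _Row(2), _Row(3)], limit=2)
assert [r.id for r in page] == [1, 2] and next_start == 3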


@@ -1,288 +0,0 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from namedlist import namedlist
from six import add_metaclass
class Repository(
namedtuple('Repository', [
'id', 'name', 'namespace_name', 'description', 'is_public', 'kind', 'trust_enabled'])):
"""
Repository represents a namespaced collection of tags.
:type id: int
:type name: string
:type namespace_name: string
:type description: string
:type is_public: bool
:type kind: string
:type trust_enabled: bool
"""
class ManifestJSON(namedtuple('ManifestJSON', ['digest', 'json', 'media_type'])):
"""
ManifestJSON represents a Manifest of any format.
"""
class Tag(namedtuple('Tag', ['id', 'name', 'repository'])):
"""
Tag represents a user-facing alias for referencing a set of Manifests.
"""
class BlobUpload(
namedlist('BlobUpload', [
'uuid', 'byte_count', 'uncompressed_byte_count', 'chunk_count', 'sha_state', 'location_name',
'storage_metadata', 'piece_sha_state', 'piece_hashes', 'repo_namespace_name', 'repo_name'])):
"""
BlobUpload represents the current state of a Blob being uploaded.
"""
class Blob(namedtuple('Blob', ['id', 'uuid', 'digest', 'size', 'locations', 'cas_path'])):
"""
Blob represents an opaque binary blob saved to the storage system.
"""
class RepositoryReference(namedtuple('RepositoryReference', ['id', 'name', 'namespace_name'])):
"""
RepositoryReference represents a reference to a Repository, without its full metadata.
"""
class Label(namedtuple('Label', ['key', 'value', 'source_type', 'media_type'])):
"""
Label represents a key-value pair that describes a particular Manifest.
"""
@add_metaclass(ABCMeta)
class DockerRegistryV2DataInterface(object):
"""
Interface that represents all data store interactions required by the Docker Registry v2 protocol.
"""
@abstractmethod
def create_repository(self, namespace_name, repo_name, creating_user=None):
"""
Creates a new repository under the specified namespace with the given name. The user supplied is
the user creating the repository, if any.
"""
pass
@abstractmethod
def get_repository(self, namespace_name, repo_name):
"""
Returns a repository tuple for the repository with the given name under the given namespace.
Returns None if no such repository was found.
"""
pass
@abstractmethod
def has_active_tag(self, namespace_name, repo_name, tag_name):
"""
Returns whether there is an active tag with the given name under the matching
repository.
"""
pass
@abstractmethod
def get_manifest_by_tag(self, namespace_name, repo_name, tag_name):
"""
Returns the current manifest for the tag with the given name under the matching repository, if
any, or None if none.
"""
pass
@abstractmethod
def get_manifest_by_digest(self, namespace_name, repo_name, digest):
"""
Returns the manifest matching the given digest under the matching repository, if any, or None if
none.
"""
pass
@abstractmethod
def delete_manifest_by_digest(self, namespace_name, repo_name, digest):
"""
Deletes the manifest with the associated digest (if any) and returns all removed tags that
pointed to that manifest. If the manifest was not found, returns an empty list.
"""
pass
@abstractmethod
def get_docker_v1_metadata_by_tag(self, namespace_name, repo_name, tag_name):
"""
Returns the Docker V1 metadata associated with the tag with the given name under the matching
repository, if any. If none, returns None.
"""
pass
@abstractmethod
def get_docker_v1_metadata_by_image_id(self, repository, docker_image_ids):
"""
Returns a map of Docker V1 metadata for each given image ID, matched under the repository with
the given namespace and name. Returns an empty map if the matching repository was not found.
"""
pass
@abstractmethod
def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id):
"""
Returns an ordered list containing the Docker V1 metadata for each parent of the image with the
given docker ID under the matching repository. Returns an empty list if the image was not found.
"""
pass
@abstractmethod
def create_manifest_and_update_tag(self, namespace_name, repo_name, tag_name, manifest):
"""
Creates a new manifest and assigns the tag with the given name under the matching repository to
it.
"""
pass
@abstractmethod
def synthesize_v1_image(self, repository, storage, image_id, created, comment, command,
compat_json, parent_image_id):
"""
Synthesizes a V1 image under the specified repository, pointing to the given storage and returns
the V1 metadata for the synthesized image.
"""
pass
@abstractmethod
def save_manifest(self, repository, tag_name, manifest, blob_map):
"""
Saves a manifest under the matching repository as a tag with the given name.
Returns a boolean indicating whether the tag was newly created.
"""
pass
@abstractmethod
def repository_tags(self, namespace_name, repo_name, start_id, limit):
"""
Returns the active tags under the repository with the given name and namespace.
"""
pass
@abstractmethod
def get_visible_repositories(self, username, start_id, limit):
"""
Returns the repositories visible to the user with the given username, if any.
"""
pass
@abstractmethod
def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name,
storage_metadata):
"""
Creates a blob upload under the matching repository with the given UUID and metadata.
Returns whether the matching repository exists.
"""
pass
@abstractmethod
def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
"""
Searches for a blob upload with the given UUID under the given repository and returns it or None
if none.
"""
pass
@abstractmethod
def update_blob_upload(self, blob_upload):
"""
Saves any changes to the blob upload object given to the backing data store.
Fields that can change:
- uncompressed_byte_count
- piece_hashes
- piece_sha_state
- storage_metadata
- byte_count
- chunk_count
- sha_state
"""
pass
@abstractmethod
def delete_blob_upload(self, namespace_name, repo_name, uuid):
"""
Deletes the blob upload with the given uuid under the matching repository. If none, does
nothing.
"""
pass
@abstractmethod
def mount_blob_and_temp_tag(self, namespace_name, repo_name, existing_blob, expiration_sec):
"""
Mounts an existing blob and links a temporary tag with the specified expiration to it under
the matching repository. Returns True on success and False on failure.
"""
pass
@abstractmethod
def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload,
expiration_sec):
"""
Creates a blob and links a temporary tag with the specified expiration to it under the matching
repository.
"""
pass
@abstractmethod
def get_blob_by_digest(self, namespace_name, repo_name, digest):
"""
Returns the blob with the given digest under the matching repository or None if none.
"""
pass
@abstractmethod
def save_bittorrent_pieces(self, blob, piece_size, piece_bytes):
"""
Saves the BitTorrent piece hashes for the given blob.
"""
pass
@abstractmethod
def create_manifest_labels(self, namespace_name, repo_name, manifest_digest, labels):
"""
Creates new labels for the provided manifest.
"""
pass
@abstractmethod
def get_blob_path(self, blob):
"""
Once everything is moved over, this could be in util.registry and not even touch the database.
"""
pass
@abstractmethod
def set_manifest_expires_after(self, namespace_name, repo_name, digest, expires_after_sec):
"""
Sets that the manifest with given digest expires after the number of seconds from *now*.
"""
pass
@abstractmethod
def lookup_blobs_by_digest(self, repository, digests):
"""
Looks up all blobs with the matching digests under the given repository.
"""
pass
@abstractmethod
def is_namespace_enabled(self, namespace_name):
""" Returns whether the given namespace is enabled. If the namespace doesn't exist,
returns True. """
pass
@abstractmethod
def is_repository_public(self, namespace_name, repo_name):
""" Returns True if the repository with the given name exists and is public. """
pass

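For context on how this interface was meant to be consumed: a backing store subclassed it and supplied concrete methods, and `ABCMeta` refuses to instantiate anything that leaves an abstract method unimplemented. A minimal, hypothetical illustration of that pattern (not code from this repository; only one method is shown):

# Hypothetical sketch of the abc pattern used above; `ExampleInterface` and
# `InMemoryModel` are illustrative names, not part of the codebase.
from abc import ABCMeta, abstractmethod

from six import add_metaclass


@add_metaclass(ABCMeta)
class ExampleInterface(object):
  @abstractmethod
  def is_repository_public(self, namespace_name, repo_name):
    """ Returns True if the repository with the given name exists and is public. """


class InMemoryModel(ExampleInterface):
  def __init__(self, public_repos):
    self._public_repos = set(public_repos)

  def is_repository_public(self, namespace_name, repo_name):
    return (namespace_name, repo_name) in self._public_repos


# A subclass missing the abstract method would raise TypeError on instantiation.
model = InMemoryModel([('devtable', 'simple')])
assert model.is_repository_public('devtable', 'simple')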

@@ -1,323 +0,0 @@
from peewee import IntegrityError
from data import model, database
from data.model import DataModelException
from endpoints.v2.models_interface import (
Blob,
BlobUpload,
DockerRegistryV2DataInterface,
ManifestJSON,
Repository,
RepositoryReference,
Tag,)
from image.docker.v1 import DockerV1Metadata
from image.docker.interfaces import ManifestInterface
from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
_MEDIA_TYPE = DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
class PreOCIModel(DockerRegistryV2DataInterface):
"""
PreOCIModel implements the data model for the v2 Docker Registry protocol using a database schema
before it was changed to support the OCI specification.
"""
def create_repository(self, namespace_name, repo_name, creating_user=None):
return model.repository.create_repository(namespace_name, repo_name, creating_user)
def get_repository(self, namespace_name, repo_name):
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return None
return _repository_for_repo(repo)
def has_active_tag(self, namespace_name, repo_name, tag_name):
try:
model.tag.get_active_tag(namespace_name, repo_name, tag_name)
return True
except database.RepositoryTag.DoesNotExist:
return False
def has_tag(self, namespace_name, repo_name, tag_name):
try:
model.tag.get_possibly_expired_tag(namespace_name, repo_name, tag_name)
return True
except database.RepositoryTag.DoesNotExist:
return False
def get_manifest_by_tag(self, namespace_name, repo_name, tag_name):
try:
manifest = model.tag.load_tag_manifest(namespace_name, repo_name, tag_name)
return ManifestJSON(digest=manifest.digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
except model.InvalidManifestException:
return None
def get_manifest_by_digest(self, namespace_name, repo_name, digest):
try:
manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
return ManifestJSON(digest=digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
except model.InvalidManifestException:
return None
def delete_manifest_by_digest(self, namespace_name, repo_name, digest):
def _tag_view(tag):
return Tag(id=tag.id, name=tag.name, repository=RepositoryReference(
id=tag.repository_id,
name=repo_name,
namespace_name=namespace_name,))
tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, digest)
return [_tag_view(tag) for tag in tags]
def get_docker_v1_metadata_by_tag(self, namespace_name, repo_name, tag_name):
try:
repo_img = model.tag.get_tag_image(namespace_name, repo_name, tag_name, include_storage=True)
return _docker_v1_metadata(namespace_name, repo_name, repo_img)
except DataModelException:
return None
def get_docker_v1_metadata_by_image_id(self, repository, docker_image_ids):
images_query = model.image.lookup_repository_images(repository.id, docker_image_ids)
return {
image.docker_image_id: _docker_v1_metadata(repository.namespace_name, repository.name, image)
for image in images_query
}
def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id):
repo_image = model.image.get_repo_image(namespace_name, repo_name, docker_image_id)
if repo_image is None:
return []
parents = model.image.get_parent_images(namespace_name, repo_name, repo_image)
return [_docker_v1_metadata(namespace_name, repo_name, image) for image in parents]
def create_manifest_and_update_tag(self, namespace_name, repo_name, tag_name, manifest):
assert isinstance(manifest, ManifestInterface)
repo = model.repository.get_repository(namespace_name, repo_name)
if repo is None:
return
blob_map = self.lookup_blobs_by_digest(repo, manifest.checksums)
storage_map = {blob.digest: blob.id for blob_digest, blob in blob_map.iteritems()}
try:
model.tag.associate_generated_tag_manifest(namespace_name, repo_name, tag_name, manifest,
storage_map)
except IntegrityError:
# It's already there!
pass
def synthesize_v1_image(self, repository, storage, image_id, created, comment, command,
compat_json, parent_image_id):
parent_image = None
if parent_image_id is not None:
parent_image = model.image.get_image(repository.id, parent_image_id)
if parent_image is None:
raise DataModelException('Unknown parent image: %s' % parent_image_id)
repo_image = model.image.synthesize_v1_image(repository.id, storage.id, storage.size,
image_id, created, comment, command, compat_json,
parent_image)
return _docker_v1_metadata(repository.namespace_name, repository.name, repo_image)
def save_manifest(self, repository, tag_name, manifest, leaf_layer_id, blob_map):
assert isinstance(manifest, ManifestInterface)
storage_map = {blob.digest: blob.id for blob_digest, blob in blob_map.iteritems()}
(_, newly_created) = model.tag.store_tag_manifest_for_repo(repository.id, tag_name, manifest,
leaf_layer_id, storage_map)
return newly_created
def repository_tags(self, namespace_name, repo_name, start_id, limit):
def _tag_view(tag):
return Tag(id=tag.id, name=tag.name, repository=RepositoryReference(
id=tag.repository_id,
name=repo_name,
namespace_name=namespace_name,))
tags_query = model.tag.list_repository_tags(namespace_name, repo_name)
tags_query = (tags_query
.order_by(database.RepositoryTag.id)
.limit(limit + 1))
if start_id is not None:
tags_query = tags_query.where(database.RepositoryTag.id >= start_id)
return [_tag_view(tag) for tag in tags_query]
def get_visible_repositories(self, username, start_id, limit, include_public=None):
if include_public is None:
include_public = (username is None)
query = model.repository.get_visible_repositories(username,
kind_filter='image',
include_public=include_public,
start_id=start_id,
limit=limit + 1)
return [_repository_for_repo(repo) for repo in query]
def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name,
storage_metadata):
try:
model.blob.initiate_upload(namespace_name, repo_name, upload_uuid, location_name,
storage_metadata)
return True
except database.Repository.DoesNotExist:
return False
def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
try:
found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
except model.InvalidBlobUpload:
return None
return BlobUpload(
repo_namespace_name=namespace_name,
repo_name=repo_name,
uuid=upload_uuid,
byte_count=found.byte_count,
uncompressed_byte_count=found.uncompressed_byte_count,
chunk_count=found.chunk_count,
sha_state=found.sha_state,
piece_sha_state=found.piece_sha_state,
piece_hashes=found.piece_hashes,
location_name=found.location.name,
storage_metadata=found.storage_metadata,)
def update_blob_upload(self, blob_upload):
# Lookup the blob upload object.
try:
blob_upload_record = model.blob.get_blob_upload(blob_upload.repo_namespace_name,
blob_upload.repo_name, blob_upload.uuid)
except model.InvalidBlobUpload:
return
blob_upload_record.uncompressed_byte_count = blob_upload.uncompressed_byte_count
blob_upload_record.piece_hashes = blob_upload.piece_hashes
blob_upload_record.piece_sha_state = blob_upload.piece_sha_state
blob_upload_record.storage_metadata = blob_upload.storage_metadata
blob_upload_record.byte_count = blob_upload.byte_count
blob_upload_record.chunk_count = blob_upload.chunk_count
blob_upload_record.sha_state = blob_upload.sha_state
blob_upload_record.save()
def delete_blob_upload(self, namespace_name, repo_name, uuid):
try:
found = model.blob.get_blob_upload(namespace_name, repo_name, uuid)
found.delete_instance()
except model.InvalidBlobUpload:
return
def mount_blob_and_temp_tag(self, namespace_name, repo_name, existing_blob, expiration_sec):
return model.blob.temp_link_blob(namespace_name, repo_name, existing_blob.digest,
expiration_sec)
def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload,
expiration_sec):
location_obj = model.storage.get_image_location_for_name(blob_upload.location_name)
blob_record = model.blob.store_blob_record_and_temp_link(
namespace_name, repo_name, blob_digest, location_obj.id, blob_upload.byte_count,
expiration_sec, blob_upload.uncompressed_byte_count)
return Blob(
id=blob_record.id,
uuid=blob_record.uuid,
digest=blob_digest,
size=blob_upload.byte_count,
locations=[blob_upload.location_name],
cas_path=blob_record.cas_path
)
def lookup_blobs_by_digest(self, repository, digests):
def _blob_view(blob_record):
return Blob(
id=blob_record.id,
uuid=blob_record.uuid,
digest=blob_record.content_checksum,
size=blob_record.image_size,
cas_path=blob_record.cas_path,
locations=None, # Note: Locations is None in this case.
)
query = model.storage.lookup_repo_storages_by_content_checksum(repository.id, digests)
return {storage.content_checksum: _blob_view(storage) for storage in query}
def get_blob_by_digest(self, namespace_name, repo_name, digest):
try:
blob_record = model.blob.get_repo_blob_by_digest(namespace_name, repo_name, digest)
return Blob(
id=blob_record.id,
uuid=blob_record.uuid,
digest=digest,
size=blob_record.image_size,
locations=list(blob_record.locations),
cas_path=blob_record.cas_path
)
except model.BlobDoesNotExist:
return None
def save_bittorrent_pieces(self, blob, piece_size, piece_bytes):
blob_record = model.storage.get_storage_by_uuid(blob.uuid)
model.storage.save_torrent_info(blob_record, piece_size, piece_bytes)
def create_manifest_labels(self, namespace_name, repo_name, manifest_digest, labels):
if not labels:
# No point in doing anything more.
return
tag_manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, manifest_digest)
for label in labels:
model.label.create_manifest_label(tag_manifest, label.key, label.value, label.source_type,
label.media_type)
def get_blob_path(self, blob):
return model.storage.get_layer_path_for_storage(blob.uuid, blob.cas_path, blob.digest)
def set_manifest_expires_after(self, namespace_name, repo_name, digest, expires_after_sec):
try:
manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
manifest.tag.lifetime_end_ts = manifest.tag.lifetime_start_ts + expires_after_sec
manifest.tag.save()
except model.InvalidManifestException:
return
def is_namespace_enabled(self, namespace_name):
namespace = model.user.get_namespace_user(namespace_name)
return namespace is None or namespace.enabled
def is_repository_public(self, namespace_name, repo_name):
return model.repository.repository_is_public(namespace_name, repo_name)
def _docker_v1_metadata(namespace_name, repo_name, repo_image):
"""
Returns a DockerV1Metadata object for the given Pre-OCI repo_image under the
repository with the given namespace and name. Note that the namespace and
name are passed here as an optimization, and are *not checked* against the
image.
"""
return DockerV1Metadata(
namespace_name=namespace_name,
repo_name=repo_name,
image_id=repo_image.docker_image_id,
checksum=repo_image.v1_checksum,
content_checksum=repo_image.storage.content_checksum,
compat_json=repo_image.v1_json_metadata,
created=repo_image.created,
comment=repo_image.comment,
command=repo_image.command,
# TODO: make sure this isn't needed anywhere, as it is expensive to lookup
parent_image_id=None,
)
def _repository_for_repo(repo):
""" Returns a Repository object representing the Pre-OCI data model repo instance given. """
return Repository(
id=repo.id or repo.rid,
name=repo.name,
namespace_name=repo.namespace_user.username,
description=repo.description,
is_public=model.repository.is_repository_public(repo),
kind=model.repository.get_repo_kind_name(repo),
trust_enabled=repo.trust_enabled,)
data_model = PreOCIModel()
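To illustrate how endpoints consumed this module before its removal: callers imported the `data_model` singleton and got back the plain tuples defined in models_interface rather than Peewee rows. A short, illustrative sketch (the `devtable/simple` repository is the fixture used in the test below; the surrounding logic is hypothetical):

# Illustrative use of the (now removed) pre-OCI data model singleton; the
# error handling shown here is hypothetical, not code from the repository.
from endpoints.v2.models_pre_oci import data_model

if not data_model.is_namespace_enabled('devtable'):
  raise Exception('namespace is disabled')

repo = data_model.get_repository('devtable', 'simple')
if repo is not None:
  # `repo` is the Repository namedtuple from models_interface, not a DB row.
  summary = '%s/%s (public: %s)' % (repo.namespace_name, repo.name, repo.is_public)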


@@ -1,25 +0,0 @@
import hashlib
from playhouse.test_utils import assert_query_count
from data import model
from data.database import ImageStorageLocation
from endpoints.v2.models_pre_oci import data_model
from test.fixtures import *
def test_get_blob_path(initialized_db):
# Add a blob.
digest = 'sha256:' + hashlib.sha256("a").hexdigest()
location = ImageStorageLocation.get(name='local_us')
db_blob = model.blob.store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1,
10000000)
with assert_query_count(1):
blob = data_model.get_blob_by_digest('devtable', 'simple', digest)
assert blob.uuid == db_blob.uuid
# The blob tuple should have everything get_blob_path needs, so there should be no queries.
with assert_query_count(0):
assert data_model.get_blob_path(blob)
assert data_model.get_blob_path(blob) == model.storage.get_layer_path(db_blob)