9e16a989f5
For the simple manifest we are now down from 41 queries to 14. The biggest changes:
- Only synthesize the V1 image rows if we haven't already found them in the database
- Thread the repository object through to the other model method calls and use it instead of loading it again and again
296 lines · 12 KiB · Python
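To illustrate the two changes called out in the commit message, here is a minimal, hypothetical caller-side sketch. The `_ensure_v1_images` helper and the `manifest_layers` structure (with `v1_metadata`, `blob`, and `compat_json` attributes) are assumptions for illustration and are not part of this file; only the `data_model` methods it calls come from the code below.

# Hypothetical caller sketch (not part of this file): reuse image rows already
# in the database and synthesize only the missing V1 rows, threading the
# already-loaded repository object through instead of re-querying it.
def _ensure_v1_images(repository, manifest_layers):
  # Single bulk query for every image ID referenced by the manifest.
  found = data_model.get_docker_v1_metadata_by_image_id(
    repository, [layer.v1_metadata.image_id for layer in manifest_layers])

  images = {}
  for layer in manifest_layers:
    image_id = layer.v1_metadata.image_id
    if image_id in found:
      # Row already exists; no synthetic V1 image needed.
      images[image_id] = found[image_id]
      continue

    # Only synthesize the rows that were not found above.
    images[image_id] = data_model.synthesize_v1_image(
      repository, layer.blob, image_id, layer.v1_metadata.created,
      layer.v1_metadata.comment, layer.v1_metadata.command,
      layer.compat_json, layer.v1_metadata.parent_image_id)
  return images

The query savings come from one bulk lookup per repository plus one synthesize call per genuinely missing image, rather than a fresh repository load for every image.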
from peewee import IntegrityError

from data import model, database
from data.model import DataModelException
from endpoints.v2.models_interface import (
    Blob,
    BlobUpload,
    DockerRegistryV2DataInterface,
    ManifestJSON,
    Repository,
    RepositoryReference,
    Tag,)
from image.docker.v1 import DockerV1Metadata

_MEDIA_TYPE = "application/vnd.docker.distribution.manifest.v1+prettyjws"


class PreOCIModel(DockerRegistryV2DataInterface):
  """
  PreOCIModel implements the data model for the v2 Docker Registry protocol using a database schema
  before it was changed to support the OCI specification.
  """

  def create_repository(self, namespace_name, repo_name, creating_user=None):
    return model.repository.create_repository(namespace_name, repo_name, creating_user)

  def get_repository(self, namespace_name, repo_name):
    repo = model.repository.get_repository(namespace_name, repo_name)
    if repo is None:
      return None
    return _repository_for_repo(repo)

  def has_active_tag(self, namespace_name, repo_name, tag_name):
    try:
      model.tag.get_active_tag(namespace_name, repo_name, tag_name)
      return True
    except database.RepositoryTag.DoesNotExist:
      return False

  def has_tag(self, namespace_name, repo_name, tag_name):
    try:
      model.tag.get_possibly_expired_tag(namespace_name, repo_name, tag_name)
      return True
    except database.RepositoryTag.DoesNotExist:
      return False

  def get_manifest_by_tag(self, namespace_name, repo_name, tag_name):
    try:
      manifest = model.tag.load_tag_manifest(namespace_name, repo_name, tag_name)
      return ManifestJSON(digest=manifest.digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
    except model.InvalidManifestException:
      return None

  def get_manifest_by_digest(self, namespace_name, repo_name, digest):
    try:
      manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
      return ManifestJSON(digest=digest, json=manifest.json_data, media_type=_MEDIA_TYPE)
    except model.InvalidManifestException:
      return None

  def delete_manifest_by_digest(self, namespace_name, repo_name, digest):
    def _tag_view(tag):
      return Tag(name=tag.name, repository=RepositoryReference(
        id=tag.repository_id,
        name=repo_name,
        namespace_name=namespace_name,))

    tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, digest)
    return [_tag_view(tag) for tag in tags]

  def get_docker_v1_metadata_by_tag(self, namespace_name, repo_name, tag_name):
    try:
      repo_img = model.tag.get_tag_image(namespace_name, repo_name, tag_name, include_storage=True)
      return _docker_v1_metadata(namespace_name, repo_name, repo_img)
    except DataModelException:
      return None

  def get_docker_v1_metadata_by_image_id(self, repository, docker_image_ids):
    images_query = model.image.lookup_repository_images(repository.id, docker_image_ids)
    return {
      image.docker_image_id: _docker_v1_metadata(repository.namespace_name, repository.name, image)
      for image in images_query
    }

  def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id):
    repo_image = model.image.get_repo_image(namespace_name, repo_name, docker_image_id)
    if repo_image is None:
      return []

    parents = model.image.get_parent_images(namespace_name, repo_name, repo_image)
    return [_docker_v1_metadata(namespace_name, repo_name, image) for image in parents]

  def create_manifest_and_update_tag(self, namespace_name, repo_name, tag_name, manifest_digest,
                                     manifest_bytes):
    try:
      model.tag.associate_generated_tag_manifest(namespace_name, repo_name, tag_name,
                                                 manifest_digest, manifest_bytes)
    except IntegrityError:
      # It's already there!
      pass

  def synthesize_v1_image(self, repository, storage, image_id, created, comment, command,
                          compat_json, parent_image_id):
    parent_image = None
    if parent_image_id is not None:
      parent_image = model.image.get_image(repository.id, parent_image_id)
      if parent_image is None:
        raise DataModelException('Unknown parent image: %s' % parent_image_id)

    repo_image = model.image.synthesize_v1_image(repository.id, storage.id, storage.size,
                                                 image_id, created, comment, command, compat_json,
                                                 parent_image)
    return _docker_v1_metadata(repository.namespace_name, repository.name, repo_image)

  def save_manifest(self, repository, tag_name, leaf_layer_docker_id, manifest_digest,
                    manifest_bytes):
    (_, newly_created) = model.tag.store_tag_manifest_for_repo(
      repository.id, tag_name, leaf_layer_docker_id, manifest_digest, manifest_bytes)
    return newly_created

  def repository_tags(self, namespace_name, repo_name, limit, offset):
    def _tag_view(tag):
      return Tag(name=tag.name, repository=RepositoryReference(
        id=tag.repository_id,
        name=repo_name,
        namespace_name=namespace_name,))

    tags_query = model.tag.list_repository_tags(namespace_name, repo_name)
    tags_query = tags_query.limit(limit).offset(offset)
    return [_tag_view(tag) for tag in tags_query]

  def get_visible_repositories(self, username, limit, offset, include_public=None):
    if include_public is None:
      include_public = (username is None)

    query = model.repository.get_visible_repositories(username, kind_filter='image',
                                                      include_public=include_public)
    query = query.limit(limit).offset(offset)
    return [_repository_for_repo(repo) for repo in query]

  def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name,
                         storage_metadata):
    try:
      model.blob.initiate_upload(namespace_name, repo_name, upload_uuid, location_name,
                                 storage_metadata)
      return True
    except database.Repository.DoesNotExist:
      return False

  def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
    try:
      found = model.blob.get_blob_upload(namespace_name, repo_name, upload_uuid)
    except model.InvalidBlobUpload:
      return None

    return BlobUpload(
      repo_namespace_name=namespace_name,
      repo_name=repo_name,
      uuid=upload_uuid,
      byte_count=found.byte_count,
      uncompressed_byte_count=found.uncompressed_byte_count,
      chunk_count=found.chunk_count,
      sha_state=found.sha_state,
      piece_sha_state=found.piece_sha_state,
      piece_hashes=found.piece_hashes,
      location_name=found.location.name,
      storage_metadata=found.storage_metadata,)

  def update_blob_upload(self, blob_upload):
    # Lookup the blob upload object.
    try:
      blob_upload_record = model.blob.get_blob_upload(blob_upload.repo_namespace_name,
                                                      blob_upload.repo_name, blob_upload.uuid)
    except model.InvalidBlobUpload:
      return

    blob_upload_record.uncompressed_byte_count = blob_upload.uncompressed_byte_count
    blob_upload_record.piece_hashes = blob_upload.piece_hashes
    blob_upload_record.piece_sha_state = blob_upload.piece_sha_state
    blob_upload_record.storage_metadata = blob_upload.storage_metadata
    blob_upload_record.byte_count = blob_upload.byte_count
    blob_upload_record.chunk_count = blob_upload.chunk_count
    blob_upload_record.sha_state = blob_upload.sha_state
    blob_upload_record.save()

  def delete_blob_upload(self, namespace_name, repo_name, uuid):
    try:
      found = model.blob.get_blob_upload(namespace_name, repo_name, uuid)
      found.delete_instance()
    except model.InvalidBlobUpload:
      return

  def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload,
                               expiration_sec):
    location_obj = model.storage.get_image_location_for_name(blob_upload.location_name)
    blob_record = model.blob.store_blob_record_and_temp_link(
      namespace_name, repo_name, blob_digest, location_obj.id, blob_upload.byte_count,
      expiration_sec, blob_upload.uncompressed_byte_count)
    return Blob(
      id=blob_record.id,
      uuid=blob_record.uuid,
      digest=blob_digest,
      size=blob_upload.byte_count,
      locations=[blob_upload.location_name],
      cas_path=blob_record.cas_path
    )

  def lookup_blobs_by_digest(self, repository, digests):
    def _blob_view(blob_record):
      return Blob(
        id=blob_record.id,
        uuid=blob_record.uuid,
        digest=blob_record.content_checksum,
        size=blob_record.image_size,
        cas_path=blob_record.cas_path,
        locations=None,  # Note: Locations is None in this case.
      )

    query = model.storage.lookup_repo_storages_by_content_checksum(repository.id, digests)
    return {storage.content_checksum: _blob_view(storage) for storage in query}

  def get_blob_by_digest(self, namespace_name, repo_name, digest):
    try:
      blob_record = model.blob.get_repo_blob_by_digest(namespace_name, repo_name, digest)
      return Blob(
        id=blob_record.id,
        uuid=blob_record.uuid,
        digest=digest,
        size=blob_record.image_size,
        locations=blob_record.locations,
        cas_path=blob_record.cas_path
      )
    except model.BlobDoesNotExist:
      return None

  def save_bittorrent_pieces(self, blob, piece_size, piece_bytes):
    blob_record = model.storage.get_storage_by_uuid(blob.uuid)
    model.storage.save_torrent_info(blob_record, piece_size, piece_bytes)

  def create_manifest_labels(self, namespace_name, repo_name, manifest_digest, labels):
    if not labels:
      # No point in doing anything more.
      return

    tag_manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, manifest_digest)
    for label in labels:
      model.label.create_manifest_label(tag_manifest, label.key, label.value, label.source_type,
                                        label.media_type)

  def get_blob_path(self, blob):
    return model.storage.get_layer_path_for_storage(blob.uuid, blob.cas_path, blob.digest)

  def set_manifest_expires_after(self, namespace_name, repo_name, digest, expires_after_sec):
    try:
      manifest = model.tag.load_manifest_by_digest(namespace_name, repo_name, digest)
      manifest.tag.lifetime_end_ts = manifest.tag.lifetime_start_ts + expires_after_sec
      manifest.tag.save()
    except model.InvalidManifestException:
      return


def _docker_v1_metadata(namespace_name, repo_name, repo_image):
  """
  Returns a DockerV1Metadata object for the given Pre-OCI repo_image under the
  repository with the given namespace and name. Note that the namespace and
  name are passed here as an optimization, and are *not checked* against the
  image.
  """
  return DockerV1Metadata(
    namespace_name=namespace_name,
    repo_name=repo_name,
    image_id=repo_image.docker_image_id,
    checksum=repo_image.v1_checksum,
    content_checksum=repo_image.storage.content_checksum,
    compat_json=repo_image.v1_json_metadata,
    created=repo_image.created,
    comment=repo_image.comment,
    command=repo_image.command,
    # TODO: make sure this isn't needed anywhere, as it is expensive to lookup
    parent_image_id=None,
  )


def _repository_for_repo(repo):
  """ Returns a Repository object representing the Pre-OCI data model repo instance given. """
  return Repository(
    id=repo.id,
    name=repo.name,
    namespace_name=repo.namespace_user.username,
    description=repo.description,
    is_public=model.repository.is_repository_public(repo),
    kind=model.repository.get_repo_kind_name(repo),
    trust_enabled=repo.trust_enabled,)


data_model = PreOCIModel()
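For context, a minimal usage sketch of the data_model singleton exported above. This is hypothetical endpoint-side code; the helper name and its arguments are placeholders, while the methods it calls are defined in the file.

# Hypothetical endpoint-side usage of the data_model singleton.
def _fetch_manifest_digest(namespace_name, repo_name, tag_name):
  repo = data_model.get_repository(namespace_name, repo_name)
  if repo is None:
    return None
  if not data_model.has_active_tag(namespace_name, repo_name, tag_name):
    return None
  manifest = data_model.get_manifest_by_tag(namespace_name, repo_name, tag_name)
  return manifest.digest if manifest is not None else None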