create interfaces for v1 and v2 data model
parent b775458d4b
commit c06d395f96

14 changed files with 1048 additions and 732 deletions
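The changes below are mechanical: every endpoint that previously called the module-level interface imported as `v2` now calls the same operations through a `PreOCIModel` object imported as `model`. As a rough sketch of the shape of that interface — the class and method names below are inferred from the call sites in this diff, not copied from data/interfaces/v2.py, which is not shown here:

# Hypothetical sketch of the data interface these endpoints now consume;
# signatures are reconstructed from the call sites in the hunks below.
class PreOCIModelSketch(object):
  """Mediates between the v2 registry endpoints and the database layer."""

  def get_repository(self, namespace_name, repo_name):
    """Returns the repository, or None if it does not exist."""
    raise NotImplementedError()

  def get_blob_by_digest(self, namespace_name, repo_name, digest):
    """Returns the blob with the given digest in the repository, or None."""
    raise NotImplementedError()

  def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
    """Returns the in-progress blob upload with the given UUID, or None."""
    raise NotImplementedError()

  def get_manifest_by_tag(self, namespace_name, repo_name, tag_name):
    """Returns the manifest currently pointed to by the tag, or None."""
    raise NotImplementedError()

# Endpoints bind one concrete implementation at import time:
# from data.interfaces.v2 import PreOCIModel as model

The indirection means the endpoint modules no longer reach into the database model directly, which presumably clears the way for swapping in a different (OCI-backed) implementation without touching the HTTP layer.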
@@ -8,7 +8,7 @@ import resumablehashlib
 from app import storage, app
 from auth.registry_jwt_auth import process_registry_jwt_auth
 from data import database
-from data.interfaces import v2
+from data.interfaces.v2 import PreOCIModel as model
 from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
@@ -42,7 +42,7 @@ class _InvalidRangeHeader(Exception):
 @cache_control(max_age=31436000)
 def check_blob_exists(namespace_name, repo_name, digest):
   # Find the blob.
-  blob = v2.get_blob_by_digest(namespace_name, repo_name, digest)
+  blob = model.get_blob_by_digest(namespace_name, repo_name, digest)
   if blob is None:
     raise BlobUnknown()
 
@@ -69,7 +69,7 @@ def check_blob_exists(namespace_name, repo_name, digest):
 @cache_control(max_age=31536000)
 def download_blob(namespace_name, repo_name, digest):
   # Find the blob.
-  blob = v2.get_blob_by_digest(namespace_name, repo_name, digest)
+  blob = model.get_blob_by_digest(namespace_name, repo_name, digest)
   if blob is None:
     raise BlobUnknown()
 
@@ -81,7 +81,7 @@ def download_blob(namespace_name, repo_name, digest):
   headers['Accept-Ranges'] = 'bytes'
 
   # Find the storage path for the blob.
-  path = v2.get_blob_path(blob)
+  path = model.get_blob_path(blob)
 
   # Short-circuit by redirecting if the storage supports it.
   logger.debug('Looking up the direct download URL for path: %s', path)
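The short-circuit mentioned above relies on the storage engine optionally producing a direct download URL; when it can, the endpoint answers with a redirect instead of proxying the blob's bytes itself. A minimal sketch of that pattern, assuming `get_direct_download_url` and `stream_read` helpers on the storage object (names taken from this hunk's intent, not verified against the real storage API):

# Hedged sketch: redirect to object storage when it can serve the blob itself.
from flask import Response, redirect

def serve_blob(storage, path, headers):
  # Engines like S3/GCS can mint a pre-signed URL; a local engine returns None.
  direct_url = storage.get_direct_download_url(path)  # assumed helper
  if direct_url is not None:
    resp = redirect(direct_url, code=302)   # client fetches bytes from storage
  else:
    resp = Response(storage.stream_read(path))  # assumed streaming fallback
  for name, value in headers.items():
    resp.headers[name] = value
  return resp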
@@ -115,8 +115,8 @@ def start_blob_upload(namespace_name, repo_name):
   # Begin the blob upload process in the database and storage.
   location_name = storage.preferred_locations[0]
   new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name)
-  repository_exists = v2.create_blob_upload(namespace_name, repo_name, new_upload_uuid,
-                                            location_name, upload_metadata)
+  repository_exists = model.create_blob_upload(namespace_name, repo_name, new_upload_uuid,
+                                               location_name, upload_metadata)
   if not repository_exists:
     raise NameUnknown()
 
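Starting an upload creates two artifacts: a chunked-upload session in the storage engine and a database row, keyed by the new UUID, that tracks the upload's progress. A rough sketch of that pairing, with invented names (`BlobUploadRecord`, `db.save`) standing in for the real model objects:

# Hedged sketch of the blob-upload bookkeeping behind these endpoints.
class BlobUploadRecord(object):  # invented stand-in for the DB row
  def __init__(self, upload_uuid, location_name, storage_metadata):
    self.uuid = upload_uuid
    self.location_name = location_name
    self.storage_metadata = storage_metadata
    self.byte_count = 0  # advanced as chunks arrive

def start_upload(storage, db):
  location_name = storage.preferred_locations[0]
  upload_uuid, metadata = storage.initiate_chunked_upload(location_name)
  record = BlobUploadRecord(upload_uuid, location_name, metadata)
  db.save(record)  # invented persistence call
  return record   # the client PATCHes chunks against record.uuid until done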
@@ -135,7 +135,7 @@ def start_blob_upload(namespace_name, repo_name):
 
   # The user plans to send us the entire body right now.
   # Find the upload.
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
 
@@ -146,7 +146,7 @@ def start_blob_upload(namespace_name, repo_name):
     _abort_range_not_satisfiable(blob_upload.byte_count, new_upload_uuid)
 
   # Save the upload state to the database.
-  v2.update_blob_upload(updated_blob_upload)
+  model.update_blob_upload(updated_blob_upload)
 
   # Finalize the upload process in the database and storage.
   _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
@@ -168,7 +168,7 @@ def start_blob_upload(namespace_name, repo_name):
 @require_repo_write
 @anon_protect
 def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
 
@@ -188,7 +188,7 @@ def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
 @anon_protect
 def upload_chunk(namespace_name, repo_name, upload_uuid):
   # Find the upload.
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
 
@@ -199,7 +199,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
     _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)
 
   # Save the upload state to the database.
-  v2.update_blob_upload(updated_blob_upload)
+  model.update_blob_upload(updated_blob_upload)
 
   # Write the response to the client.
   return Response(
@@ -224,7 +224,7 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
     raise BlobUploadInvalid(detail={'reason': 'Missing digest arg on monolithic upload'})
 
   # Find the upload.
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
 
@@ -254,13 +254,13 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
 @require_repo_write
 @anon_protect
 def cancel_upload(namespace_name, repo_name, upload_uuid):
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
 
   # We delete the record for the upload first, since if the partial upload in
   # storage fails to delete, it doesn't break anything.
-  v2.delete_blob_upload(namespace_name, repo_name, upload_uuid)
+  model.delete_blob_upload(namespace_name, repo_name, upload_uuid)
   storage.cancel_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
                                 blob_upload.storage_metadata)
 
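The comment in this hunk captures a useful ordering rule for two-phase deletes: drop the database record first, so that a failure in the slower, less reliable storage call leaves only an orphaned partial upload (garbage to collect later) rather than a live record pointing at nothing. A generic sketch of the pattern; the helper names here are illustrative, not Quay's:

# Hedged sketch: delete authoritative metadata first, tolerate storage failure.
import logging

logger = logging.getLogger(__name__)

def cancel_two_phase(db, storage, upload):
  db.delete_upload_record(upload.uuid)  # the source of truth goes away first
  try:
    storage.cancel_chunked_upload({upload.location_name}, upload.uuid,
                                  upload.storage_metadata)
  except Exception:
    # Worst case: an orphaned partial upload in storage, which a background
    # garbage collector can reap; clients never see a stale DB record.
    logger.exception('Failed to clean up chunked upload %s', upload.uuid)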
@@ -471,7 +471,7 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
   database's perspective.
   """
   # Create the blob and temporarily tag it.
-  blob_storage = v2.create_blob_and_temp_tag(
+  blob_storage = model.create_blob_and_temp_tag(
     namespace_name,
     repo_name,
     digest,
@@ -482,10 +482,10 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
   # If it doesn't already exist, create the BitTorrent pieces for the blob.
   if blob_upload.piece_sha_state is not None and not already_existed:
     piece_bytes = blob_upload.piece_hashes + blob_upload.piece_sha_state.digest()
-    v2.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
+    model.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
 
   # Delete the blob upload.
-  v2.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)
+  model.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)
 
 
 def _finish_upload(namespace_name, repo_name, blob_upload, digest):
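In the hunk above, `piece_hashes` is the concatenation of SHA-1 digests for every completed fixed-size piece, and `piece_sha_state` is the running hash of the final, possibly short piece, so the two can be joined once the upload ends. A self-contained sketch of that incremental piece hashing using plain hashlib (the imports at the top of this file suggest the real code uses resumablehashlib so the hash state can be persisted between requests):

# Hedged sketch: incremental BitTorrent piece hashing over streamed chunks.
import hashlib

class PieceHasher(object):
  def __init__(self, piece_size):
    self.piece_size = piece_size
    self.piece_hashes = b''          # concatenated SHA-1 digests, 20 bytes each
    self.sha_state = hashlib.sha1()  # hash of the current, unfinished piece
    self.filled = 0                  # bytes already in the current piece

  def update(self, data):
    while data:
      take = min(len(data), self.piece_size - self.filled)
      self.sha_state.update(data[:take])
      self.filled += take
      data = data[take:]
      if self.filled == self.piece_size:  # piece complete; start the next one
        self.piece_hashes += self.sha_state.digest()
        self.sha_state = hashlib.sha1()
        self.filled = 0

  def finish(self):
    # Mirrors piece_hashes + piece_sha_state.digest() in the hunk above: the
    # digest of the trailing short piece is appended at the end.
    return self.piece_hashes + self.sha_state.digest()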
@@ -3,7 +3,7 @@ from flask import jsonify
 from auth.registry_jwt_auth import process_registry_jwt_auth, get_granted_entity
 from endpoints.decorators import anon_protect
 from endpoints.v2 import v2_bp, paginate
-from data.interfaces import v2
+from data.interfaces.v2 import PreOCIModel as model
 
 @v2_bp.route('/_catalog', methods=['GET'])
 @process_registry_jwt_auth()
@@ -15,7 +15,7 @@ def catalog_search(limit, offset, pagination_callback):
   if entity:
     username = entity.user.username
 
-  visible_repositories = v2.get_visible_repositories(username, limit+1, offset)
+  visible_repositories = model.get_visible_repositories(username, limit+1, offset)
   response = jsonify({
     'repositories': ['%s/%s' % (repo.namespace_name, repo.name)
                      for repo in visible_repositories][0:limit],
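Fetching `limit+1` rows and then slicing back to `limit` is the standard trick for detecting whether another page exists without issuing a second COUNT query: an extra row means there is more. A small illustration of the idea, with generic names rather than the actual `paginate` decorator:

# Hedged sketch: over-fetch by one row to learn whether a next page exists.
def page_of(fetch, limit, offset):
  rows = fetch(limit + 1, offset)  # ask for one row more than we will return
  has_more = len(rows) > limit     # the extra row only signals "more pages"
  return rows[:limit], has_more

# Example: page_of(lambda l, o: list(range(o, o + l)), limit=3, offset=0)
# returns ([0, 1, 2], True).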
@@ -8,8 +8,7 @@ import features
 
 from app import docker_v2_signing_key, app, metric_queue
 from auth.registry_jwt_auth import process_registry_jwt_auth
-from data import model
-from data.interfaces import v2
+from data.interfaces.v2 import PreOCIModel as model
 from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.decorators import anon_protect
@@ -24,6 +23,7 @@ from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
 from util.registry.replication import queue_storage_replication
 from util.names import VALID_TAG_PATTERN
 
+
 logger = logging.getLogger(__name__)
 
 
@@ -37,9 +37,9 @@ MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN)
 @require_repo_read
 @anon_protect
 def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
-  manifest = v2.get_manifest_by_tag(namespace_name, repo_name, manifest_ref)
+  manifest = model.get_manifest_by_tag(namespace_name, repo_name, manifest_ref)
   if manifest is None:
-    has_tag = v2.has_active_tag(namespace_name, repo_name, manifest_ref)
+    has_tag = model.has_active_tag(namespace_name, repo_name, manifest_ref)
     if not has_tag:
       raise ManifestUnknown()
 
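The fallback in this hunk exists because a tag created through the v1 protocol may have no stored manifest: when the tag is active but a manifest is missing, the endpoint synthesizes one on the fly (see `_generate_and_store_manifest` later in this file); an unknown tag remains a hard 404. A compact sketch of that lookup-or-synthesize shape:

# Hedged sketch of the lookup-or-synthesize fallback used above.
def manifest_for_tag(model, generate, namespace_name, repo_name, tag_name):
  manifest = model.get_manifest_by_tag(namespace_name, repo_name, tag_name)
  if manifest is None:
    # A v1 push can leave an active tag with no stored manifest; only then
    # is it worth generating one.
    if not model.has_active_tag(namespace_name, repo_name, tag_name):
      return None
    manifest = generate(namespace_name, repo_name, tag_name)
  return manifest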
@@ -47,7 +47,7 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
   if manifest is None:
     raise ManifestUnknown()
 
-  repo = v2.get_repository(namespace_name, repo_name)
+  repo = model.get_repository(namespace_name, repo_name)
   if repo is not None:
     track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01)
     metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])
@@ -65,12 +65,12 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
 @require_repo_read
 @anon_protect
 def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
-  manifest = v2.get_manifest_by_digest(namespace_name, repo_name, manifest_ref)
+  manifest = model.get_manifest_by_digest(namespace_name, repo_name, manifest_ref)
   if manifest is None:
     # Without a tag name to reference, we can't make an attempt to generate the manifest
     raise ManifestUnknown()
 
-  repo = v2.get_repository(namespace_name, repo_name)
+  repo = model.get_repository(namespace_name, repo_name)
   if repo is not None:
     track_and_log('pull_repo', repo)
     metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])
@@ -137,7 +137,7 @@ def _write_manifest(namespace_name, repo_name, manifest):
     raise NameInvalid()
 
   # Ensure that the repository exists.
-  repo = v2.get_repository(namespace_name, repo_name)
+  repo = model.get_repository(namespace_name, repo_name)
   if repo is None:
     raise NameInvalid()
 
@@ -145,7 +145,7 @@ def _write_manifest(namespace_name, repo_name, manifest):
     raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})
 
   # Ensure all the blobs in the manifest exist.
-  storage_map = v2.lookup_blobs_by_digest(namespace_name, repo_name, manifest.checksums)
+  storage_map = model.lookup_blobs_by_digest(namespace_name, repo_name, manifest.checksums)
   for layer in manifest.layers:
     digest_str = str(layer.digest)
     if digest_str not in storage_map:
@@ -154,13 +154,13 @@ def _write_manifest(namespace_name, repo_name, manifest):
   # Lookup all the images and their parent images (if any) inside the manifest.
   # This will let us know which v1 images we need to synthesize and which ones are invalid.
   all_image_ids = list(manifest.parent_image_ids | manifest.image_ids)
-  images_map = v2.get_docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)
+  images_map = model.get_docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)
 
   # Rewrite any v1 image IDs that do not match the checksum in the database.
   try:
     rewritten_images = list(manifest.rewrite_invalid_image_ids(images_map))
     for rewritten_image in rewritten_images:
-      v1_metadata = v2.synthesize_v1_image(
+      model.synthesize_v1_image(
         repo,
         storage_map[rewritten_image.content_checksum],
         rewritten_image.image_id,
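Synthesis here backfills the v1 image rows that a schema-1 manifest implies but that the repository may never have seen via a v1 push; each rewritten image carries a content checksum matched against a known blob in `storage_map`. One common way to make such rewrites stable — and this derivation is an assumption for illustration, not taken from `rewrite_invalid_image_ids`, whose body is not shown — is to derive each image ID deterministically from its parent and its layer checksum:

# Hedged sketch: derive deterministic v1-style image IDs from content, so a
# re-pushed manifest maps to the same synthesized images every time.
import hashlib

def synthesized_image_id(parent_image_id, content_checksum):
  seed = '%s|%s' % (parent_image_id or '', content_checksum)
  return hashlib.sha256(seed.encode('utf-8')).hexdigest()

def rewrite_ids(layer_checksums):  # checksums ordered root first
  parent = None
  for checksum in layer_checksums:
    parent = synthesized_image_id(parent, checksum)
    yield parent  # a stable ID chain, one per layer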
@@ -175,8 +175,8 @@ def _write_manifest(namespace_name, repo_name, manifest):
 
   # Store the manifest pointing to the tag.
   leaf_layer_id = rewritten_images[-1].image_id
-  v2.save_manifest(namespace_name, repo_name, manifest.tag, leaf_layer_id, manifest.digest,
-                   manifest.bytes)
+  model.save_manifest(namespace_name, repo_name, manifest.tag, leaf_layer_id, manifest.digest,
+                      manifest.bytes)
 
   # Queue all blob manifests for replication.
   # TODO(jschorr): Find a way to optimize this insertion.
@@ -213,7 +213,7 @@ def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
   Note: there is no equivalent method for deleting by tag name because it is
   forbidden by the spec.
   """
-  tags = v2.delete_manifest_by_digest(namespace_name, repo_name, manifest_ref)
+  tags = model.delete_manifest_by_digest(namespace_name, repo_name, manifest_ref)
   if not tags:
     raise ManifestUnknown()
 
@@ -225,9 +225,9 @@ def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
 
 def _generate_and_store_manifest(namespace_name, repo_name, tag_name):
   # Find the v1 metadata for this image and its parents.
-  v1_metadata = v2.get_docker_v1_metadata_by_tag(namespace_name, repo_name, tag_name)
-  parents_v1_metadata = v2.get_parents_docker_v1_metadata(namespace_name, repo_name,
-                                                          v1_metadata.image_id)
+  v1_metadata = model.get_docker_v1_metadata_by_tag(namespace_name, repo_name, tag_name)
+  parents_v1_metadata = model.get_parents_docker_v1_metadata(namespace_name, repo_name,
+                                                             v1_metadata.image_id)
 
   # If the manifest is being generated under the library namespace, then we make its namespace
   # empty.
@@ -248,6 +248,6 @@ def _generate_and_store_manifest(namespace_name, repo_name, tag_name):
   manifest = builder.build(docker_v2_signing_key)
 
   # Write the manifest to the DB.
-  v2.create_manifest_and_update_tag(namespace_name, repo_name, tag_name, manifest.digest,
-                                    manifest.bytes)
+  model.create_manifest_and_update_tag(namespace_name, repo_name, tag_name, manifest.digest,
+                                       manifest.bytes)
   return manifest
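Generation walks the tag's v1 metadata chain, assembles a schema-1 manifest, and signs it with the registry's key before storing it. The sketch below mirrors the `builder.build(docker_v2_signing_key)` call visible in the hunk, but the builder's constructor and `add_layer` method are assumptions, not the actual builder API:

# Hedged sketch: synthesize and sign a schema-1 manifest from v1 metadata.
def generate_manifest(builder_cls, signing_key, namespace_name, repo_name,
                      tag_name, v1_metadata, parents_v1_metadata):
  # Docker schema 1 lists layers leaf-to-root, one v1-compatibility entry each.
  builder = builder_cls(namespace_name, repo_name, tag_name)  # assumed ctor
  for metadata in [v1_metadata] + list(parents_v1_metadata):
    builder.add_layer(metadata.content_checksum, metadata.compat_json)  # assumed
  return builder.build(signing_key)  # returns the signed manifest, as above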
@@ -5,7 +5,7 @@ from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, paginate
 from endpoints.v2.errors import NameUnknown
 from endpoints.decorators import anon_protect
-from data.interfaces import v2
+from data.interfaces.v2 import PreOCIModel as model
 
 @v2_bp.route('/<repopath:repository>/tags/list', methods=['GET'])
 @parse_repository_name()
@@ -14,11 +14,11 @@ from data.interfaces import v2
 @anon_protect
 @paginate()
 def list_all_tags(namespace_name, repo_name, limit, offset, pagination_callback):
-  repo = v2.get_repository(namespace_name, repo_name)
+  repo = model.get_repository(namespace_name, repo_name)
   if repo is None:
     raise NameUnknown()
 
-  tags = v2.repository_tags(namespace_name, repo_name, limit, offset)
+  tags = model.repository_tags(namespace_name, repo_name, limit, offset)
   response = jsonify({
     'name': '{0}/{1}'.format(namespace_name, repo_name),
     'tags': [tag.name for tag in tags],
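The payload assembled here is the standard Docker Registry HTTP API v2 tags-list document; for an illustrative repository acme/app with two tags, the endpoint would return roughly:

{
  "name": "acme/app",
  "tags": ["latest", "v1.0"]
}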
@@ -11,7 +11,7 @@ from auth.permissions import (ModifyRepositoryPermission, ReadRepositoryPermissi
                               CreateRepositoryPermission)
 from endpoints.v2 import v2_bp
 from endpoints.decorators import anon_protect
-from data.interfaces import v2
+from data.interfaces.v2 import PreOCIModel as model
 from util.cache import no_cache
 from util.names import parse_namespace_repository, REPOSITORY_NAME_REGEX
 from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
@@ -96,7 +96,7 @@ def generate_registry_jwt():
   if user is not None or token is not None:
     # Lookup the repository. If it exists, make sure the entity has modify
     # permission. Otherwise, make sure the entity has create permission.
-    repo = v2.get_repository(namespace, reponame)
+    repo = model.get_repository(namespace, reponame)
     if repo:
       if ModifyRepositoryPermission(namespace, reponame).can():
         final_actions.append('push')
@@ -105,7 +105,7 @@ def generate_registry_jwt():
     else:
       if CreateRepositoryPermission(namespace).can() and user is not None:
         logger.debug('Creating repository: %s/%s', namespace, reponame)
-        v2.create_repository(namespace, reponame, user)
+        model.create_repository(namespace, reponame, user)
         final_actions.append('push')
       else:
         logger.debug('No permission to create repository %s/%s', namespace, reponame)
@@ -113,7 +113,7 @@ def generate_registry_jwt():
   if 'pull' in actions:
     # Grant pull if the user can read the repo or it is public.
     if (ReadRepositoryPermission(namespace, reponame).can() or
-        v2.repository_is_public(namespace, reponame)):
+        model.repository_is_public(namespace, reponame)):
       final_actions.append('pull')
     else:
       logger.debug('No permission to pull repository %s/%s', namespace, reponame)
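Across these last three hunks, the token endpoint reduces the client's requested actions to the set its permissions justify: 'push' needs modify rights, or create rights on a missing repository (which is then created on the fly), and 'pull' needs read rights or a public repository; the surviving actions end up in the bearer token's access claim. A condensed sketch of that reduction, with boolean stand-ins for the permission objects:

# Hedged sketch of reducing requested actions to granted ones for the JWT.
def grant_actions(requested, can_modify, can_create, can_read, is_public,
                  repo_exists):
  final_actions = []
  if 'push' in requested:
    if repo_exists:
      if can_modify:
        final_actions.append('push')
    elif can_create:
      final_actions.append('push')  # the repository is created on the fly
  if 'pull' in requested and (can_read or is_public):
    final_actions.append('pull')
  return final_actions              # becomes the token's "actions" claim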