create interfaces for v1 and v2 data model

Jimmy Zelinskie 2016-08-30 15:05:15 -04:00
parent b775458d4b
commit c06d395f96
14 changed files with 1048 additions and 732 deletions
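The pattern throughout this commit: the v2 registry endpoints stop calling module-level functions (`v2.get_blob_by_digest(...)` and friends) and instead call methods on a data-model interface object bound at import time (`from data.interfaces.v2 import PreOCIModel as model`). The interface module itself is not shown in this file's diff; below is a minimal sketch of what such an interface could look like, assuming an abstract base class behind `PreOCIModel`. Only the method names are taken from the call sites in the diff; the class names, docstrings, and instance binding are assumptions.

# Minimal sketch of the interface pattern; NOT the actual contents of
# data/interfaces/v2.py, which this file's diff does not show. Only the
# method names come from the call sites below; everything else is an
# assumption for illustration.
from abc import ABC, abstractmethod


class RegistryDataInterface(ABC):
  """Operations the v2 blob endpoints need from the data model."""

  @abstractmethod
  def get_blob_by_digest(self, namespace_name, repo_name, digest):
    """Returns the blob with the given digest, or None if not found."""

  @abstractmethod
  def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
    """Returns the in-progress upload with the given UUID, or None."""

  @abstractmethod
  def update_blob_upload(self, blob_upload):
    """Persists the mutated state of an in-progress upload."""


class _PreOCIModel(RegistryDataInterface):
  """Implementation backed by the existing (pre-OCI) database schema."""

  def get_blob_by_digest(self, namespace_name, repo_name, digest):
    raise NotImplementedError  # would delegate to the legacy data.model code

  def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
    raise NotImplementedError

  def update_blob_upload(self, blob_upload):
    raise NotImplementedError


# The diff's call sites invoke methods directly on the imported name
# (model.get_blob_by_digest(...)), which suggests the module exports an
# instance rather than the class; assumed here:
PreOCIModel = _PreOCIModel()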

endpoints/v2/blob.py

@@ -8,7 +8,7 @@ import resumablehashlib
 from app import storage, app
 from auth.registry_jwt_auth import process_registry_jwt_auth
 from data import database
-from data.interfaces import v2
+from data.interfaces.v2 import PreOCIModel as model
 from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
@@ -42,7 +42,7 @@ class _InvalidRangeHeader(Exception):
 @cache_control(max_age=31436000)
 def check_blob_exists(namespace_name, repo_name, digest):
   # Find the blob.
-  blob = v2.get_blob_by_digest(namespace_name, repo_name, digest)
+  blob = model.get_blob_by_digest(namespace_name, repo_name, digest)
   if blob is None:
     raise BlobUnknown()
@@ -69,7 +69,7 @@ def check_blob_exists(namespace_name, repo_name, digest):
 @cache_control(max_age=31536000)
 def download_blob(namespace_name, repo_name, digest):
   # Find the blob.
-  blob = v2.get_blob_by_digest(namespace_name, repo_name, digest)
+  blob = model.get_blob_by_digest(namespace_name, repo_name, digest)
   if blob is None:
     raise BlobUnknown()
@@ -81,7 +81,7 @@ def download_blob(namespace_name, repo_name, digest):
   headers['Accept-Ranges'] = 'bytes'

   # Find the storage path for the blob.
-  path = v2.get_blob_path(blob)
+  path = model.get_blob_path(blob)

   # Short-circuit by redirecting if the storage supports it.
   logger.debug('Looking up the direct download URL for path: %s', path)
@@ -115,8 +115,8 @@ def start_blob_upload(namespace_name, repo_name):
   # Begin the blob upload process in the database and storage.
   location_name = storage.preferred_locations[0]
   new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name)
-  repository_exists = v2.create_blob_upload(namespace_name, repo_name, new_upload_uuid,
-                                            location_name, upload_metadata)
+  repository_exists = model.create_blob_upload(namespace_name, repo_name, new_upload_uuid,
+                                               location_name, upload_metadata)
   if not repository_exists:
     raise NameUnknown()
@@ -135,7 +135,7 @@ def start_blob_upload(namespace_name, repo_name):
     # The user plans to send us the entire body right now.
     # Find the upload.
-    blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
+    blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
     if blob_upload is None:
       raise BlobUploadUnknown()
@@ -146,7 +146,7 @@ def start_blob_upload(namespace_name, repo_name):
     _abort_range_not_satisfiable(blob_upload.byte_count, new_upload_uuid)

   # Save the upload state to the database.
-  v2.update_blob_upload(updated_blob_upload)
+  model.update_blob_upload(updated_blob_upload)

   # Finalize the upload process in the database and storage.
   _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
@@ -168,7 +168,7 @@ def start_blob_upload(namespace_name, repo_name):
 @require_repo_write
 @anon_protect
 def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
@@ -188,7 +188,7 @@ def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
 @anon_protect
 def upload_chunk(namespace_name, repo_name, upload_uuid):
   # Find the upload.
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
@@ -199,7 +199,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
     _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)

   # Save the upload state to the database.
-  v2.update_blob_upload(updated_blob_upload)
+  model.update_blob_upload(updated_blob_upload)

   # Write the response to the client.
   return Response(
@@ -224,7 +224,7 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
     raise BlobUploadInvalid(detail={'reason': 'Missing digest arg on monolithic upload'})

   # Find the upload.
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()
@@ -254,13 +254,13 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
 @require_repo_write
 @anon_protect
 def cancel_upload(namespace_name, repo_name, upload_uuid):
-  blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+  blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
   if blob_upload is None:
     raise BlobUploadUnknown()

   # We delete the record for the upload first, since if the partial upload in
   # storage fails to delete, it doesn't break anything.
-  v2.delete_blob_upload(namespace_name, repo_name, upload_uuid)
+  model.delete_blob_upload(namespace_name, repo_name, upload_uuid)
   storage.cancel_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
                                 blob_upload.storage_metadata)
@@ -471,7 +471,7 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
   database's perspective.
   """
   # Create the blob and temporarily tag it.
-  blob_storage = v2.create_blob_and_temp_tag(
+  blob_storage = model.create_blob_and_temp_tag(
     namespace_name,
     repo_name,
     digest,
@@ -482,10 +482,10 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre
   # If it doesn't already exist, create the BitTorrent pieces for the blob.
   if blob_upload.piece_sha_state is not None and not already_existed:
     piece_bytes = blob_upload.piece_hashes + blob_upload.piece_sha_state.digest()
-    v2.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
+    model.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)

   # Delete the blob upload.
-  v2.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)
+  model.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)


 def _finish_upload(namespace_name, repo_name, blob_upload, digest):
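Routing every database call through the single `model` object also gives the endpoint module one seam where the data layer can be swapped, for example in unit tests. A hypothetical usage sketch follows; none of it is part of this commit, and all names are illustrative only.

# Hypothetical test seam enabled by the refactor. Because blob.py binds the
# model once at import time, a test can replace `endpoints.v2.blob.model`
# with an in-memory stand-in.


class FakeModel:
  """In-memory stand-in for PreOCIModel, for exercising the endpoints."""

  def __init__(self):
    self.blobs = {}    # (namespace, repo, digest) -> blob record
    self.uploads = {}  # upload uuid -> upload state

  def get_blob_by_digest(self, namespace_name, repo_name, digest):
    return self.blobs.get((namespace_name, repo_name, digest))

  def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
    return self.uploads.get(upload_uuid)


# e.g. with pytest:
#   monkeypatch.setattr('endpoints.v2.blob.model', FakeModel())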