mv data/types image
This change also merges formats into the new image module.
parent a516c08deb
commit 32a6c22b43

14 changed files with 342 additions and 258 deletions
@@ -54,7 +54,7 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
       def callback(num_results, response):
         if num_results <= limit:
           return
-        next_page_token = encrypt_page_token({'offset': limit+offset})
+        next_page_token = encrypt_page_token({'offset': limit + offset})
         link = get_app_url() + url_for(request.endpoint, **request.view_args)
         link += '?%s; rel="next"' % urlencode({'n': limit, 'next_page': next_page_token})
         response.headers['Link'] = link
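For context: the callback in this hunk advertises the next page through a Link response header carrying the page size and an encrypted offset token. With hypothetical values (limit=50, offset=0, a made-up token, and a stand-in app URL), the emitted header would look roughly like:

    Link: https://quay.example/some/endpoint?n=50&next_page=gAAAAA...; rel="next"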
@@ -216,7 +216,7 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
   # Ensure the digest is present before proceeding.
   digest = request.args.get('digest', None)
   if digest is None:
-    raise BlobUploadInvalid()
+    raise BlobUploadInvalid(detail={'reason': 'Missing digest arg on monolithic upload'})
 
   # Find the upload.
   blob_upload = v2.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
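The digest validated in this hunk arrives as a query parameter on the monolithic-upload PUT of the Docker Registry V2 API. A request carrying it looks like this (repository name and upload UUID are hypothetical; the digest shown is the well-known SHA-256 of the empty blob):

    PUT /v2/myorg/myrepo/blobs/uploads/0b9fcd34-...?digest=sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855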
@@ -271,6 +271,9 @@ def delete_digest(namespace_name, repo_name, upload_uuid):
 
 
 def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
+  """
+  Returns a string formatted to be used in the Range header.
+  """
   return '{0}0-{1}'.format('bytes=' if with_bytes_prefix else '', num_uploaded_bytes - 1)
 
 
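Worked examples for the helper above, following directly from its format string:

    _render_range(10)                           # 'bytes=0-9'
    _render_range(10, with_bytes_prefix=False)  # '0-9'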
@@ -327,6 +330,7 @@ def _start_offset_and_length(headers):
     start_offset, length = _parse_range_header(range_header)
   except _InvalidRangeHeader:
     return None, None
 
   return start_offset, length
 
+
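_parse_range_header itself is not part of this hunk; a minimal parser consistent with how _start_offset_and_length consumes it (the bytes=start-end format is an assumption, not shown in this diff) could look like:

    import re

    class _InvalidRangeHeader(Exception):
      pass

    def _parse_range_header(range_header_text):
      # Accept 'bytes=start-end' and return (start_offset, length).
      found = re.match(r'^bytes=(\d+)-(\d+)$', range_header_text)
      if found is None:
        raise _InvalidRangeHeader()

      start = int(found.group(1))
      length = int(found.group(2)) - start + 1
      if length <= 0:
        raise _InvalidRangeHeader()

      return (start, length)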
@@ -339,6 +343,7 @@ def _upload_chunk(blob_upload, start_offset, length):
   # Check for invalidate arguments.
   if None in {blob_upload, start_offset, length}:
     return None
 
+
   if start_offset > 0 and start_offset > blob_upload.byte_count:
     return None
@@ -425,7 +430,7 @@ def _validate_digest(blob_upload, expected_digest):
   computed_digest = digest_tools.sha256_digest_from_hashlib(blob_upload.sha_state)
   if not digest_tools.digests_equal(computed_digest, expected_digest):
     logger.error('Digest mismatch for upload %s: Expected digest %s, found digest %s',
-                 upload_obj.uuid, expected_digest, computed_digest)
+                 blob_upload.uuid, expected_digest, computed_digest)
     raise BlobUploadInvalid(detail={'reason': 'Digest mismatch on uploaded blob'})
 
 
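The rename in this hunk replaces the stale upload_obj reference with blob_upload, the parameter actually passed to the function. The digest comparison itself reduces to checking the client-supplied digest against one derived from the incrementally updated hash state; a simplified sketch using plain hashlib (digest_tools' exact API is not shown in this diff):

    import hashlib

    def digests_equal(sha_state, expected_digest):
      # sha_state is a hashlib.sha256() object that was fed the uploaded bytes.
      computed_digest = 'sha256:' + sha_state.hexdigest()
      return computed_digest == expected_digest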
@@ -9,13 +9,6 @@ import features
 from app import docker_v2_signing_key, app, metric_queue
 from auth.registry_jwt_auth import process_registry_jwt_auth
 from data import model
-from data.types import (
-  DockerSchema1Manifest,
-  DockerSchema1ManifestBuilder,
-  ManifestException,
-  DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE,
-  DOCKER_SCHEMA2_CONTENT_TYPES,
-)
 from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.decorators import anon_protect

@@ -24,6 +17,9 @@ from endpoints.v2.errors import (BlobUnknown, ManifestInvalid, ManifestUnknown,
                                  NameInvalid)
 from endpoints.trackhelper import track_and_log
 from endpoints.notificationhelper import spawn_notification
+from image.docker import ManifestException
+from image.docker.schema1 import DockerSchema1Manifest, DockerSchema1ManifestBuilder
+from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
 from util.registry.replication import queue_storage_replication
 from util.names import VALID_TAG_PATTERN
 
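Taken together, the two hunks above relocate the old data.types exports into the new image package:

    data.types.ManifestException             -> image.docker.ManifestException
    data.types.DockerSchema1Manifest         -> image.docker.schema1.DockerSchema1Manifest
    data.types.DockerSchema1ManifestBuilder  -> image.docker.schema1.DockerSchema1ManifestBuilder
    data.types.DOCKER_SCHEMA2_CONTENT_TYPES  -> image.docker.schema2.DOCKER_SCHEMA2_CONTENT_TYPES

DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE is not re-imported; the hunks below read the content type from the manifest object instead.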
@@ -56,7 +52,7 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, tag_name):
     metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])
 
   response = make_response(manifest.bytes, 200)
-  response.headers['Content-Type'] = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+  response.headers['Content-Type'] = manifest.content_type
   response.headers['Docker-Content-Digest'] = manifest.digest
   return response
 
@@ -78,7 +74,7 @@ def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
     metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2'])
 
   response = make_response(manifest.json, 200)
-  response.headers['Content-Type'] = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+  response.headers['Content-Type'] = manifest.content_type
   response.headers['Docker-Content-Digest'] = manifest.digest
   return response
 
@@ -151,16 +147,15 @@ def _write_manifest(namespace_name, repo_name, manifest):
   # Ensure all the blobs in the manifest exist.
   storage_query = model.storage.lookup_repo_storages_by_content_checksum(repo, manifest.checksums)
   storage_map = {storage.content_checksum: storage for storage in storage_query}
-  for extracted_layer_metadata in manifest.layers:
-    digest_str = str(extracted_layer_metadata.digest)
+  for layer in manifest.layers:
+    digest_str = str(layer.digest)
     if digest_str not in storage_map:
       raise BlobUnknown(detail={'digest': digest_str})
 
   # Lookup all the images and their parent images (if any) inside the manifest.
   # This will let us know which v1 images we need to synthesize and which ones are invalid.
   all_image_ids = list(manifest.docker_image_ids | manifest.parent_image_ids)
-  images = v2.docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)
-  images_map = {image.image_id: image for image in images}
+  images_map = v2.docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)
 
   # Rewrite any v1 image IDs that do not match the checksum in the database.
   try:
@@ -181,14 +176,14 @@ def _write_manifest(namespace_name, repo_name, manifest):
     raise ManifestInvalid(detail={'message': me.message})
 
   # Store the manifest pointing to the tag.
-  leaf_layer_id = images_map[manifest.layers[-1].v1_metadata.image_id].image_id
+  leaf_layer_id = images_map[manifest.leaf_layer.v1_metadata.image_id].image_id
   v2.save_manifest(namespace_name, repo_name, tag_name, leaf_layer_id, manifest.digest, manifest.bytes)
 
   # Queue all blob manifests for replication.
   # TODO(jschorr): Find a way to optimize this insertion.
   if features.STORAGE_REPLICATION:
-    for extracted_v1_metadata in manifest.layers:
-      digest_str = str(extracted_v1_metadata.digest)
+    for layer in manifest.layers:
+      digest_str = str(layer.digest)
       queue_storage_replication(namespace_name, storage_map[digest_str])
 
   track_and_log('push_repo', repo, tag=manifest.tag)
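The switch from manifest.layers[-1] to manifest.leaf_layer suggests the new manifest type names its last layer as a property; a plausible definition (an assumption, since the property itself is not part of this diff) would be:

    @property
    def leaf_layer(self):
      return self.layers[-1]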
@@ -11,18 +11,18 @@ from auth.auth import process_auth
 from auth.auth_context import get_authenticated_user
 from auth.permissions import ReadRepositoryPermission
 from data import model, database
-from endpoints.trackhelper import track_and_log
+from endpoints.common import route_show_if, parse_repository_name
 from endpoints.decorators import anon_protect
+from endpoints.trackhelper import track_and_log
+from endpoints.v2.blob import BLOB_DIGEST_ROUTE
+from image.appc import AppCImageFormatter
+from image.docker.squashed import SquashedDockerImageFormatter
+from storage import Storage
+from util.registry.filelike import wrap_with_handler
 from util.registry.queuefile import QueueFile
 from util.registry.queueprocess import QueueProcess
 from util.registry.torrent import (make_torrent, per_user_torrent_filename, public_torrent_filename,
                                    PieceHasher)
-from util.registry.filelike import wrap_with_handler
-from formats.squashed import SquashedDockerImage
-from formats.aci import ACIImage
-from storage import Storage
-from endpoints.v2.blob import BLOB_DIGEST_ROUTE
-from endpoints.common import route_show_if, parse_repository_name
 
 
 verbs = Blueprint('verbs', __name__)
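As in the manifest module, the verbs imports swap the old formats package for the new image package, renaming the formatter classes along the way:

    formats.aci.ACIImage                  -> image.appc.AppCImageFormatter
    formats.squashed.SquashedDockerImage  -> image.docker.squashed.SquashedDockerImageFormatter

The two hunks below update the call sites to match.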
@@ -372,7 +372,7 @@ def get_aci_signature(server, namespace, repository, tag, os, arch):
 @verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET', 'HEAD'])
 @process_auth
 def get_aci_image(server, namespace, repository, tag, os, arch):
-  return _repo_verb(namespace, repository, tag, 'aci', ACIImage(),
+  return _repo_verb(namespace, repository, tag, 'aci', AppCImageFormatter(),
                     sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)
 
 
@@ -380,7 +380,7 @@ def get_aci_image(server, namespace, repository, tag, os, arch):
 @verbs.route('/squash/<namespace>/<repository>/<tag>', methods=['GET'])
 @process_auth
 def get_squashed_tag(namespace, repository, tag):
-  return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImage())
+  return _repo_verb(namespace, repository, tag, 'squash', SquashedDockerImageFormatter())
 
 
 @route_show_if(features.BITTORRENT)