initial import for Open Source 🎉
This commit is contained in:
parent 1898c361f3
commit 9c0dd3b722
2048 changed files with 218743 additions and 0 deletions
171  endpoints/v2/__init__.py  Normal file
@@ -0,0 +1,171 @@
import logging
import os.path

from functools import wraps
from urlparse import urlparse
from urllib import urlencode

from flask import Blueprint, make_response, url_for, request, jsonify
from semantic_version import Spec

import features

from app import app, metric_queue, get_app_url
from auth.auth_context import get_authenticated_context
from auth.permissions import (
    ReadRepositoryPermission, ModifyRepositoryPermission, AdministerRepositoryPermission)
from auth.registry_jwt_auth import process_registry_jwt_auth, get_auth_headers
from data.registry_model import registry_model
from data.readreplica import ReadOnlyModeException
from endpoints.decorators import anon_protect, anon_allowed, route_show_if
from endpoints.v2.errors import (V2RegistryException, Unauthorized, Unsupported, NameUnknown,
                                 ReadOnlyMode)
from util.http import abort
from util.metrics.metricqueue import time_blueprint
from util.registry.dockerver import docker_version
from util.pagination import encrypt_page_token, decrypt_page_token


logger = logging.getLogger(__name__)

v2_bp = Blueprint('v2', __name__)
time_blueprint(v2_bp, metric_queue)


@v2_bp.app_errorhandler(V2RegistryException)
def handle_registry_v2_exception(error):
  response = jsonify({'errors': [error.as_dict()]})

  response.status_code = error.http_status_code
  if response.status_code == 401:
    response.headers.extend(get_auth_headers(repository=error.repository, scopes=error.scopes))
  logger.debug('sending response: %s', response.get_data())
  return response


@v2_bp.app_errorhandler(ReadOnlyModeException)
def handle_readonly(ex):
  error = ReadOnlyMode()
  response = jsonify({'errors': [error.as_dict()]})
  response.status_code = error.http_status_code
  logger.debug('sending response: %s', response.get_data())
  return response


_MAX_RESULTS_PER_PAGE = app.config.get('V2_PAGINATION_SIZE', 100)


def paginate(start_id_kwarg_name='start_id', limit_kwarg_name='limit',
             callback_kwarg_name='pagination_callback'):
  """
  Decorates a handler adding a parsed pagination token and a callback to encode a response token.
  """

  def wrapper(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
      try:
        requested_limit = int(request.args.get('n', _MAX_RESULTS_PER_PAGE))
      except ValueError:
        requested_limit = 0

      limit = max(min(requested_limit, _MAX_RESULTS_PER_PAGE), 1)
      next_page_token = request.args.get('next_page', request.args.get('last', None))

      # Decrypt the next page token, if any.
      start_id = None
      page_info = decrypt_page_token(next_page_token)
      if page_info is not None:
        start_id = page_info.get('start_id', None)

      def callback(results, response):
        if len(results) <= limit:
          return

        next_page_token = encrypt_page_token({'start_id': max([obj.id for obj in results])})

        link_url = os.path.join(get_app_url(), url_for(request.endpoint, **request.view_args))
        link_param = urlencode({'n': limit, 'next_page': next_page_token})
        link = '<%s?%s>; rel="next"' % (link_url, link_param)
        response.headers['Link'] = link

      kwargs[limit_kwarg_name] = limit
      kwargs[start_id_kwarg_name] = start_id
      kwargs[callback_kwarg_name] = callback
      return func(*args, **kwargs)
    return wrapped
  return wrapper


def _require_repo_permission(permission_class, scopes=None, allow_public=False):
  def wrapper(func):
    @wraps(func)
    def wrapped(namespace_name, repo_name, *args, **kwargs):
      logger.debug('Checking permission %s for repo: %s/%s', permission_class, namespace_name,
                   repo_name)

      permission = permission_class(namespace_name, repo_name)
      if permission.can():
        return func(namespace_name, repo_name, *args, **kwargs)

      repository = namespace_name + '/' + repo_name
      if allow_public:
        repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
        if repository_ref is None or not repository_ref.is_public:
          raise Unauthorized(repository=repository, scopes=scopes)

        if repository_ref.kind != 'image':
          msg = 'This repository is for managing %s and not container images.' % repository_ref.kind
          raise Unsupported(detail=msg)

        if repository_ref.is_public:
          if not features.ANONYMOUS_ACCESS:
            raise Unauthorized(repository=repository, scopes=scopes)

          return func(namespace_name, repo_name, *args, **kwargs)

      raise Unauthorized(repository=repository, scopes=scopes)
    return wrapped
  return wrapper


require_repo_read = _require_repo_permission(ReadRepositoryPermission, scopes=['pull'],
                                             allow_public=True)
require_repo_write = _require_repo_permission(ModifyRepositoryPermission, scopes=['pull', 'push'])
require_repo_admin = _require_repo_permission(AdministerRepositoryPermission, scopes=[
    'pull', 'push'])


def get_input_stream(flask_request):
  if flask_request.headers.get('transfer-encoding') == 'chunked':
    return flask_request.environ['wsgi.input']
  return flask_request.stream


@v2_bp.route('/')
@route_show_if(features.ADVERTISE_V2)
@process_registry_jwt_auth()
@anon_allowed
def v2_support_enabled():
  docker_ver = docker_version(request.user_agent.string)

  # Check whether the client's Docker version is one of the blacklisted versions. If we
  # cannot identify the version (None), we fail open and assume it is newer and therefore
  # should not be blacklisted.
  if docker_ver is not None and Spec(app.config['BLACKLIST_V2_SPEC']).match(docker_ver):
    abort(404)

  response = make_response('true', 200)

  if get_authenticated_context() is None:
    response = make_response('true', 401)

  response.headers.extend(get_auth_headers())
  return response


from endpoints.v2 import (
    blob,
    catalog,
    manifest,
    tag,
    v2auth,)
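
The paginate() decorator above is consumed by the catalog and tag listing endpoints later in this commit; a minimal sketch of the contract (the route and the load_rows helper here are hypothetical, for illustration only):

@v2_bp.route('/_example', methods=['GET'])
@paginate()
def example_listing(start_id, limit, pagination_callback):
  # Fetch one extra row so the callback can tell whether another page exists.
  results = load_rows(start_id=start_id, limit=limit + 1)  # load_rows is hypothetical
  response = jsonify({'items': [r.name for r in results][0:limit]})
  pagination_callback(results, response)  # sets the Link header when more rows exist
  return response
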
450  endpoints/v2/blob.py  Normal file
@@ -0,0 +1,450 @@
import logging
import re

from flask import url_for, request, redirect, Response, abort as flask_abort

from app import storage, app, get_app_url, metric_queue, model_cache
from auth.registry_jwt_auth import process_registry_jwt_auth
from auth.permissions import ReadRepositoryPermission
from data import database
from data.registry_model import registry_model
from data.registry_model.blobuploader import (create_blob_upload, retrieve_blob_upload_manager,
                                              complete_when_uploaded, BlobUploadSettings,
                                              BlobUploadException, BlobTooLargeException,
                                              BlobRangeMismatchException)
from digest import digest_tools
from endpoints.decorators import (anon_protect, anon_allowed, parse_repository_name,
                                  check_region_blacklisted, check_readonly)
from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
from endpoints.v2.errors import (
    BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported, NameUnknown, LayerTooLarge,
    InvalidRequest, BlobDownloadGeoBlocked)
from util.cache import cache_control
from util.names import parse_namespace_repository
from util.request import get_request_ip


logger = logging.getLogger(__name__)

BASE_BLOB_ROUTE = '/<repopath:repository>/blobs/<regex("{0}"):digest>'
BLOB_DIGEST_ROUTE = BASE_BLOB_ROUTE.format(digest_tools.DIGEST_PATTERN)
RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$')
BLOB_CONTENT_TYPE = 'application/octet-stream'


class _InvalidRangeHeader(Exception):
  pass


@v2_bp.route(BLOB_DIGEST_ROUTE, methods=['HEAD'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_allowed
@cache_control(max_age=31536000)  # one year, matching download_blob below
def check_blob_exists(namespace_name, repo_name, digest):
  # Find the blob.
  blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
  if blob is None:
    raise BlobUnknown()

  # Build the response headers.
  headers = {
      'Docker-Content-Digest': digest,
      'Content-Length': blob.compressed_size,
      'Content-Type': BLOB_CONTENT_TYPE,
  }

  # If our storage supports range requests, let the client know.
  if storage.get_supports_resumable_downloads(blob.placements):
    headers['Accept-Ranges'] = 'bytes'

  # Write the response to the client.
  return Response(headers=headers)


@v2_bp.route(BLOB_DIGEST_ROUTE, methods=['GET'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_allowed
@check_region_blacklisted(BlobDownloadGeoBlocked)
@cache_control(max_age=31536000)
def download_blob(namespace_name, repo_name, digest):
  # Find the blob.
  blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
  if blob is None:
    raise BlobUnknown()

  # Build the response headers.
  headers = {'Docker-Content-Digest': digest}

  # If our storage supports range requests, let the client know.
  if storage.get_supports_resumable_downloads(blob.placements):
    headers['Accept-Ranges'] = 'bytes'

  metric_queue.pull_byte_count.Inc(blob.compressed_size, labelvalues=['v2'])

  # Short-circuit by redirecting if the storage supports it.
  path = blob.storage_path
  logger.debug('Looking up the direct download URL for path: %s', path)
  direct_download_url = storage.get_direct_download_url(blob.placements, path, get_request_ip())
  if direct_download_url:
    logger.debug('Returning direct download URL')
    resp = redirect(direct_download_url)
    resp.headers.extend(headers)
    return resp

  # Close the database connection before we stream the download.
  logger.debug('Closing database connection before streaming layer data')
  with database.CloseForLongOperation(app.config):
    # NOTE: dict.update() returns None, so the headers must be merged *before*
    # constructing the Response; passing `headers.update(...)` inline would hand
    # Flask a None headers object and silently drop Docker-Content-Digest.
    headers.update({
        'Content-Length': blob.compressed_size,
        'Content-Type': BLOB_CONTENT_TYPE,
    })

    # Stream the response to the client.
    return Response(storage.stream_read(blob.placements, path), headers=headers)


def _try_to_mount_blob(repository_ref, mount_blob_digest):
  """ Attempts to mount a blob requested by the user from another repository. """
  logger.debug('Got mount request for blob `%s` into `%s`', mount_blob_digest, repository_ref)
  from_repo = request.args.get('from', None)
  if from_repo is None:
    raise InvalidRequest(message='Missing `from` repository argument')

  # Ensure the user has access to the repository.
  logger.debug('Got mount request for blob `%s` under repository `%s` into `%s`',
               mount_blob_digest, from_repo, repository_ref)
  from_namespace, from_repo_name = parse_namespace_repository(from_repo,
                                                              app.config['LIBRARY_NAMESPACE'],
                                                              include_tag=False)

  from_repository_ref = registry_model.lookup_repository(from_namespace, from_repo_name)
  if from_repository_ref is None:
    logger.debug('Could not find from repo: `%s/%s`', from_namespace, from_repo_name)
    return None

  # First check permission.
  read_permission = ReadRepositoryPermission(from_namespace, from_repo_name).can()
  if not read_permission:
    # If no direct permission, check whether the source repository is public.
    if not from_repository_ref.is_public:
      logger.debug('No permission to mount blob `%s` under repository `%s` into `%s`',
                   mount_blob_digest, from_repo, repository_ref)
      return None

  # Lookup if the mount blob's digest exists in the repository.
  mount_blob = registry_model.get_cached_repo_blob(model_cache, from_namespace, from_repo_name,
                                                   mount_blob_digest)
  if mount_blob is None:
    logger.debug('Blob `%s` under repository `%s` not found', mount_blob_digest, from_repo)
    return None

  logger.debug('Mounting blob `%s` under repository `%s` into `%s`', mount_blob_digest,
               from_repo, repository_ref)

  # Mount the blob into the current repository and return that we've completed the operation.
  expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
  mounted = registry_model.mount_blob_into_repository(mount_blob, repository_ref, expiration_sec)
  if not mounted:
    logger.debug('Could not mount blob `%s` from repository `%s`', mount_blob_digest,
                 from_repo)
    return

  # Return the response for the blob indicating that it was mounted, and including its content
  # digest.
  logger.debug('Mounted blob `%s` under repository `%s` into `%s`', mount_blob_digest,
               from_repo, repository_ref)

  namespace_name = repository_ref.namespace_name
  repo_name = repository_ref.name

  return Response(
      status=201,
      headers={
          'Docker-Content-Digest': mount_blob_digest,
          'Location':
              get_app_url() + url_for('v2.download_blob',
                                      repository='%s/%s' % (namespace_name, repo_name),
                                      digest=mount_blob_digest),
      },
  )
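
# For orientation, the cross-repository mount exchange handled above and wired into
# start_blob_upload below looks like this (values illustrative, not part of this commit):
#
#   POST /v2/devtable/building/blobs/uploads/?mount=sha256:abcd...&from=devtable/complex
#
#   201 Created   -> blob was mounted; Docker-Content-Digest and Location identify it.
#   202 Accepted  -> mount declined; the response opens a normal upload session instead.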

@v2_bp.route('/<repopath:repository>/blobs/uploads/', methods=['POST'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def start_blob_upload(namespace_name, repo_name):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  # Check for mounting of a blob from another repository.
  mount_blob_digest = request.args.get('mount', None)
  if mount_blob_digest is not None:
    response = _try_to_mount_blob(repository_ref, mount_blob_digest)
    if response is not None:
      return response

  # Begin the blob upload process.
  blob_uploader = create_blob_upload(repository_ref, storage, _upload_settings())
  if blob_uploader is None:
    logger.debug('Could not create a blob upload for `%s/%s`', namespace_name, repo_name)
    raise InvalidRequest(message='Unable to start blob upload for unknown repository')

  # Check if the blob will be uploaded now or in followup calls. If the `digest` is given, then
  # the upload will occur as a monolithic chunk in this call. Otherwise, we return a redirect
  # for the client to upload the chunks as distinct operations.
  digest = request.args.get('digest', None)
  if digest is None:
    # Short-circuit because the user will send the blob data in another request.
    return Response(
        status=202,
        headers={
            'Docker-Upload-UUID': blob_uploader.blob_upload_id,
            'Range': _render_range(0),
            'Location':
                get_app_url() + url_for('v2.upload_chunk',
                                        repository='%s/%s' % (namespace_name, repo_name),
                                        upload_uuid=blob_uploader.blob_upload_id)
        },
    )

  # Upload the data sent and commit it to a blob.
  with complete_when_uploaded(blob_uploader):
    _upload_chunk(blob_uploader, digest)

  # Write the response to the client.
  return Response(
      status=201,
      headers={
          'Docker-Content-Digest': digest,
          'Location':
              get_app_url() + url_for('v2.download_blob',
                                      repository='%s/%s' % (namespace_name, repo_name),
                                      digest=digest),
      },
  )


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['GET'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_write
@anon_protect
def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
  if uploader is None:
    raise BlobUploadUnknown()

  return Response(
      status=204,
      headers={
          'Docker-Upload-UUID': upload_uuid,
          'Range': _render_range(uploader.blob_upload.byte_count + 1),  # byte ranges are exclusive
      },
  )


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['PATCH'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def upload_chunk(namespace_name, repo_name, upload_uuid):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
  if uploader is None:
    raise BlobUploadUnknown()

  # Upload the chunk for the blob.
  _upload_chunk(uploader)

  # Write the response to the client.
  return Response(
      status=204,
      headers={
          'Location': _current_request_url(),
          'Range': _render_range(uploader.blob_upload.byte_count, with_bytes_prefix=False),
          'Docker-Upload-UUID': upload_uuid,
      },
  )


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['PUT'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
  # Ensure the digest is present before proceeding.
  digest = request.args.get('digest', None)
  if digest is None:
    raise BlobUploadInvalid(detail={'reason': 'Missing digest arg on monolithic upload'})

  # Find the upload.
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
  if uploader is None:
    raise BlobUploadUnknown()

  # Upload the chunk for the blob and commit it once complete.
  with complete_when_uploaded(uploader):
    _upload_chunk(uploader, digest)

  # Write the response to the client.
  return Response(
      status=201,
      headers={
          'Docker-Content-Digest': digest,
          'Location':
              get_app_url() + url_for('v2.download_blob',
                                      repository='%s/%s' % (namespace_name, repo_name),
                                      digest=digest),
      },
  )


@v2_bp.route('/<repopath:repository>/blobs/uploads/<upload_uuid>', methods=['DELETE'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def cancel_upload(namespace_name, repo_name, upload_uuid):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
  if uploader is None:
    raise BlobUploadUnknown()

  uploader.cancel_upload()
  return Response(status=204)


@v2_bp.route('/<repopath:repository>/blobs/<digest>', methods=['DELETE'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def delete_digest(namespace_name, repo_name, digest):  # param must match the <digest> route var
  # We do not support deleting arbitrary digests, as they break repo images.
  raise Unsupported()


def _render_range(num_uploaded_bytes, with_bytes_prefix=True):
  """
  Returns a string formatted to be used in the Range header.
  """
  return '{0}0-{1}'.format('bytes=' if with_bytes_prefix else '', num_uploaded_bytes - 1)


def _current_request_url():
  return '{0}{1}{2}'.format(get_app_url(), request.script_root, request.path)


def _abort_range_not_satisfiable(valid_end, upload_uuid):
  """
  Writes a failure response for scenarios where the registry cannot function
  with the provided range.

  TODO: Unify this with the V2RegistryException class.
  """
  flask_abort(
      Response(status=416, headers={
          'Location': _current_request_url(),
          'Range': '0-{0}'.format(valid_end),
          'Docker-Upload-UUID': upload_uuid}))


def _parse_range_header(range_header_text):
  """
  Parses the range header.

  Returns a tuple of the start offset and the length.
  If the parse fails, raises _InvalidRangeHeader.
  """
  found = RANGE_HEADER_REGEX.match(range_header_text)
  if found is None:
    raise _InvalidRangeHeader()

  start = int(found.group(1))
  length = int(found.group(2)) - start

  if length <= 0:
    raise _InvalidRangeHeader()

  return (start, length)


def _start_offset_and_length(range_header):
  """
  Returns a tuple of the start offset and the length.
  If the range header doesn't exist, defaults to (0, -1).
  If parsing fails, returns (None, None).
  """
  start_offset, length = 0, -1
  if range_header is not None:
    try:
      start_offset, length = _parse_range_header(range_header)
    except _InvalidRangeHeader:
      return None, None

  return start_offset, length
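
# Worked examples for the two range helpers above; the values follow directly from
# RANGE_HEADER_REGEX and the arithmetic in _parse_range_header:
#
#   _parse_range_header('bytes=100-150')  == (100, 50)     # start 100, length 50 (end exclusive)
#   _start_offset_and_length(None)        == (0, -1)       # no header: read the full stream
#   _start_offset_and_length('bytes=0-0') == (None, None)  # zero-length ranges are rejected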

def _upload_settings():
  """ Returns the settings for instantiating a blob upload manager. """
  expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
  settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'],
                                bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'],
                                committed_blob_expiration=expiration_sec)
  return settings


def _upload_chunk(blob_uploader, commit_digest=None):
  """ Performs uploading of a chunk of data in the current request's stream, via the blob uploader
      given. If commit_digest is specified, the upload is committed to a blob once the stream's
      data has been read and stored.
  """
  start_offset, length = _start_offset_and_length(request.headers.get('range'))
  if None in {start_offset, length}:
    raise InvalidRequest(message='Invalid range header')

  input_fp = get_input_stream(request)

  try:
    # Upload the data received.
    blob_uploader.upload_chunk(app.config, input_fp, start_offset, length, metric_queue)

    if commit_digest is not None:
      # Commit the upload to a blob.
      return blob_uploader.commit_to_blob(app.config, commit_digest)
  except BlobTooLargeException as ble:
    raise LayerTooLarge(uploaded=ble.uploaded, max_allowed=ble.max_allowed)
  except BlobRangeMismatchException:
    logger.exception('Exception when uploading blob to %s', blob_uploader.blob_upload_id)
    _abort_range_not_satisfiable(blob_uploader.blob_upload.byte_count,
                                 blob_uploader.blob_upload_id)
  except BlobUploadException:
    logger.exception('Exception when uploading blob to %s', blob_uploader.blob_upload_id)
    raise BlobUploadInvalid()
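
Taken together, the routes above implement the registry's three-step chunked push: POST to open an upload, PATCH per chunk, PUT to commit. A client-side sketch under those assumptions; push_blob_chunked and the requests usage are illustrative, not part of this commit:

import requests


def push_blob_chunked(base_url, repo, digest, chunks, auth_headers):
  # POST without ?digest starts a resumable upload: 202 plus a Location header.
  resp = requests.post('%s/v2/%s/blobs/uploads/' % (base_url, repo), headers=auth_headers)
  location = resp.headers['Location']
  offset = 0
  for chunk in chunks:
    # Each PATCH carries a Range header; per _parse_range_header above, the end is exclusive.
    headers = dict(auth_headers, Range='bytes=%d-%d' % (offset, offset + len(chunk)))
    resp = requests.patch(location, data=chunk, headers=headers)
    location = resp.headers['Location']
    offset += len(chunk)
  # The final PUT with ?digest=... commits the upload to a content-addressed blob.
  return requests.put(location, params={'digest': digest}, headers=auth_headers)
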
55  endpoints/v2/catalog.py  Normal file
@@ -0,0 +1,55 @@
from collections import namedtuple

from flask import jsonify

import features

from app import model_cache
from auth.auth_context import get_authenticated_user, get_authenticated_context
from auth.registry_jwt_auth import process_registry_jwt_auth
from data import model
from data.cache import cache_key
from endpoints.decorators import anon_protect
from endpoints.v2 import v2_bp, paginate


class Repository(namedtuple('Repository', ['id', 'namespace_name', 'name'])):
  pass


@v2_bp.route('/_catalog', methods=['GET'])
@process_registry_jwt_auth()
@anon_protect
@paginate()
def catalog_search(start_id, limit, pagination_callback):
  def _load_catalog():
    include_public = bool(features.PUBLIC_CATALOG)
    if not include_public and not get_authenticated_user():
      return []

    username = get_authenticated_user().username if get_authenticated_user() else None
    if username and not get_authenticated_user().enabled:
      return []

    query = model.repository.get_visible_repositories(username,
                                                      kind_filter='image',
                                                      include_public=include_public,
                                                      start_id=start_id,
                                                      limit=limit + 1)
    # NOTE: The repository ID is in `rid` (not `id`) here, as per the requirements of
    # the `get_visible_repositories` call.
    return [Repository(repo.rid, repo.namespace_user.username, repo.name)._asdict()
            for repo in query]

  context_key = get_authenticated_context().unique_key if get_authenticated_context() else None
  catalog_cache_key = cache_key.for_catalog_page(context_key, start_id, limit)
  visible_repositories = [Repository(**repo_dict) for repo_dict
                          in model_cache.retrieve(catalog_cache_key, _load_catalog)]

  response = jsonify({
      'repositories': ['%s/%s' % (repo.namespace_name, repo.name)
                       for repo in visible_repositories][0:limit],
  })

  pagination_callback(visible_repositories, response)
  return response
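
For reference, a sketch of the paginated exchange this endpoint produces (the hostname and next_page token value are illustrative):

# GET /v2/_catalog?n=2
# 200 OK
# Link: <https://quay.example/v2/_catalog?n=2&next_page=gAAAA...>; rel="next"
# {"repositories": ["devtable/complex", "devtable/simple"]}
#
# Repeating the GET with the returned next_page token resumes after the last repository id.
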
168  endpoints/v2/errors.py  Normal file
@@ -0,0 +1,168 @@
import bitmath


class V2RegistryException(Exception):
  def __init__(self, error_code_str, message, detail, http_status_code=400, repository=None,
               scopes=None, is_read_only=False):
    super(V2RegistryException, self).__init__(message)
    self.http_status_code = http_status_code
    self.repository = repository
    self.scopes = scopes
    self.is_read_only = is_read_only

    self._error_code_str = error_code_str
    self._detail = detail

  def as_dict(self):
    error_dict = {
        'code': self._error_code_str,
        'message': str(self),
        'detail': self._detail if self._detail is not None else {},
    }

    if self.is_read_only:
      error_dict['is_readonly'] = True

    return error_dict


class BlobUnknown(V2RegistryException):
  def __init__(self, detail=None):
    super(BlobUnknown, self).__init__('BLOB_UNKNOWN', 'blob unknown to registry', detail, 404)


class BlobUploadInvalid(V2RegistryException):
  def __init__(self, detail=None):
    super(BlobUploadInvalid, self).__init__('BLOB_UPLOAD_INVALID', 'blob upload invalid', detail)


class BlobUploadUnknown(V2RegistryException):
  def __init__(self, detail=None):
    super(BlobUploadUnknown, self).__init__('BLOB_UPLOAD_UNKNOWN',
                                            'blob upload unknown to registry', detail, 404)


class DigestInvalid(V2RegistryException):
  def __init__(self, detail=None):
    super(DigestInvalid, self).__init__('DIGEST_INVALID',
                                        'provided digest did not match uploaded content', detail)


class ManifestBlobUnknown(V2RegistryException):
  def __init__(self, detail=None):
    super(ManifestBlobUnknown, self).__init__('MANIFEST_BLOB_UNKNOWN',
                                              'manifest blob unknown to registry', detail)


class ManifestInvalid(V2RegistryException):
  def __init__(self, detail=None, http_status_code=400):
    super(ManifestInvalid, self).__init__('MANIFEST_INVALID', 'manifest invalid', detail,
                                          http_status_code)


class ManifestUnknown(V2RegistryException):
  def __init__(self, detail=None):
    super(ManifestUnknown, self).__init__('MANIFEST_UNKNOWN', 'manifest unknown', detail, 404)


class TagExpired(V2RegistryException):
  def __init__(self, message=None, detail=None):
    super(TagExpired, self).__init__('TAG_EXPIRED',
                                     message or 'Tag has expired',
                                     detail,
                                     404)


class ManifestUnverified(V2RegistryException):
  def __init__(self, detail=None):
    super(ManifestUnverified, self).__init__('MANIFEST_UNVERIFIED',
                                             'manifest failed signature verification', detail)


class NameInvalid(V2RegistryException):
  def __init__(self, detail=None, message=None):
    super(NameInvalid, self).__init__('NAME_INVALID', message or 'invalid repository name', detail)


class NameUnknown(V2RegistryException):
  def __init__(self, detail=None):
    super(NameUnknown, self).__init__('NAME_UNKNOWN', 'repository name not known to registry',
                                      detail, 404)


class SizeInvalid(V2RegistryException):
  def __init__(self, detail=None):
    super(SizeInvalid, self).__init__('SIZE_INVALID',
                                      'provided length did not match content length', detail)


class TagAlreadyExists(V2RegistryException):
  def __init__(self, detail=None):
    super(TagAlreadyExists, self).__init__('TAG_ALREADY_EXISTS', 'tag was already pushed', detail,
                                           409)


class TagInvalid(V2RegistryException):
  def __init__(self, detail=None):
    super(TagInvalid, self).__init__('TAG_INVALID', 'manifest tag did not match URI', detail)


class LayerTooLarge(V2RegistryException):
  def __init__(self, uploaded=None, max_allowed=None):
    detail = {}
    message = 'Uploaded blob is larger than allowed by this registry'

    if uploaded is not None and max_allowed is not None:
      detail = {
          'reason': '%s is greater than maximum allowed size %s' % (uploaded, max_allowed),
          'max_allowed': max_allowed,
          'uploaded': uploaded,
      }

      up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
      max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
      message = 'Uploaded blob of %s is larger than %s allowed by this registry' % (up_str,
                                                                                    max_str)

    # Without this call the exception would carry no error code or HTTP status;
    # 413 is the standard payload-too-large response.
    super(LayerTooLarge, self).__init__('BLOB_UPLOAD_INVALID', message, detail, 413)
class Unauthorized(V2RegistryException):
  def __init__(self, detail=None, repository=None, scopes=None):
    super(Unauthorized,
          self).__init__('UNAUTHORIZED', 'access to the requested resource is not authorized',
                         detail, 401, repository=repository, scopes=scopes)


class Unsupported(V2RegistryException):
  def __init__(self, detail=None, message=None):
    super(Unsupported, self).__init__('UNSUPPORTED', message or 'The operation is unsupported.',
                                      detail, 405)


class InvalidLogin(V2RegistryException):
  def __init__(self, message=None):
    super(InvalidLogin, self).__init__('UNAUTHORIZED', message or
                                       'Specified credentials are invalid', {}, 401)


class InvalidRequest(V2RegistryException):
  def __init__(self, message=None):
    super(InvalidRequest, self).__init__('INVALID_REQUEST', message or 'Invalid request', {}, 400)


class NamespaceDisabled(V2RegistryException):
  def __init__(self, message=None):
    message = message or 'This namespace is disabled. Please contact your system administrator.'
    super(NamespaceDisabled, self).__init__('DENIED', message, {}, 405)


class BlobDownloadGeoBlocked(V2RegistryException):
  def __init__(self, detail=None):
    message = ('The region from which you are pulling has been geo-ip blocked. ' +
               'Please contact the namespace owner.')
    super(BlobDownloadGeoBlocked, self).__init__('DENIED', message, detail, 403)


class ReadOnlyMode(V2RegistryException):
  def __init__(self, detail=None):
    message = ('System is currently read-only. Pulls will succeed but all write operations ' +
               'are currently suspended.')
    super(ReadOnlyMode, self).__init__('DENIED', message, detail, 405, is_read_only=True)
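
Each exception above serializes through as_dict() into the error envelope emitted by handle_registry_v2_exception in endpoints/v2/__init__.py; for example, a NameUnknown raised during a pull surfaces as (illustrative response):

# HTTP/1.1 404 Not Found
# {"errors": [{"code": "NAME_UNKNOWN",
#              "message": "repository name not known to registry",
#              "detail": {}}]}
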
324  endpoints/v2/manifest.py  Normal file
@@ -0,0 +1,324 @@
import logging

from functools import wraps

from flask import request, url_for, Response

import features

from app import app, metric_queue, storage
from auth.registry_jwt_auth import process_registry_jwt_auth
from digest import digest_tools
from data.registry_model import registry_model
from data.model.oci.manifest import CreateManifestException
from endpoints.decorators import anon_protect, parse_repository_name, check_readonly
from endpoints.v2 import v2_bp, require_repo_read, require_repo_write
from endpoints.v2.errors import (ManifestInvalid, ManifestUnknown, NameInvalid, TagExpired,
                                 NameUnknown)
from image.docker import ManifestException
from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE, DOCKER_SCHEMA1_CONTENT_TYPES
from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES, OCI_CONTENT_TYPES
from image.docker.schemas import parse_manifest_from_bytes
from notifications import spawn_notification
from util.audit import track_and_log
from util.bytes import Bytes
from util.names import VALID_TAG_PATTERN
from util.registry.replication import queue_replication_batch


logger = logging.getLogger(__name__)

BASE_MANIFEST_ROUTE = '/<repopath:repository>/manifests/<regex("{0}"):manifest_ref>'
MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN)
MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN)


@v2_bp.route(MANIFEST_TAGNAME_ROUTE, methods=['GET'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_protect
def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  tag = registry_model.get_repo_tag(repository_ref, manifest_ref)
  if tag is None:
    if registry_model.has_expired_tag(repository_ref, manifest_ref):
      logger.debug('Found expired tag %s for repository %s/%s', manifest_ref, namespace_name,
                   repo_name)
      msg = 'Tag %s was deleted or has expired. To pull, revive via time machine' % manifest_ref
      raise TagExpired(msg)

    raise ManifestUnknown()

  manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
  if manifest is None:
    # Something went wrong.
    raise ManifestInvalid()

  manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
      namespace_name, repo_name, manifest_ref, manifest)
  if manifest_bytes is None:
    raise ManifestUnknown()

  track_and_log('pull_repo', repository_ref, analytics_name='pull_repo_100x', analytics_sample=0.01,
                tag=manifest_ref)
  metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

  return Response(
      manifest_bytes.as_unicode(),
      status=200,
      headers={
          'Content-Type': manifest_media_type,
          'Docker-Content-Digest': manifest_digest,
      },
  )


@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['GET'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_protect
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
  if manifest is None:
    raise ManifestUnknown()

  manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
      namespace_name, repo_name, '$digest', manifest)
  if manifest_digest is None:
    raise ManifestUnknown()

  track_and_log('pull_repo', repository_ref, manifest_digest=manifest_ref)
  metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

  return Response(manifest_bytes.as_unicode(), status=200, headers={
      'Content-Type': manifest_media_type,
      'Docker-Content-Digest': manifest_digest,
  })


def _rewrite_schema_if_necessary(namespace_name, repo_name, tag_name, manifest):
  # As per the Docker protocol, if the manifest is not schema version 1 and the manifest's
  # media type is not in the Accept header, we return a schema 1 version of the manifest for
  # the amd64+linux platform, if any, or None if none.
  # See: https://docs.docker.com/registry/spec/manifest-v2-2
  mimetypes = [mimetype for mimetype, _ in request.accept_mimetypes]
  if manifest.media_type in mimetypes:
    return manifest.internal_manifest_bytes, manifest.digest, manifest.media_type

  # Short-circuit check: If the mimetypes is empty or just `application/json`, verify we have
  # a schema 1 manifest and return it.
  if not mimetypes or mimetypes == ['application/json']:
    if manifest.media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
      return manifest.internal_manifest_bytes, manifest.digest, manifest.media_type

  logger.debug('Manifest `%s` not compatible against %s; checking for conversion', manifest.digest,
               request.accept_mimetypes)
  converted = registry_model.convert_manifest(manifest, namespace_name, repo_name, tag_name,
                                              mimetypes, storage)
  if converted is not None:
    return converted.bytes, converted.digest, converted.media_type

  # For back-compat, we always default to schema 1 if the manifest could not be converted.
  schema1 = registry_model.get_schema1_parsed_manifest(manifest, namespace_name, repo_name,
                                                       tag_name, storage)
  if schema1 is None:
    return None, None, None

  return schema1.bytes, schema1.digest, schema1.media_type
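
# To make the negotiation above concrete (media types per the schema1/schema2 imports;
# the client calls are illustrative, not part of this commit):
#
#   GET .../manifests/latest
#   Accept: application/vnd.docker.distribution.manifest.v2+json
#     -> the stored schema2 manifest is returned untouched (first branch above).
#
#   GET .../manifests/latest            (no Accept header, or only application/json)
#     -> the manifest is converted, or a schema1 fallback is built for amd64+linux,
#        before responding.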

def _reject_manifest2_schema2(func):
  @wraps(func)
  def wrapped(*args, **kwargs):
    namespace_name = kwargs['namespace_name']
    if registry_model.supports_schema2(namespace_name):
      return func(*args, **kwargs)

    if _doesnt_accept_schema_v1() or \
       request.content_type in DOCKER_SCHEMA2_CONTENT_TYPES | OCI_CONTENT_TYPES:
      raise ManifestInvalid(detail={'message': 'manifest schema version not supported'},
                            http_status_code=415)
    return func(*args, **kwargs)

  return wrapped


def _doesnt_accept_schema_v1():
  # If the client doesn't specify anything, still give them Schema v1.
  return len(request.accept_mimetypes) != 0 and \
         DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE not in request.accept_mimetypes


@v2_bp.route(MANIFEST_TAGNAME_ROUTE, methods=['PUT'])
@parse_repository_name()
@_reject_manifest2_schema2
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def write_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
  parsed = _parse_manifest()
  return _write_manifest_and_log(namespace_name, repo_name, manifest_ref, parsed)


@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['PUT'])
@parse_repository_name()
@_reject_manifest2_schema2
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def write_manifest_by_digest(namespace_name, repo_name, manifest_ref):
  parsed = _parse_manifest()
  if parsed.digest != manifest_ref:
    raise ManifestInvalid(detail={'message': 'manifest digest mismatch'})

  if parsed.schema_version != 2:
    return _write_manifest_and_log(namespace_name, repo_name, parsed.tag, parsed)

  # If the manifest is schema version 2, then this cannot be a normal tag-based push, as the
  # manifest does not contain the tag and this call was not given a tag name. Instead, we write the
  # manifest with a temporary tag, as it is being pushed as part of a call for a manifest list.
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
  manifest = registry_model.create_manifest_with_temp_tag(repository_ref, parsed, expiration_sec,
                                                          storage)
  if manifest is None:
    raise ManifestInvalid()

  return Response(
      'OK',
      status=202,
      headers={
          'Docker-Content-Digest': manifest.digest,
          'Location':
              url_for('v2.fetch_manifest_by_digest',
                      repository='%s/%s' % (namespace_name, repo_name),
                      manifest_ref=manifest.digest),
      },
  )
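
# The temporary-tag branch above exists for manifest lists: a client pushes each child
# manifest by digest (no tag name is available for them), then pushes the list itself by
# tag, which is what finally attaches a human-readable tag. Illustrative sequence, not
# part of this commit:
#
#   PUT /v2/devtable/simple/manifests/sha256:<amd64-digest>   (child, temp-tagged above)
#   PUT /v2/devtable/simple/manifests/sha256:<arm64-digest>   (child, temp-tagged above)
#   PUT /v2/devtable/simple/manifests/latest                  (the manifest list, by tag)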

def _parse_manifest():
  content_type = request.content_type or DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
  if content_type == 'application/json':
    # For back-compat.
    content_type = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE

  try:
    return parse_manifest_from_bytes(Bytes.for_string_or_unicode(request.data), content_type)
  except ManifestException as me:
logger.exception("failed to parse manifest when writing by tagname")
    raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me})


@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
@check_readonly
def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
  """
  Delete the manifest specified by the digest.

  Note: there is no equivalent method for deleting by tag name because it is
  forbidden by the spec.
  """
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
  if manifest is None:
    raise ManifestUnknown()

  tags = registry_model.delete_tags_for_manifest(manifest)
  if not tags:
    raise ManifestUnknown()

  for tag in tags:
    track_and_log('delete_tag', repository_ref, tag=tag.name, digest=manifest_ref)

  return Response(status=202)


def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl):
  repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name, tag_name,
                                                  manifest_impl)

  # Queue all blob manifests for replication.
  if features.STORAGE_REPLICATION:
    blobs = registry_model.get_manifest_local_blobs(manifest)
    if blobs is None:
      logger.error('Could not lookup blobs for manifest `%s`', manifest.digest)
    else:
      with queue_replication_batch(namespace_name) as queue_storage_replication:
        for blob_digest in blobs:
          queue_storage_replication(blob_digest)

  track_and_log('push_repo', repository_ref, tag=tag_name)
  spawn_notification(repository_ref, 'repo_push', {'updated_tags': [tag_name]})
  metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])

  return Response(
      'OK',
      status=202,
      headers={
          'Docker-Content-Digest': manifest.digest,
          'Location':
              url_for('v2.fetch_manifest_by_digest',
                      repository='%s/%s' % (namespace_name, repo_name),
                      manifest_ref=manifest.digest),
      },
  )


def _write_manifest(namespace_name, repo_name, tag_name, manifest_impl):
  # NOTE: These extra checks are needed for schema version 1 because the manifests
  # contain the repo namespace, name and tag name.
  if manifest_impl.schema_version == 1:
    if (manifest_impl.namespace == '' and features.LIBRARY_SUPPORT and
        namespace_name == app.config['LIBRARY_NAMESPACE']):
      pass
    elif manifest_impl.namespace != namespace_name:
      raise NameInvalid()

    if manifest_impl.repo_name != repo_name:
      raise NameInvalid()

    try:
      if not manifest_impl.layers:
        raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})
    except ManifestException as me:
      raise ManifestInvalid(detail={'message': str(me)})

  # Ensure that the repository exists.
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  # Create the manifest(s) and retarget the tag to point to it.
  try:
    manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest_impl,
                                                                    tag_name, storage,
                                                                    raise_on_error=True)
  except CreateManifestException as cme:
    raise ManifestInvalid(detail={'message': str(cme)})

  if manifest is None:
    raise ManifestInvalid()

  return repository_ref, manifest, tag
32  endpoints/v2/tag.py  Normal file
@@ -0,0 +1,32 @@
from flask import jsonify

from app import model_cache
from auth.registry_jwt_auth import process_registry_jwt_auth
from data.registry_model import registry_model
from endpoints.decorators import anon_protect, parse_repository_name
from endpoints.v2 import v2_bp, require_repo_read, paginate
from endpoints.v2.errors import NameUnknown


@v2_bp.route('/<repopath:repository>/tags/list', methods=['GET'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_protect
@paginate()
def list_all_tags(namespace_name, repo_name, start_id, limit, pagination_callback):
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None:
    raise NameUnknown()

  # NOTE: We add 1 to the limit because that's how pagination_callback knows if there are
  # additional tags.
  tags = registry_model.lookup_cached_active_repository_tags(model_cache, repository_ref, start_id,
                                                             limit + 1)
  response = jsonify({
      'name': '{0}/{1}'.format(namespace_name, repo_name),
      'tags': [tag.name for tag in tags][0:limit],
  })

  pagination_callback(tags, response)
  return response
127  endpoints/v2/test/test_blob.py  Normal file
@@ -0,0 +1,127 @@
import hashlib
import pytest

from mock import patch
from flask import url_for
from playhouse.test_utils import assert_query_count

from app import instance_keys, app as realapp
from auth.auth_context_type import ValidatedAuthContext
from data import model
from data.cache import InMemoryDataModelCache
from data.database import ImageStorageLocation
from endpoints.test.shared import conduct_call
from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
from test.fixtures import *


@pytest.mark.parametrize('method, endpoint', [
    ('GET', 'download_blob'),
    ('HEAD', 'check_blob_exists'),
])
def test_blob_caching(method, endpoint, client, app):
  digest = 'sha256:' + hashlib.sha256("a").hexdigest()
  location = ImageStorageLocation.get(name='local_us')
  model.blob.store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 10000000)

  params = {
      'repository': 'devtable/simple',
      'digest': digest,
  }

  user = model.user.get_user('devtable')
  access = [{
      'type': 'repository',
      'name': 'devtable/simple',
      'actions': ['pull'],
  }]

  context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
  token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
                                instance_keys)

  headers = {
      'Authorization': 'Bearer %s' % token,
  }

  # Run without caching to make sure the request works. This also preloads some of
  # our global model caches.
  conduct_call(client, 'v2.' + endpoint, url_for, method, params, expected_code=200,
               headers=headers)

  with patch('endpoints.v2.blob.model_cache', InMemoryDataModelCache()):
    # First request should make a DB query to retrieve the blob.
    conduct_call(client, 'v2.' + endpoint, url_for, method, params, expected_code=200,
                 headers=headers)

    # Subsequent requests should use the cached blob.
    with assert_query_count(0):
      conduct_call(client, 'v2.' + endpoint, url_for, method, params, expected_code=200,
                   headers=headers)


@pytest.mark.parametrize('mount_digest, source_repo, username, expect_success', [
    # Unknown blob.
    ('sha256:unknown', 'devtable/simple', 'devtable', False),

    # Blob not in repo.
    ('sha256:' + hashlib.sha256("a").hexdigest(), 'devtable/complex', 'devtable', False),

    # Blob in repo.
    ('sha256:' + hashlib.sha256("b").hexdigest(), 'devtable/complex', 'devtable', True),

    # No access to repo.
    ('sha256:' + hashlib.sha256("b").hexdigest(), 'devtable/complex', 'public', False),

    # Public repo.
    ('sha256:' + hashlib.sha256("c").hexdigest(), 'public/publicrepo', 'devtable', True),
])
def test_blob_mounting(mount_digest, source_repo, username, expect_success, client, app):
  location = ImageStorageLocation.get(name='local_us')

  # Store and link some blobs.
  digest = 'sha256:' + hashlib.sha256("a").hexdigest()
  model.blob.store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 10000000)

  digest = 'sha256:' + hashlib.sha256("b").hexdigest()
  model.blob.store_blob_record_and_temp_link('devtable', 'complex', digest, location, 1, 10000000)

  digest = 'sha256:' + hashlib.sha256("c").hexdigest()
  model.blob.store_blob_record_and_temp_link('public', 'publicrepo', digest, location, 1, 10000000)

  params = {
      'repository': 'devtable/building',
      'mount': mount_digest,
      'from': source_repo,
  }

  user = model.user.get_user(username)
  access = [{
      'type': 'repository',
      'name': 'devtable/building',
      'actions': ['pull', 'push'],
  }]

  if source_repo.find(username) == 0:
    access.append({
        'type': 'repository',
        'name': source_repo,
        'actions': ['pull'],
    })

  context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
  token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
                                instance_keys)

  headers = {
      'Authorization': 'Bearer %s' % token,
  }

  expected_code = 201 if expect_success else 202
  conduct_call(client, 'v2.start_blob_upload', url_for, 'POST', params, expected_code=expected_code,
               headers=headers)

  if expect_success:
    # Ensure the blob now exists under the repo.
    model.blob.get_repo_blob_by_digest('devtable', 'building', mount_digest)
  else:
    with pytest.raises(model.blob.BlobDoesNotExist):
      model.blob.get_repo_blob_by_digest('devtable', 'building', mount_digest)
55  endpoints/v2/test/test_manifest.py  Normal file
@@ -0,0 +1,55 @@
import hashlib
import pytest
import time

from mock import patch

from flask import url_for
from playhouse.test_utils import count_queries

from app import instance_keys, app as realapp
from auth.auth_context_type import ValidatedAuthContext
from data import model
from endpoints.test.shared import conduct_call
from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
from test.fixtures import *


def test_e2e_query_count_manifest_norewrite(client, app):
  tag_manifest = model.tag.load_tag_manifest('devtable', 'simple', 'latest')

  params = {
      'repository': 'devtable/simple',
      'manifest_ref': tag_manifest.digest,
  }

  user = model.user.get_user('devtable')
  access = [{
      'type': 'repository',
      'name': 'devtable/simple',
      'actions': ['pull', 'push'],
  }]

  context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
  token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
                                instance_keys)

  headers = {
      'Authorization': 'Bearer %s' % token,
  }

  # Conduct a call to prime the instance key and other caches.
  conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params, expected_code=202,
               headers=headers, raw_body=tag_manifest.json_data)

  timecode = time.time()

  def get_time():
    return timecode + 10

  with patch('time.time', get_time):
    # Necessary in order to have the tag updates not occur in the same second, which is the
    # granularity supported currently.
    with count_queries() as counter:
      conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params,
                   expected_code=202, headers=headers, raw_body=tag_manifest.json_data)

  assert counter.count <= 27
138  endpoints/v2/test/test_manifest_cornercases.py  Normal file
@@ -0,0 +1,138 @@
import hashlib

from contextlib import contextmanager

from app import storage, docker_v2_signing_key
from data import model, database
from data.registry_model import registry_model
from endpoints.v2.manifest import _write_manifest
from image.docker.schema1 import DockerSchema1ManifestBuilder

from test.fixtures import *


ADMIN_ACCESS_USER = 'devtable'
REPO = 'simple'
FIRST_TAG = 'first'
SECOND_TAG = 'second'
THIRD_TAG = 'third'


@contextmanager
def set_tag_expiration_policy(namespace, expiration_s=0):
  namespace_user = model.user.get_user(namespace)
  model.user.change_user_tag_expiration(namespace_user, expiration_s)
  yield


def _perform_cleanup():
  database.RepositoryTag.delete().where(database.RepositoryTag.hidden == True).execute()
  repo_object = model.repository.get_repository(ADMIN_ACCESS_USER, REPO)
  model.gc.garbage_collect_repo(repo_object)


def test_missing_link(initialized_db):
  """ Tests for a corner case that could result in missing a link to a blob referenced by a
      manifest. The test exercises the case as follows:

      1) Push a manifest of a single layer with a Docker ID `FIRST_ID`, pointing
         to blob `FIRST_BLOB`. The database should contain the tag referencing the layer, with
         no changed ID and the blob not being GCed.

      2) Push a manifest of two layers:

         Layer 1: `FIRST_ID` with blob `SECOND_BLOB`: Will result in a new synthesized ID
         Layer 2: `SECOND_ID` with blob `THIRD_BLOB`: Will result in `SECOND_ID` pointing to the
                  `THIRD_BLOB`, with a parent pointing to the new synthesized ID's layer.

      3) Push a manifest of two layers:

         Layer 1: `THIRD_ID` with blob `FOURTH_BLOB`: Will result in a new `THIRD_ID` layer
         Layer 2: `FIRST_ID` with blob `THIRD_BLOB`: Since `FIRST_ID` already points to
                  `SECOND_BLOB`, this will synthesize a new ID. With the current bug, the
                  synthesized ID will match that of `SECOND_ID`, leaving `THIRD_ID` unlinked
                  and therefore, after a GC, missing `FOURTH_BLOB`.
  """
  with set_tag_expiration_policy('devtable', 0):
    location_name = storage.preferred_locations[0]
    location = database.ImageStorageLocation.get(name=location_name)

    # Create first blob.
    first_blob_sha = 'sha256:' + hashlib.sha256("FIRST").hexdigest()
    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, first_blob_sha, location,
                                               0, 0, 0)

    # Push the first manifest.
    first_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, FIRST_TAG)
                      .add_layer(first_blob_sha, '{"id": "first"}')
                      .build(docker_v2_signing_key))

    _write_manifest(ADMIN_ACCESS_USER, REPO, FIRST_TAG, first_manifest)

    # Delete all temp tags and perform GC.
    _perform_cleanup()

    # Ensure that the first blob still exists, along with the first tag.
    assert model.blob.get_repo_blob_by_digest(ADMIN_ACCESS_USER, REPO, first_blob_sha) is not None

    repository_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, REPO)
    found_tag = registry_model.get_repo_tag(repository_ref, FIRST_TAG, include_legacy_image=True)
    assert found_tag is not None
    assert found_tag.legacy_image.docker_image_id == 'first'

    # Create the second and third blobs.
    second_blob_sha = 'sha256:' + hashlib.sha256("SECOND").hexdigest()
    third_blob_sha = 'sha256:' + hashlib.sha256("THIRD").hexdigest()

    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, second_blob_sha, location,
                                               0, 0, 0)
    model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, third_blob_sha, location,
                                               0, 0, 0)

    # Push the second manifest.
    second_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, SECOND_TAG)
|
||||
.add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
|
||||
.add_layer(second_blob_sha, '{"id": "first"}')
|
||||
.build(docker_v2_signing_key))
|
||||
|
||||
_write_manifest(ADMIN_ACCESS_USER, REPO, SECOND_TAG, second_manifest)
|
||||
|
||||
# Delete all temp tags and perform GC.
|
||||
_perform_cleanup()
|
||||
|
||||
# Ensure that the first and second blobs still exists, along with the second tag.
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None
|
||||
|
||||
found_tag = registry_model.get_repo_tag(repository_ref, FIRST_TAG, include_legacy_image=True)
|
||||
assert found_tag is not None
|
||||
assert found_tag.legacy_image.docker_image_id == 'first'
|
||||
|
||||
# Ensure the IDs have changed.
|
||||
found_tag = registry_model.get_repo_tag(repository_ref, SECOND_TAG, include_legacy_image=True)
|
||||
assert found_tag is not None
|
||||
assert found_tag.legacy_image.docker_image_id != 'second'
|
||||
|
||||
# Create the fourth blob.
|
||||
fourth_blob_sha = 'sha256:' + hashlib.sha256("FOURTH").hexdigest()
|
||||
model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, fourth_blob_sha, location, 0, 0, 0)
|
||||
|
||||
# Push the third manifest.
|
||||
third_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, THIRD_TAG)
|
||||
.add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
|
||||
.add_layer(fourth_blob_sha, '{"id": "first"}') # Note the change in BLOB from the second manifest.
|
||||
.build(docker_v2_signing_key))
|
||||
|
||||
_write_manifest(ADMIN_ACCESS_USER, REPO, THIRD_TAG, third_manifest)
|
||||
|
||||
# Delete all temp tags and perform GC.
|
||||
_perform_cleanup()
|
||||
|
||||
# Ensure all blobs are present.
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None
|
||||
assert registry_model.get_repo_blob_by_digest(repository_ref, fourth_blob_sha) is not None
|
||||
|
||||
# Ensure new synthesized IDs were created.
|
||||
second_tag = registry_model.get_repo_tag(repository_ref, SECOND_TAG, include_legacy_image=True)
|
||||
third_tag = registry_model.get_repo_tag(repository_ref, THIRD_TAG, include_legacy_image=True)
|
||||
assert second_tag.legacy_image.docker_image_id != third_tag.legacy_image.docker_image_id
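Editor's note: to make the corner case from the docstring concrete: if a synthesized ID is derived only from the original Docker ID and its parent chain, two rewrites of the same layer that reference different blobs collide. The following is a toy model of that collision, not Quay's actual ID-synthesis algorithm; the hash inputs are hypothetical:

# Toy illustration only.
import hashlib


def synthesize_id_buggy(original_id, parent_id):
  # Ignores the blob digest, so rewrites against different blobs can yield the same ID.
  return hashlib.sha256('%s|%s' % (original_id, parent_id)).hexdigest()


def synthesize_id_fixed(original_id, parent_id, blob_digest):
  # Mixing in the blob digest keeps the synthesized IDs distinct.
  return hashlib.sha256('%s|%s|%s' % (original_id, parent_id, blob_digest)).hexdigest()


# 'first' rewritten against SECOND_BLOB (second manifest) and against
# FOURTH_BLOB (third manifest) collides under the buggy scheme...
assert synthesize_id_buggy('first', '') == synthesize_id_buggy('first', '')

# ...but stays distinct once the blob participates in the hash.
assert (synthesize_id_fixed('first', '', 'sha256:SECOND') !=
        synthesize_id_fixed('first', '', 'sha256:FOURTH'))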
68
endpoints/v2/test/test_v2_tuf.py
Normal file
@@ -0,0 +1,68 @@
import pytest
import flask

from flask_principal import Identity, Principal
from mock import Mock

from auth import permissions
from endpoints.v2.v2auth import _get_tuf_root
from test import testconfig
from util.security.registry_jwt import QUAY_TUF_ROOT, SIGNER_TUF_ROOT, DISABLED_TUF_ROOT


def admin_identity(namespace, reponame):
  identity = Identity('admin')
  identity.provides.add(permissions._RepositoryNeed(namespace, reponame, 'admin'))
  identity.provides.add(permissions._OrganizationRepoNeed(namespace, 'admin'))
  return identity


def write_identity(namespace, reponame):
  identity = Identity('writer')
  identity.provides.add(permissions._RepositoryNeed(namespace, reponame, 'write'))
  identity.provides.add(permissions._OrganizationRepoNeed(namespace, 'write'))
  return identity


def read_identity(namespace, reponame):
  identity = Identity('reader')
  identity.provides.add(permissions._RepositoryNeed(namespace, reponame, 'read'))
  identity.provides.add(permissions._OrganizationRepoNeed(namespace, 'read'))
  return identity


def app_with_principal():
  app = flask.Flask(__name__)
  app.config.from_object(testconfig.TestConfig())
  principal = Principal(app)
  return app, principal


@pytest.mark.parametrize('identity,expected', [
  (Identity('anon'), QUAY_TUF_ROOT),
  (read_identity("namespace", "repo"), QUAY_TUF_ROOT),
  (read_identity("different", "repo"), QUAY_TUF_ROOT),
  (admin_identity("different", "repo"), QUAY_TUF_ROOT),
  (write_identity("different", "repo"), QUAY_TUF_ROOT),
  (admin_identity("namespace", "repo"), SIGNER_TUF_ROOT),
  (write_identity("namespace", "repo"), SIGNER_TUF_ROOT),
])
def test_get_tuf_root(identity, expected):
  app, principal = app_with_principal()
  with app.test_request_context('/'):
    principal.set_identity(identity)
    actual = _get_tuf_root(Mock(), "namespace", "repo")
    assert actual == expected, "should be %s, but was %s" % (expected, actual)


@pytest.mark.parametrize('trust_enabled,tuf_root', [
  (True, QUAY_TUF_ROOT),
  (False, DISABLED_TUF_ROOT),
])
def test_trust_disabled(trust_enabled, tuf_root):
  app, principal = app_with_principal()
  with app.test_request_context('/'):
    principal.set_identity(read_identity("namespace", "repo"))
    actual = _get_tuf_root(Mock(trust_enabled=trust_enabled), "namespace", "repo")
    assert actual == tuf_root, "should be %s, but was %s" % (tuf_root, actual)
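Editor's note: the Mock arguments above work because _get_tuf_root only reads the trust_enabled attribute off the repository reference (see v2auth.py later in this commit). A bare Mock() exercises the trust-enabled path, since unpinned attribute reads return truthy child mocks, while Mock(trust_enabled=...) pins the flag explicitly. A minimal sketch of that mock behavior:

# Sketch: attribute behavior of mock.Mock, as relied upon by the tests above.
from mock import Mock

repo_ref = Mock(trust_enabled=False)
assert repo_ref.trust_enabled is False  # a pinned attribute keeps its value

bare_ref = Mock()
assert bool(bare_ref.trust_enabled)     # an unpinned attribute is a truthy child mock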
150
endpoints/v2/test/test_v2auth.py
Normal file
@@ -0,0 +1,150 @@
import base64

import pytest

from flask import url_for

from app import instance_keys, app as original_app
from data.model.user import regenerate_robot_token, get_robot_and_metadata, get_user
from endpoints.test.shared import conduct_call
from util.security.registry_jwt import decode_bearer_token, CLAIM_TUF_ROOTS

from test.fixtures import *


def get_robot_password(username):
  parent_name, robot_shortname = username.split('+', 1)
  parent = get_user(parent_name)
  _, token, _ = get_robot_and_metadata(robot_shortname, parent)
  return token


@pytest.mark.parametrize('scope, username, password, expected_code, expected_scopes', [
  # Invalid repository.
  ('repository:devtable/simple/foo/bar/baz:pull', 'devtable', 'password', 400, []),

  # Invalid scopes.
  ('some_invalid_scope', 'devtable', 'password', 400, []),

  # Invalid credentials.
  ('repository:devtable/simple:pull', 'devtable', 'invalid', 401, []),

  # Valid credentials.
  ('repository:devtable/simple:pull', 'devtable', 'password', 200,
   ['devtable/simple:pull']),

  ('repository:devtable/simple:push', 'devtable', 'password', 200,
   ['devtable/simple:push']),

  ('repository:devtable/simple:pull,push', 'devtable', 'password', 200,
   ['devtable/simple:push,pull']),

  ('repository:devtable/simple:pull,push,*', 'devtable', 'password', 200,
   ['devtable/simple:push,pull,*']),

  ('repository:buynlarge/orgrepo:pull,push,*', 'devtable', 'password', 200,
   ['buynlarge/orgrepo:push,pull,*']),

  ('', 'devtable', 'password', 200, []),

  # No credentials, non-public repo.
  ('repository:devtable/simple:pull', None, None, 200, ['devtable/simple:']),

  # No credentials, public repo.
  ('repository:public/publicrepo:pull', None, None, 200, ['public/publicrepo:pull']),

  # Reader only.
  ('repository:buynlarge/orgrepo:pull,push,*', 'reader', 'password', 200,
   ['buynlarge/orgrepo:pull']),

  # Unknown repository.
  ('repository:devtable/unknownrepo:pull,push', 'devtable', 'password', 200,
   ['devtable/unknownrepo:push,pull']),

  # Unknown repository in another namespace.
  ('repository:somenamespace/unknownrepo:pull,push', 'devtable', 'password', 200,
   ['somenamespace/unknownrepo:']),

  # Disabled namespace.
  (['repository:devtable/simple:pull,push', 'repository:disabled/complex:pull'],
   'devtable', 'password', 405,
   []),

  # Multiple scopes.
  (['repository:devtable/simple:pull,push', 'repository:devtable/complex:pull'],
   'devtable', 'password', 200,
   ['devtable/simple:push,pull', 'devtable/complex:pull']),

  # Multiple scopes with restricted behavior.
  (['repository:devtable/simple:pull,push', 'repository:public/publicrepo:pull,push'],
   'devtable', 'password', 200,
   ['devtable/simple:push,pull', 'public/publicrepo:pull']),

  (['repository:devtable/simple:pull,push,*', 'repository:public/publicrepo:pull,push,*'],
   'devtable', 'password', 200,
   ['devtable/simple:push,pull,*', 'public/publicrepo:pull']),

  # Read-only state.
  ('repository:devtable/readonly:pull,push,*', 'devtable', 'password', 200,
   ['devtable/readonly:pull']),

  # Mirror state as a typical user.
  ('repository:devtable/mirrored:pull,push,*', 'devtable', 'password', 200,
   ['devtable/mirrored:pull']),

  # Mirror state as the mirroring robot, which should have write access.
  ('repository:devtable/mirrored:pull,push,*', 'devtable+dtrobot', get_robot_password, 200,
   ['devtable/mirrored:push,pull']),

  # Organization repository, org admin.
  ('repository:buynlarge/orgrepo:pull,push,*', 'devtable', 'password', 200,
   ['buynlarge/orgrepo:push,pull,*']),

  # Organization repository, org creator.
  ('repository:buynlarge/orgrepo:pull,push,*', 'creator', 'password', 200,
   ['buynlarge/orgrepo:']),

  # Organization repository, org reader.
  ('repository:buynlarge/orgrepo:pull,push,*', 'reader', 'password', 200,
   ['buynlarge/orgrepo:pull']),

  # Organization repository, fresh user.
  ('repository:buynlarge/orgrepo:pull,push,*', 'freshuser', 'password', 200,
   ['buynlarge/orgrepo:']),
])
def test_generate_registry_jwt(scope, username, password, expected_code, expected_scopes,
                               app, client):
  params = {
    'service': original_app.config['SERVER_HOSTNAME'],
    'scope': scope,
  }

  if callable(password):
    password = password(username)

  headers = {}
  if username and password:
    headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (username, password)))

  resp = conduct_call(client, 'v2.generate_registry_jwt', url_for, 'GET', params, {}, expected_code,
                      headers=headers)
  if expected_code != 200:
    return

  token = resp.json['token']
  decoded = decode_bearer_token(token, instance_keys, original_app.config)
  assert decoded['iss'] == 'quay'
  assert decoded['aud'] == original_app.config['SERVER_HOSTNAME']
  assert decoded['sub'] == (username if username else '(anonymous)')

  expected_access = []
  for expected_scope in expected_scopes:
    name, actions_str = expected_scope.split(':')
    actions = actions_str.split(',') if actions_str else []

    expected_access.append({
      'type': 'repository',
      'name': name,
      'actions': actions,
    })

  assert decoded['access'] == expected_access
  assert len(decoded['context'][CLAIM_TUF_ROOTS]) == len(expected_scopes)
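Editor's note: the endpoint under test implements the standard Docker Registry v2 token handshake, so the same exchange can be driven by any HTTP client. A hedged client-side sketch; the host and credentials are placeholders:

# Sketch only: HOST, USER and PASS are hypothetical.
import requests

HOST = 'https://quay.example.com'

resp = requests.get(
    '%s/v2/auth' % HOST,
    params={'service': 'quay.example.com',
            'scope': 'repository:devtable/simple:pull'},
    auth=('USER', 'PASS'))
resp.raise_for_status()
token = resp.json()['token']

# The bearer token is then presented on subsequent registry requests.
manifest = requests.get(
    '%s/v2/devtable/simple/manifests/latest' % HOST,
    headers={'Authorization': 'Bearer %s' % token})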
261
endpoints/v2/v2auth.py
Normal file
@@ -0,0 +1,261 @@
import logging
import re

from collections import namedtuple
from cachetools.func import lru_cache
from flask import request, jsonify

import features
from app import app, userevents, instance_keys
from auth.auth_context import get_authenticated_context, get_authenticated_user
from auth.decorators import process_basic_auth
from auth.permissions import (ModifyRepositoryPermission, ReadRepositoryPermission,
                              CreateRepositoryPermission, AdministerRepositoryPermission)
from data import model
from data.database import RepositoryState
from data.registry_model import registry_model
from data.registry_model.datatypes import RepositoryReference
from data.model.repo_mirror import get_mirroring_robot
from endpoints.decorators import anon_protect
from endpoints.v2 import v2_bp
from endpoints.v2.errors import (InvalidLogin, NameInvalid, InvalidRequest, Unsupported,
                                 Unauthorized, NamespaceDisabled)
from util.cache import no_cache
from util.names import parse_namespace_repository, REPOSITORY_NAME_REGEX
from util.security.registry_jwt import (generate_bearer_token, build_context_and_subject,
                                        QUAY_TUF_ROOT, SIGNER_TUF_ROOT, DISABLED_TUF_ROOT)

logger = logging.getLogger(__name__)

TOKEN_VALIDITY_LIFETIME_S = 60 * 60  # 1 hour
SCOPE_REGEX_TEMPLATE = r'^repository:((?:{}\/)?((?:[\.a-zA-Z0-9_\-]+\/)*[\.a-zA-Z0-9_\-]+)):((?:push|pull|\*)(?:,(?:push|pull|\*))*)$'

scopeResult = namedtuple('scopeResult', ['actions', 'namespace', 'repository', 'registry_and_repo',
                                         'tuf_root'])


@v2_bp.route('/auth')
@process_basic_auth
@no_cache
@anon_protect
def generate_registry_jwt(auth_result):
  """
  This endpoint will generate a JWT conforming to the Docker Registry v2 Auth Spec:
  https://docs.docker.com/registry/spec/auth/token/
  """
  audience_param = request.args.get('service')
  logger.debug('Request audience: %s', audience_param)

  scope_params = request.args.getlist('scope') or []
  logger.debug('Scope request: %s', scope_params)

  auth_header = request.headers.get('authorization', '')
  auth_credentials_sent = bool(auth_header)

  # Load the auth context and verify that we've directly received credentials.
  has_valid_auth_context = False
  if get_authenticated_context():
    has_valid_auth_context = not get_authenticated_context().is_anonymous

  if auth_credentials_sent and not has_valid_auth_context:
    # The auth credentials sent for the user are invalid.
    raise InvalidLogin(auth_result.error_message)

  if not has_valid_auth_context and len(scope_params) == 0:
    # In this case, we are doing an auth flow, and it's not an anonymous pull.
    logger.debug('No user and no token sent for empty scope list')
    raise Unauthorized()

  # Build the access list for the authenticated context.
  access = []
  scope_results = []
  for scope_param in scope_params:
    scope_result = _authorize_or_downscope_request(scope_param, has_valid_auth_context)
    if scope_result is None:
      continue

    scope_results.append(scope_result)
    access.append({
      'type': 'repository',
      'name': scope_result.registry_and_repo,
      'actions': scope_result.actions,
    })

  # Issue user events.
  user_event_data = {
    'action': 'login',
  }

  # Set the user event data for when authed.
  if len(scope_results) > 0:
    if 'push' in scope_results[0].actions:
      user_action = 'push_start'
    elif 'pull' in scope_results[0].actions:
      user_action = 'pull_start'
    else:
      user_action = 'login'

    user_event_data = {
      'action': user_action,
      'namespace': scope_results[0].namespace,
      'repository': scope_results[0].repository,
    }

  # Send the user event.
  if get_authenticated_user() is not None:
    event = userevents.get_event(get_authenticated_user().username)
    event.publish_event_data('docker-cli', user_event_data)

  # Build the signed JWT.
  tuf_roots = {'%s/%s' % (scope_result.namespace, scope_result.repository): scope_result.tuf_root
               for scope_result in scope_results}
  context, subject = build_context_and_subject(get_authenticated_context(), tuf_roots=tuf_roots)
  token = generate_bearer_token(audience_param, subject, context, access,
                                TOKEN_VALIDITY_LIFETIME_S, instance_keys)
  return jsonify({'token': token})


@lru_cache(maxsize=1)
def _get_scope_regex():
  hostname = re.escape(app.config['SERVER_HOSTNAME'])
  scope_regex_string = SCOPE_REGEX_TEMPLATE.format(hostname)
  return re.compile(scope_regex_string)


def _get_tuf_root(repository_ref, namespace, reponame):
  if not features.SIGNING or repository_ref is None or not repository_ref.trust_enabled:
    return DISABLED_TUF_ROOT

  # Users with write access to a repository will see signer-rooted TUF metadata.
  if ModifyRepositoryPermission(namespace, reponame).can():
    return SIGNER_TUF_ROOT
  return QUAY_TUF_ROOT


def _authorize_or_downscope_request(scope_param, has_valid_auth_context):
  # TODO: The complexity of this function is difficult to follow and maintain. Refactor/cleanup.
  if len(scope_param) == 0:
    if not has_valid_auth_context:
      # In this case, we are doing an auth flow, and it's not an anonymous pull.
      logger.debug('No user and no token sent for empty scope list')
      raise Unauthorized()

    return None

  match = _get_scope_regex().match(scope_param)
  if match is None:
    logger.debug('Match: %s', match)
    logger.debug('len: %s', len(scope_param))
    logger.warning('Unable to decode repository and actions: %s', scope_param)
    raise InvalidRequest('Unable to decode repository and actions: %s' % scope_param)

  logger.debug('Match: %s', match.groups())

  registry_and_repo = match.group(1)
  namespace_and_repo = match.group(2)
  requested_actions = match.group(3).split(',')

  lib_namespace = app.config['LIBRARY_NAMESPACE']
  namespace, reponame = parse_namespace_repository(namespace_and_repo, lib_namespace)

  # Ensure that we are never creating an invalid repository.
  if not REPOSITORY_NAME_REGEX.match(reponame):
    logger.debug('Found invalid repository name in auth flow: %s', reponame)
    if len(namespace_and_repo.split('/')) > 1:
      msg = 'Nested repositories are not supported. Found: %s' % namespace_and_repo
      raise NameInvalid(message=msg)

    raise NameInvalid(message='Invalid repository name: %s' % namespace_and_repo)

  # Ensure the namespace is enabled.
  if registry_model.is_existing_disabled_namespace(namespace):
    msg = 'Namespace %s has been disabled. Please contact a system administrator.' % namespace
    raise NamespaceDisabled(message=msg)

  final_actions = []

  repository_ref = registry_model.lookup_repository(namespace, reponame)
  repo_is_public = repository_ref is not None and repository_ref.is_public
  invalid_repo_message = ''
  if repository_ref is not None and repository_ref.kind != 'image':
    invalid_repo_message = ('This repository is for managing %s and not container images.' %
                            repository_ref.kind)

  if 'push' in requested_actions:
    # Check if there is a valid user or token, as otherwise the repository cannot be
    # accessed.
    if has_valid_auth_context:
      user = get_authenticated_user()

      # Lookup the repository. If it exists, make sure the entity has modify
      # permission. Otherwise, make sure the entity has create permission.
      if repository_ref:
        if ModifyRepositoryPermission(namespace, reponame).can():
          if repository_ref is not None and repository_ref.kind != 'image':
            raise Unsupported(message=invalid_repo_message)

          # Check for different repository states.
          if repository_ref.state == RepositoryState.NORMAL:
            # In NORMAL mode, if the user has permission, then they can push.
            final_actions.append('push')
          elif repository_ref.state == RepositoryState.MIRROR:
            # In MIRROR mode, only the mirroring robot can push.
            mirror = model.repo_mirror.get_mirror(repository_ref.id)
            robot = mirror.internal_robot if mirror is not None else None
            if robot is not None and user is not None and robot == user:
              assert robot.robot
              final_actions.append('push')
            else:
              logger.debug('Repository %s/%s push requested for non-mirror robot %s: %s',
                           namespace, reponame, robot, user)
          elif repository_ref.state == RepositoryState.READ_ONLY:
            # No pushing allowed in read-only state.
            pass
          else:
            logger.warning('Unknown state for repository %s: %s', repository_ref,
                           repository_ref.state)
        else:
          logger.debug('No permission to modify repository %s/%s', namespace, reponame)
      else:
        # TODO: Push-to-create functionality should be configurable.
        if CreateRepositoryPermission(namespace).can() and user is not None:
          logger.debug('Creating repository: %s/%s', namespace, reponame)
          repository_ref = RepositoryReference.for_repo_obj(
              model.repository.create_repository(namespace, reponame, user))
          final_actions.append('push')
        else:
          logger.debug('No permission to create repository %s/%s', namespace, reponame)

  if 'pull' in requested_actions:
    # Grant pull if the user can read the repo or it is public.
    if ReadRepositoryPermission(namespace, reponame).can() or repo_is_public:
      if repository_ref is not None and repository_ref.kind != 'image':
        raise Unsupported(message=invalid_repo_message)

      final_actions.append('pull')
    else:
      logger.debug('No permission to pull repository %s/%s', namespace, reponame)

  if '*' in requested_actions:
    # Grant * only if the user is an admin of the repository.
    if AdministerRepositoryPermission(namespace, reponame).can():
      if repository_ref is not None and repository_ref.kind != 'image':
        raise Unsupported(message=invalid_repo_message)

      if repository_ref and repository_ref.state in (RepositoryState.MIRROR,
                                                     RepositoryState.READ_ONLY):
        logger.debug('No permission to administer repository %s/%s', namespace, reponame)
      else:
        assert repository_ref.state == RepositoryState.NORMAL
        final_actions.append('*')
    else:
      logger.debug('No permission to administer repository %s/%s', namespace, reponame)

  # Final sanity checks.
  if 'push' in final_actions:
    assert repository_ref.state != RepositoryState.READ_ONLY

  if '*' in final_actions:
    assert repository_ref.state == RepositoryState.NORMAL

  return scopeResult(actions=final_actions, namespace=namespace, repository=reponame,
                     registry_and_repo=registry_and_repo,
                     tuf_root=_get_tuf_root(repository_ref, namespace, reponame))
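Editor's note: a quick illustration of the scope grammar accepted by SCOPE_REGEX_TEMPLATE, assuming a server hostname of quay.example.com; the three capture groups feed registry_and_repo, namespace_and_repo, and requested_actions above:

# Sketch: exercising the scope regex with a hypothetical hostname.
import re

regex = re.compile(SCOPE_REGEX_TEMPLATE.format(re.escape('quay.example.com')))

match = regex.match('repository:quay.example.com/devtable/simple:pull,push')
assert match.group(1) == 'quay.example.com/devtable/simple'  # registry_and_repo
assert match.group(2) == 'devtable/simple'                   # namespace_and_repo
assert match.group(3) == 'pull,push'                         # requested actions

# The hostname prefix is optional, so the short form also matches.
assert regex.match('repository:devtable/simple:pull') is not None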