initial import for Open Source 🎉
This commit is contained in:
parent
1898c361f3
commit
9c0dd3b722
2048 changed files with 218743 additions and 0 deletions
76
endpoints/v1/__init__.py
Normal file
76
endpoints/v1/__init__.py
Normal file
|
@ -0,0 +1,76 @@
|
|||
import logging
|
||||
|
||||
from functools import wraps
|
||||
|
||||
from flask import Blueprint, make_response, jsonify
|
||||
|
||||
import features
|
||||
|
||||
from app import metric_queue, app
|
||||
from data.readreplica import ReadOnlyModeException
|
||||
from endpoints.decorators import anon_protect, anon_allowed
|
||||
from util.metrics.metricqueue import time_blueprint
|
||||
from util.http import abort
|
||||
|
||||
# Blueprint holding every Docker Registry V1 protocol endpoint in this package.
v1_bp = Blueprint('v1', __name__)
# Record per-request timing metrics for all routes registered on this blueprint.
time_blueprint(v1_bp, metric_queue)

logger = logging.getLogger(__name__)
|
||||
|
||||
# Note: This is *not* part of the Docker index spec. This is here for our own health check,
# since we have nginx handle the _ping below.
@v1_bp.route('/_internal_ping')
@anon_allowed
def internal_ping():
  """ Internal health-check endpoint; always answers 'true' with a 200. """
  response = make_response('true', 200)
  return response
|
||||
|
||||
|
||||
@v1_bp.route('/_ping')
@anon_allowed
def ping():
  """ Docker V1 ping endpoint, advertising the registry version headers. """
  # NOTE: any changes made here must also be reflected in the nginx config
  response = make_response('true', 200)
  headers = response.headers
  headers['X-Docker-Registry-Version'] = '0.6.0'
  headers['X-Docker-Registry-Standalone'] = '0'
  return response
|
||||
|
||||
|
||||
@v1_bp.app_errorhandler(ReadOnlyModeException)
def handle_readonly(ex):
  """ Translate a ReadOnlyModeException raised anywhere in the app into a 503 JSON reply. """
  body = {
    'message': ('System is currently read-only. Pulls will succeed but all ' +
                'write operations are currently suspended.'),
    'is_readonly': True,
  }
  response = jsonify(body)
  response.status_code = 503
  return response
|
||||
|
||||
|
||||
def check_v1_push_enabled(namespace_name_kwarg='namespace_name'):
  """ Decorator which checks if V1 push is enabled for the current namespace. The first argument
      to the wrapped function must be the namespace name or there must be a kwarg with the
      name `namespace_name`.
  """
  def decorator(wrapped):
    @wraps(wrapped)
    def enforce_whitelist(*args, **kwargs):
      # Prefer the named kwarg; fall back to the first positional argument.
      try:
        namespace_name = kwargs[namespace_name_kwarg]
      except KeyError:
        namespace_name = args[0]

      if features.RESTRICTED_V1_PUSH:
        whitelist = app.config.get('V1_PUSH_WHITELIST') or []
        logger.debug('V1 push is restricted to whitelist: %s', whitelist)
        if namespace_name not in whitelist:
          abort(405,
                message=('V1 push support has been deprecated. To enable for this ' +
                         'namespace, please contact support.'))

      return wrapped(*args, **kwargs)

    return enforce_whitelist

  return decorator
|
||||
|
||||
|
||||
from endpoints.v1 import (
|
||||
index,
|
||||
registry,
|
||||
tag,)
|
388
endpoints/v1/index.py
Normal file
388
endpoints/v1/index.py
Normal file
|
@ -0,0 +1,388 @@
|
|||
import json
|
||||
import logging
|
||||
import urlparse
|
||||
|
||||
from functools import wraps
|
||||
|
||||
from flask import request, make_response, jsonify, session
|
||||
|
||||
from app import userevents, metric_queue, storage, docker_v2_signing_key
|
||||
from auth.auth_context import get_authenticated_context, get_authenticated_user
|
||||
from auth.credentials import validate_credentials, CredentialKind
|
||||
from auth.decorators import process_auth
|
||||
from auth.permissions import (
|
||||
ModifyRepositoryPermission, UserAdminPermission, ReadRepositoryPermission,
|
||||
CreateRepositoryPermission, repository_read_grant, repository_write_grant)
|
||||
from auth.signedgrant import generate_signed_token
|
||||
from data import model
|
||||
from data.registry_model import registry_model
|
||||
from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
|
||||
from endpoints.decorators import (anon_protect, anon_allowed, parse_repository_name,
|
||||
check_repository_state, check_readonly)
|
||||
from endpoints.v1 import v1_bp, check_v1_push_enabled
|
||||
from notifications import spawn_notification
|
||||
from util.audit import track_and_log
|
||||
from util.http import abort
|
||||
from util.names import REPOSITORY_NAME_REGEX
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GrantType(object):
  """ Names the scope of a signed access grant attached to a V1 response. """
  READ_REPOSITORY = 'read'
  WRITE_REPOSITORY = 'write'
|
||||
|
||||
|
||||
def ensure_namespace_enabled(f):
  """ Decorator which aborts with a 400 when the target namespace is missing or disabled.

      The wrapped view must take (namespace_name, repo_name) as its first two
      positional arguments.
  """
  @wraps(f)
  def wrapper(namespace_name, repo_name, *args, **kwargs):
    namespace = model.user.get_namespace_user(namespace_name)
    if namespace is None or not namespace.enabled:
      abort(400, message='Namespace is disabled. Please contact your system administrator.')

    return f(namespace_name, repo_name, *args, **kwargs)

  return wrapper
|
||||
|
||||
|
||||
def generate_headers(scope=GrantType.READ_REPOSITORY, add_grant_for_status=None):
  """ Decorator factory which stamps Docker V1 protocol headers onto the wrapped view's
      response: the X-Docker-Endpoints host, and — when the client asked for a token or
      the response status matches `add_grant_for_status` — a signed grant token in the
      WWW-Authenticate and X-Docker-Token headers.

      scope: GrantType.READ_REPOSITORY or GrantType.WRITE_REPOSITORY; selects which
             permission is checked and which grant is minted.
      add_grant_for_status: optional HTTP status code that forces a grant regardless of
             whether the client sent X-Docker-Token (used e.g. after a 201 create).
  """
  def decorator_method(f):
    @wraps(f)
    def wrapper(namespace_name, repo_name, *args, **kwargs):
      response = f(namespace_name, repo_name, *args, **kwargs)

      # Setting session namespace and repository so later session-scoped endpoints
      # (e.g. the image routes) know which repo this push/pull refers to.
      session['namespace'] = namespace_name
      session['repository'] = repo_name

      # We run our index and registry on the same hosts for now
      registry_server = urlparse.urlparse(request.url).netloc
      response.headers['X-Docker-Endpoints'] = registry_server

      has_token_request = request.headers.get('X-Docker-Token', '')
      force_grant = (add_grant_for_status == response.status_code)

      if has_token_request or force_grant:
        grants = []

        # When force_grant is set, the permission check is skipped entirely.
        if scope == GrantType.READ_REPOSITORY:
          if force_grant or ReadRepositoryPermission(namespace_name, repo_name).can():
            grants.append(repository_read_grant(namespace_name, repo_name))
        elif scope == GrantType.WRITE_REPOSITORY:
          if force_grant or ModifyRepositoryPermission(namespace_name, repo_name).can():
            grants.append(repository_write_grant(namespace_name, repo_name))

        # Generate a signed token for the user (if any) and the grants (if any)
        if grants or get_authenticated_user():
          user_context = get_authenticated_user() and get_authenticated_user().username
          signature = generate_signed_token(grants, user_context)
          response.headers['WWW-Authenticate'] = signature
          response.headers['X-Docker-Token'] = signature

      return response

    return wrapper

  return decorator_method
|
||||
|
||||
|
||||
@v1_bp.route('/users', methods=['POST'])
@v1_bp.route('/users/', methods=['POST'])
@anon_allowed
@check_readonly
def create_user():
  """ V1 'create user' endpoint, which the Docker CLI actually uses for login.

      Credentials are validated and, on success, a 400 with the body
      '"Username or email already exists"' is returned — that response is what
      tells the CLI to retry with a GET and treat the login as successful.
      Invalid credentials abort with a 400 describing the failure kind.
  """
  user_data = request.get_json()
  if not user_data or 'username' not in user_data:
    abort(400, 'Missing username')

  username = user_data['username']
  password = user_data.get('password', '')

  # UGH! we have to use this response when the login actually worked, in order
  # to get the CLI to try again with a get, and then tell us login succeeded.
  success = make_response('"Username or email already exists"', 400)
  result, kind = validate_credentials(username, password)
  if not result.auth_valid:
    if kind == CredentialKind.token:
      abort(400, 'Invalid access token.', issue='invalid-access-token')

    if kind == CredentialKind.robot:
      abort(400, 'Invalid robot account or password.', issue='robot-login-failure')

    if kind == CredentialKind.oauth_token:
      abort(400, 'Invalid oauth access token.', issue='invalid-oauth-access-token')

    if kind == CredentialKind.user:
      # Mark that the login failed.
      event = userevents.get_event(username)
      event.publish_event_data('docker-cli', {'action': 'loginfailure'})
      abort(400, result.error_message, issue='login-failure')

    # Default case: Just fail.
    abort(400, result.error_message, issue='login-failure')

  if result.has_nonrobot_user:
    # Mark that the user was logged in.
    event = userevents.get_event(username)
    event.publish_event_data('docker-cli', {'action': 'login'})

  return success
|
||||
|
||||
|
||||
@v1_bp.route('/users', methods=['GET'])
@v1_bp.route('/users/', methods=['GET'])
@process_auth
@anon_allowed
def get_user():
  """ Returns the username of the authenticated caller; 404s for anonymous requests. """
  context = get_authenticated_context()
  if not context or context.is_anonymous:
    abort(404)

  payload = {
    'username': context.credential_username,
    'email': None,
  }
  return jsonify(payload)
|
||||
|
||||
|
||||
@v1_bp.route('/users/<username>/', methods=['PUT'])
@process_auth
@anon_allowed
@check_readonly
def update_user(username):
  """ Updates the authenticated user's account; currently only a password change is
      supported. Requires user-admin permission over `username`; otherwise 403s.
  """
  permission = UserAdminPermission(username)
  if permission.can():
    # request.get_json() returns None when the body is absent or not JSON; treat
    # that as an empty update instead of raising a TypeError (HTTP 500) below.
    update_request = request.get_json() or {}

    if 'password' in update_request:
      logger.debug('Updating user password')
      model.user.change_password(get_authenticated_user(), update_request['password'])

    return jsonify({
      'username': get_authenticated_user().username,
      'email': get_authenticated_user().email,
    })

  abort(403)
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/', methods=['PUT'])
@process_auth
@parse_repository_name()
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@generate_headers(scope=GrantType.WRITE_REPOSITORY, add_grant_for_status=201)
@anon_allowed
@check_readonly
def create_repository(namespace_name, repo_name):
  """ Starts a V1 push: creates the repository if needed, emits a push_start user event,
      opens a manifest builder for the push, and stores the builder id in the session.

      Returns 201 'Created' on success; aborts with 400/401/403/405 on validation,
      auth, or permission failures.
  """
  # Verify that the repository name is valid.
  if not REPOSITORY_NAME_REGEX.match(repo_name):
    abort(400, message='Invalid repository name. Repository names cannot contain slashes.')

  logger.debug('Looking up repository %s/%s', namespace_name, repo_name)
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
  if repository_ref is None and get_authenticated_user() is None:
    # No such repo and no authenticated user: guests may not create repositories.
    logger.debug('Attempt to create repository %s/%s without user auth', namespace_name, repo_name)
    abort(401,
          message='Cannot create a repository as a guest. Please login via "docker login" first.',
          issue='no-login')
  elif repository_ref:
    # Existing repo: require write permission and that it holds container images.
    modify_perm = ModifyRepositoryPermission(namespace_name, repo_name)
    if not modify_perm.can():
      abort(403,
            message='You do not have permission to modify repository %(namespace)s/%(repository)s',
            issue='no-repo-write-permission', namespace=namespace_name, repository=repo_name)
    elif repository_ref.kind != 'image':
      msg = ('This repository is for managing %s resources and not container images.' %
             repository_ref.kind)
      abort(405, message=msg, namespace=namespace_name)
  else:
    # New repo: require create permission in the namespace, then create it.
    create_perm = CreateRepositoryPermission(namespace_name)
    if not create_perm.can():
      logger.warning('Attempt to create a new repo %s/%s with insufficient perms', namespace_name,
                     repo_name)
      msg = 'You do not have permission to create repositories in namespace "%(namespace)s"'
      abort(403, message=msg, issue='no-create-permission', namespace=namespace_name)

    # Attempt to create the new repository.
    logger.debug('Creating repository %s/%s with owner: %s', namespace_name, repo_name,
                 get_authenticated_user().username)

    repository_ref = model.repository.create_repository(namespace_name, repo_name,
                                                        get_authenticated_user())

  if get_authenticated_user():
    user_event_data = {
      'action': 'push_start',
      'repository': repo_name,
      'namespace': namespace_name,
    }

    event = userevents.get_event(get_authenticated_user().username)
    event.publish_event_data('docker-cli', user_event_data)

  # Start a new builder for the repository and save its ID in the session.
  assert repository_ref
  builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
  logger.debug('Started repo push with manifest builder %s', builder)
  if builder is None:
    abort(404, message='Unknown repository', issue='unknown-repo')

  session['manifest_builder'] = builder.builder_id
  return make_response('Created', 201)
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/images', methods=['PUT'])
@process_auth
@parse_repository_name()
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@generate_headers(scope=GrantType.WRITE_REPOSITORY)
@anon_allowed
@check_readonly
def update_images(namespace_name, repo_name):
  """ Finalizes a V1 push: commits the session's manifest builder, logs the push,
      and fans out repo_push notifications. Returns 204 'Updated' on success.
  """
  permission = ModifyRepositoryPermission(namespace_name, repo_name)

  if permission.can():
    logger.debug('Looking up repository')
    repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                      kind_filter='image')
    if repository_ref is None:
      # Make sure the repo actually exists.
      abort(404, message='Unknown repository', issue='unknown-repo')

    # The builder id was stored in the session by create_repository; a missing or
    # stale builder means the push never started properly.
    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage,
                                      docker_v2_signing_key)
    if builder is None:
      abort(400)

    # Generate a job for each notification that has been added to this repo
    logger.debug('Adding notifications for repository')
    event_data = {
      'updated_tags': [tag.name for tag in builder.committed_tags],
    }

    builder.done()

    track_and_log('push_repo', repository_ref)
    spawn_notification(repository_ref, 'repo_push', event_data)
    metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
    return make_response('Updated', 204)

  abort(403)
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/images', methods=['GET'])
@process_auth
@parse_repository_name()
@ensure_namespace_enabled
@generate_headers(scope=GrantType.READ_REPOSITORY)
@anon_protect
def get_repository_images(namespace_name, repo_name):
  """ V1 image-list endpoint. Always returns an empty JSON list, but still enforces
      read access and records the pull for auditing and metrics.
  """
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
                                                    kind_filter='image')

  can_read = ReadRepositoryPermission(namespace_name, repo_name).can()
  if not can_read and not (repository_ref and repository_ref.is_public):
    abort(403)

  # We can't rely on permissions to tell us if a repo exists anymore
  if repository_ref is None:
    abort(404, message='Unknown repository', issue='unknown-repo')

  logger.debug('Building repository image response')
  resp = make_response(json.dumps([]), 200)
  resp.mimetype = 'application/json'

  track_and_log('pull_repo', repository_ref,
                analytics_name='pull_repo_100x',
                analytics_sample=0.01)
  metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
  return resp
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/images', methods=['DELETE'])
@process_auth
@parse_repository_name()
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@generate_headers(scope=GrantType.WRITE_REPOSITORY)
@anon_allowed
@check_readonly
def delete_repository_images(namespace_name, repo_name):
  """ V1 image deletion is intentionally unsupported; always answers 501. """
  abort(501, 'Not Implemented', issue='not-implemented')
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/auth', methods=['PUT'])
@parse_repository_name()
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@anon_allowed
@check_readonly
def put_repository_auth(namespace_name, repo_name):
  """ V1 repository auth updates are intentionally unsupported; always answers 501. """
  abort(501, 'Not Implemented', issue='not-implemented')
|
||||
|
||||
|
||||
@v1_bp.route('/search', methods=['GET'])
@process_auth
@anon_protect
def get_search():
  """ Docker CLI repository search endpoint.

      Query params: q (search string), n (page size, clamped to [1, 100]),
      page (page number). Bad numeric input falls back to the defaults.
  """
  query = request.args.get('q') or ''

  try:
    limit = min(100, max(1, int(request.args.get('n', 25))))
  except ValueError:
    limit = 25

  try:
    page = max(0, int(request.args.get('page', 1)))
  except ValueError:
    page = 1

  user = get_authenticated_user()
  username = user.username if user is not None else None

  data = _conduct_repo_search(username, query, limit, page)
  response = make_response(json.dumps(data), 200)
  response.mimetype = 'application/json'
  return response
|
||||
|
||||
|
||||
def _conduct_repo_search(username, query, limit=25, page=1):
|
||||
""" Finds matching repositories. """
|
||||
# Note that we put a maximum limit of five pages here, because this API should only really ever
|
||||
# be used by the Docker CLI, and it doesn't even paginate.
|
||||
page = min(page, 5)
|
||||
offset = (page - 1) * limit
|
||||
|
||||
if query:
|
||||
matching_repos = model.repository.get_filtered_matching_repositories(query,
|
||||
filter_username=username,
|
||||
offset=offset,
|
||||
limit=limit + 1)
|
||||
else:
|
||||
matching_repos = []
|
||||
|
||||
results = []
|
||||
for repo in matching_repos[0:limit]:
|
||||
results.append({
|
||||
'name': repo.namespace_user.username + '/' + repo.name,
|
||||
'description': repo.description,
|
||||
'is_public': model.repository.is_repository_public(repo),
|
||||
'href': '/repository/' + repo.namespace_user.username + '/' + repo.name
|
||||
})
|
||||
|
||||
# Defined: https://docs.docker.com/v1.6/reference/api/registry_api/
|
||||
return {
|
||||
'query': query,
|
||||
'num_results': len(results),
|
||||
'num_pages': page + 1 if len(matching_repos) > limit else page,
|
||||
'page': page,
|
||||
'page_size': limit,
|
||||
'results': results,
|
||||
}
|
407
endpoints/v1/registry.py
Normal file
407
endpoints/v1/registry.py
Normal file
|
@ -0,0 +1,407 @@
|
|||
import logging
|
||||
import json
|
||||
|
||||
from functools import wraps
|
||||
from datetime import datetime
|
||||
from time import time
|
||||
|
||||
from flask import make_response, request, session, Response, redirect, abort as flask_abort
|
||||
|
||||
from app import storage as store, app, docker_v2_signing_key, metric_queue
|
||||
from auth.auth_context import get_authenticated_user
|
||||
from auth.decorators import extract_namespace_repo_from_session, process_auth
|
||||
from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
|
||||
from data import database
|
||||
from data.registry_model import registry_model
|
||||
from data.registry_model.blobuploader import upload_blob, BlobUploadSettings, BlobUploadException
|
||||
from data.registry_model.manifestbuilder import lookup_manifest_builder
|
||||
from digest import checksums
|
||||
from endpoints.v1 import v1_bp, check_v1_push_enabled
|
||||
from endpoints.v1.index import ensure_namespace_enabled
|
||||
from endpoints.decorators import (anon_protect, check_region_blacklisted, check_repository_state,
|
||||
check_readonly)
|
||||
from util.http import abort, exact_abort
|
||||
from util.registry.replication import queue_storage_replication
|
||||
from util.request import get_request_ip
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def require_completion(f):
  """ Decorator which makes sure the image push correctly finished before serving
      the wrapped view: aborts with a 400 if the legacy image named by the
      `image_id` kwarg is still marked as uploading.

      The wrapped view must take (namespace, repository) as its first two
      positional arguments and receive `image_id` as a kwarg.
  """
  @wraps(f)
  def wrapper(namespace, repository, *args, **kwargs):
    image_id = kwargs['image_id']
    repository_ref = registry_model.lookup_repository(namespace, repository)
    if repository_ref is not None:
      legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
      if legacy_image is not None and legacy_image.uploading:
        abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress',
              image_id=image_id)

    return f(namespace, repository, *args, **kwargs)
  return wrapper
|
||||
|
||||
|
||||
def set_cache_headers(f):
|
||||
"""Returns HTTP headers suitable for caching."""
|
||||
|
||||
@wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
# Set TTL to 1 year by default
|
||||
ttl = 31536000
|
||||
expires = datetime.fromtimestamp(int(time()) + ttl)
|
||||
expires = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
|
||||
headers = {
|
||||
'Cache-Control': 'public, max-age={0}'.format(ttl),
|
||||
'Expires': expires,
|
||||
'Last-Modified': 'Thu, 01 Jan 1970 00:00:00 GMT',}
|
||||
if 'If-Modified-Since' in request.headers:
|
||||
response = make_response('Not modified', 304)
|
||||
response.headers.extend(headers)
|
||||
return response
|
||||
kwargs['headers'] = headers
|
||||
# Prevent the Cookie to be sent when the object is cacheable
|
||||
session.modified = False
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/layer', methods=['HEAD'])
@process_auth
@extract_namespace_repo_from_session
@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def head_image_layer(namespace, repository, image_id, headers):
  """ HEAD for a layer blob: returns caching headers (and Accept-Ranges when the
      storage engine supports resumable downloads) without any body.
  """
  permission = ReadRepositoryPermission(namespace, repository)
  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')

  logger.debug('Checking repo permissions')
  if permission.can() or (repository_ref is not None and repository_ref.is_public):
    if repository_ref is None:
      abort(404)

    logger.debug('Looking up placement locations')
    legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
    if legacy_image is None:
      logger.debug('Could not find any blob placement locations')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)

    # Add the Accept-Ranges header if the storage engine supports resumable
    # downloads.
    extra_headers = {}
    if store.get_supports_resumable_downloads(legacy_image.blob.placements):
      logger.debug('Storage supports resumable downloads')
      extra_headers['Accept-Ranges'] = 'bytes'

    resp = make_response('')
    resp.headers.extend(headers)
    resp.headers.extend(extra_headers)
    return resp

  abort(403)
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/layer', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@ensure_namespace_enabled
@require_completion
@set_cache_headers
@check_region_blacklisted()
@anon_protect
def get_image_layer(namespace, repository, image_id, headers):
  """ Serves a layer blob: redirects to a direct-download URL when storage provides
      one, otherwise streams the layer bytes through this process.
  """
  permission = ReadRepositoryPermission(namespace, repository)
  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')

  logger.debug('Checking repo permissions')
  if permission.can() or (repository_ref is not None and repository_ref.is_public):
    if repository_ref is None:
      abort(404)

    legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
    if legacy_image is None:
      abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)

    path = legacy_image.blob.storage_path
    metric_queue.pull_byte_count.Inc(legacy_image.blob.compressed_size, labelvalues=['v1'])

    try:
      logger.debug('Looking up the direct download URL for path: %s', path)
      direct_download_url = store.get_direct_download_url(legacy_image.blob.placements, path,
                                                          get_request_ip())
      if direct_download_url:
        logger.debug('Returning direct download URL')
        resp = redirect(direct_download_url)
        return resp

      # Close the database handle here for this process before we send the long download.
      database.close_db_filter(None)
      logger.debug('Streaming layer data')
      return Response(store.stream_read(legacy_image.blob.placements, path), headers=headers)
    except (IOError, AttributeError):
      logger.exception('Image layer data not found')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)

  abort(403)
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/layer', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@anon_protect
@check_readonly
def put_image_layer(namespace, repository, image_id):
  """ Receives a layer's bytes during a V1 push: streams the request body into blob
      storage (computing checksums along the way), validates any precomputed
      checksum, attaches the blob to the layer in the session's manifest builder,
      and queues storage replication.
  """
  logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
  if repository_ref is None:
    abort(403)

  logger.debug('Checking for image in manifest builder')
  builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store,
                                    docker_v2_signing_key)
  if builder is None:
    abort(400)

  layer = builder.lookup_layer(image_id)
  if layer is None:
    abort(404)

  logger.debug('Storing layer data')
  input_stream = request.stream
  if request.headers.get('transfer-encoding') == 'chunked':
    # Careful, might work only with WSGI servers supporting chunked
    # encoding (Gunicorn)
    input_stream = request.environ['wsgi.input']

  expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
  settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'],
                                bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'],
                                committed_blob_expiration=expiration_sec)

  extra_handlers = []

  # Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
  # which is only needed for older versions of Docker.
  requires_tarsum = bool(builder.get_layer_checksums(layer))
  if requires_tarsum:
    tmp, tmp_hndlr = store.temp_store_handler()
    extra_handlers.append(tmp_hndlr)

  # Add a handler which computes the simple Docker V1 checksum.
  h, sum_hndlr = checksums.simple_checksum_handler(layer.v1_metadata_string)
  extra_handlers.append(sum_hndlr)

  uploaded_blob = None
  try:
    with upload_blob(repository_ref, store, settings,
                     extra_blob_stream_handlers=extra_handlers) as manager:
      manager.upload_chunk(app.config, input_stream)
      uploaded_blob = manager.commit_to_blob(app.config)
  except BlobUploadException:
    logger.exception('Exception when writing image data')
    abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)

  # Compute the final checksum
  csums = []
  csums.append('sha256:{0}'.format(h.hexdigest()))

  try:
    if requires_tarsum:
      tmp.seek(0)
      csums.append(checksums.compute_tarsum(tmp, layer.v1_metadata_string))
      tmp.close()
  except (IOError, checksums.TarError) as exc:
    # Tarsum failure is best-effort: log and continue with the sha256 checksum only.
    logger.debug('put_image_layer: Error when computing tarsum %s', exc)

  # If there was already a precomputed checksum, validate against it now.
  if builder.get_layer_checksums(layer):
    checksum = builder.get_layer_checksums(layer)[0]
    if not builder.validate_layer_checksum(layer, checksum):
      logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
                   builder.get_layer_checksums(layer))
      abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
            image_id=image_id)

  # Assign the blob to the layer in the manifest.
  if not builder.assign_layer_blob(layer, uploaded_blob, csums):
    abort(500, 'Something went wrong')

  # Send a job to the work queue to replicate the image layer.
  # TODO: move this into a better place.
  queue_storage_replication(namespace, uploaded_blob)

  return make_response('true', 200)
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/checksum', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@anon_protect
@check_readonly
def put_image_checksum(namespace, repository, image_id):
  """ Records or validates the checksum for a pushed layer.

      Old Docker (< 0.10) sends a tarsum via X-Docker-Checksum, which is saved as a
      precomputed checksum for later validation in put_image_layer; newer clients
      send a sha via X-Docker-Checksum-Payload, which is validated immediately.
  """
  logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
  if repository_ref is None:
    abort(403)

  # Docker Version < 0.10 (tarsum+sha):
  old_checksum = request.headers.get('X-Docker-Checksum')

  # Docker Version >= 0.10 (sha):
  new_checksum = request.headers.get('X-Docker-Checksum-Payload')

  checksum = new_checksum or old_checksum
  if not checksum:
    abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum',
          image_id=image_id)

  logger.debug('Checking for image in manifest builder')
  builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store,
                                    docker_v2_signing_key)
  if builder is None:
    abort(400)

  layer = builder.lookup_layer(image_id)
  if layer is None:
    abort(404)

  if old_checksum:
    # Legacy tarsum: stash it now; put_image_layer validates it after upload.
    builder.save_precomputed_checksum(layer, checksum)
    return make_response('true', 200)

  if not builder.validate_layer_checksum(layer, checksum):
    logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
                 builder.get_layer_checksums(layer))
    abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
          image_id=image_id)

  return make_response('true', 200)
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/json', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def get_image_json(namespace, repository, image_id, headers):
  """ Returns the V1 JSON metadata for an image, with the layer's compressed size
      (when known) exposed via the X-Docker-Size header.
  """
  logger.debug('Checking repo permissions')
  permission = ReadRepositoryPermission(namespace, repository)
  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
  if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
    abort(403)

  logger.debug('Looking up repo image')
  legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
  if legacy_image is None:
    flask_abort(404)

  size = legacy_image.blob.compressed_size
  if size is not None:
    # Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
    # so handle this case rather than failing.
    headers['X-Docker-Size'] = str(size)

  response = make_response(legacy_image.v1_metadata_string, 200)
  response.headers.extend(headers)
  return response
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/ancestry', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def get_image_ancestry(namespace, repository, image_id, headers):
  """ Returns the JSON list of docker image IDs for the image followed by all of
      its parents, in order.
  """
  logger.debug('Checking repo permissions')
  read_permission = ReadRepositoryPermission(namespace, repository)
  repo_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
  if not (read_permission.can() or (repo_ref is not None and repo_ref.is_public)):
    abort(403)

  logger.debug('Looking up repo image')
  image = registry_model.get_legacy_image(repo_ref, image_id, include_parents=True)
  if image is None:
    abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)

  # NOTE: We can not use jsonify here because we are returning a list not an object.
  ancestry = [image.docker_image_id]
  ancestry.extend(parent.docker_image_id for parent in image.parents)

  response = make_response(json.dumps(ancestry), 200)
  response.headers.extend(headers)
  return response
|
||||
|
||||
|
||||
@v1_bp.route('/images/<image_id>/json', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@anon_protect
@check_readonly
def put_image_json(namespace, repository, image_id):
  """ Stores the V1 JSON metadata for an image as part of an in-progress push.

      The request body must be a JSON object whose `id` key matches the image_id
      in the URL; on success a new layer is started in the session's manifest
      builder and 'true' is returned.
  """
  logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
  if repository_ref is None:
    abort(403)

  builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store,
                                    docker_v2_signing_key)
  if builder is None:
    abort(400)

  logger.debug('Parsing image JSON')
  # Fix: `data` must be initialized before the try block. Previously, a body that
  # failed to decode or parse left `data` unbound, and the check below raised a
  # NameError (HTTP 500) instead of returning the intended 400.
  data = None
  uploaded_metadata = request.data
  try:
    # UnicodeDecodeError is a subclass of ValueError, so bad encodings are
    # handled by the same except clause as bad JSON.
    data = json.loads(uploaded_metadata.decode('utf8'))
  except ValueError:
    pass

  if not data or not isinstance(data, dict):
    abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s', issue='invalid-request',
          image_id=image_id, json=request.data)

  if 'id' not in data:
    abort(400, 'Missing key `id` in JSON for image: %(image_id)s', issue='invalid-request',
          image_id=image_id)

  if image_id != data['id']:
    abort(400, 'JSON data contains invalid id for image: %(image_id)s', issue='invalid-request',
          image_id=image_id)

  logger.debug('Looking up repo image')
  location_pref = store.preferred_locations[0]
  username = get_authenticated_user() and get_authenticated_user().username
  layer = builder.start_layer(image_id, uploaded_metadata, location_pref, username,
                              app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
  if layer is None:
    abort(400, 'Image %(image_id)s has invalid metadata',
          issue='invalid-request', image_id=image_id)

  return make_response('true', 200)
|
121
endpoints/v1/tag.py
Normal file
121
endpoints/v1/tag.py
Normal file
|
@ -0,0 +1,121 @@
|
|||
import logging
|
||||
import json
|
||||
|
||||
from flask import abort, request, jsonify, make_response, session
|
||||
|
||||
from app import storage, docker_v2_signing_key
|
||||
from auth.decorators import process_auth
|
||||
from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
|
||||
from data.registry_model import registry_model
|
||||
from data.registry_model.manifestbuilder import lookup_manifest_builder
|
||||
from endpoints.decorators import (anon_protect, parse_repository_name, check_repository_state,
|
||||
check_readonly)
|
||||
from endpoints.v1 import v1_bp, check_v1_push_enabled
|
||||
from util.audit import track_and_log
|
||||
from util.names import TAG_ERROR, TAG_REGEX
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/tags', methods=['GET'])
@process_auth
@anon_protect
@parse_repository_name()
def get_tags(namespace_name, repo_name):
  """ Returns a JSON map of tag name to legacy image ID for the repository. """
  read_permission = ReadRepositoryPermission(namespace_name, repo_name)
  repo_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')

  # Reject callers with no read permission on a non-public repository.
  if not (read_permission.can() or (repo_ref is not None and repo_ref.is_public)):
    abort(403)

  if repo_ref is None:
    abort(404)

  return jsonify(registry_model.get_legacy_tags_map(repo_ref, storage))
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/tags/<tag>', methods=['GET'])
@process_auth
@anon_protect
@parse_repository_name()
def get_tag(namespace_name, repo_name, tag):
  """ Returns, as a JSON string, the legacy image ID to which the tag points. """
  read_permission = ReadRepositoryPermission(namespace_name, repo_name)
  repo_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')

  # Reject callers with no read permission on a non-public repository.
  if not (read_permission.can() or (repo_ref is not None and repo_ref.is_public)):
    abort(403)

  if repo_ref is None:
    abort(404)

  image_id = registry_model.get_tag_legacy_image_id(repo_ref, tag, storage)
  if image_id is None:
    abort(404)

  # Hand-build the response: the body is a bare JSON string, not an object.
  resp = make_response('"%s"' % image_id)
  resp.headers['Content-Type'] = 'application/json'
  return resp
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/tags/<tag>', methods=['PUT'])
@process_auth
@anon_protect
@parse_repository_name()
@check_repository_state
@check_v1_push_enabled()
@check_readonly
def put_tag(namespace_name, repo_name, tag):
  """ Points the given tag at the image ID sent (as a JSON string) in the
      request body.

      The image is first looked up in the session's manifest builder (for an
      in-progress push); otherwise an existing legacy image is retargeted.
  """
  permission = ModifyRepositoryPermission(namespace_name, repo_name)
  repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')

  if permission.can() and repository_ref is not None:
    if not TAG_REGEX.match(tag):
      abort(400, TAG_ERROR)

    # Fix: a malformed request body previously raised an unhandled ValueError
    # (HTTP 500); reject it explicitly as a client error instead.
    try:
      image_id = json.loads(request.data)
    except ValueError:
      abort(400)

    # Check for the image ID first in a builder (for an in-progress push).
    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage,
                                      docker_v2_signing_key)
    if builder is not None:
      layer = builder.lookup_layer(image_id)
      if layer is not None:
        committed_tag = builder.commit_tag_and_manifest(tag, layer)
        if committed_tag is None:
          abort(400)

        return make_response('Created', 200)

    # Check if there is an existing image we should use (for PUT calls outside of a normal push
    # operation).
    legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
    if legacy_image is None:
      abort(400)

    if registry_model.retarget_tag(repository_ref, tag, legacy_image, storage,
                                   docker_v2_signing_key) is None:
      abort(400)

    return make_response('Created', 200)

  abort(403)
|
||||
|
||||
|
||||
@v1_bp.route('/repositories/<repopath:repository>/tags/<tag>', methods=['DELETE'])
@process_auth
@anon_protect
@parse_repository_name()
@check_repository_state
@check_v1_push_enabled()
@check_readonly
def delete_tag(namespace_name, repo_name, tag):
  """ Deletes the named tag from the repository, logging the deletion. """
  write_permission = ModifyRepositoryPermission(namespace_name, repo_name)
  repo_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')

  # Require write permission on an existing repository.
  if not write_permission.can() or repo_ref is None:
    abort(403)

  if not registry_model.delete_tag(repo_ref, tag):
    abort(404)

  track_and_log('delete_tag', repo_ref, tag=tag)
  return make_response('Deleted', 200)
|
Reference in a new issue