quay/endpoints/v1/registry.py

import logging
import json

from functools import wraps
from datetime import datetime
from time import time

from flask import make_response, request, session, Response, redirect, abort as flask_abort

from app import storage as store, app
from auth.auth_context import get_authenticated_user
from auth.permissions import (ReadRepositoryPermission,
                              ModifyRepositoryPermission)
from auth.process import process_auth, extract_namespace_repo_from_session
from auth.registry_jwt_auth import get_granted_username
from data import database
from data.interfaces.v1 import pre_oci_model as model
from digest import checksums
from endpoints.v1 import v1_bp
from endpoints.decorators import anon_protect
from util.http import abort, exact_abort
from util.registry.filelike import SocketReader
from util.registry import gzipstream
from util.registry.replication import queue_storage_replication
from util.registry.torrent import PieceHasher


logger = logging.getLogger(__name__)


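# A rough sketch of the Docker Registry v1 flow, as inferred from the handlers
# below: a push PUTs /images/<id>/json, then /images/<id>/layer, then
# /images/<id>/checksum for each layer; a pull GETs /images/<id>/ancestry once,
# then /images/<id>/json and /images/<id>/layer for each ancestor id.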
def _finish_image(namespace, repository, image_id):
  # The checksum is valid, so remove the upload-in-progress marker.
  blob_ref = model.update_image_uploading(namespace, repository, image_id, False)

  # Send a job to the work queue to replicate the image layer.
  queue_storage_replication(namespace, blob_ref)


def require_completion(f):
  """Ensures that the image push has correctly finished."""
  @wraps(f)
  def wrapper(namespace, repository, *args, **kwargs):
    # Note: the decorated route must expose its image id as an `image_id` keyword argument.
    image_id = kwargs['image_id']
    if model.is_image_uploading(namespace, repository, image_id):
      abort(400, 'Image %(image_id)s is being uploaded, retry later',
            issue='upload-in-progress', image_id=image_id)

    return f(namespace, repository, *args, **kwargs)

  return wrapper


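# Image ids are immutable once their push completes, which is presumably why the
# decorator below caches so aggressively: a one-year TTL, a fixed epoch
# Last-Modified, and an unconditional 304 for any If-Modified-Since request.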
def set_cache_headers(f):
  """Returns HTTP headers suitable for caching."""
  @wraps(f)
  def wrapper(*args, **kwargs):
    # Set the TTL to 1 year by default.
    ttl = 31536000
    expires = datetime.fromtimestamp(int(time()) + ttl)
    expires = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
    headers = {
      'Cache-Control': 'public, max-age={0}'.format(ttl),
      'Expires': expires,
      'Last-Modified': 'Thu, 01 Jan 1970 00:00:00 GMT',
    }
    if 'If-Modified-Since' in request.headers:
      response = make_response('Not modified', 304)
      response.headers.extend(headers)
      return response

    kwargs['headers'] = headers

    # Prevent the session cookie from being sent when the object is cacheable.
    session.modified = False
    return f(*args, **kwargs)

  return wrapper


@v1_bp.route('/images/<image_id>/layer', methods=['HEAD'])
@process_auth
@extract_namespace_repo_from_session
@require_completion
@set_cache_headers
@anon_protect
def head_image_layer(namespace, repository, image_id, headers):
  permission = ReadRepositoryPermission(namespace, repository)

  logger.debug('Checking repo permissions')
  if permission.can() or model.repository_is_public(namespace, repository):
    logger.debug('Looking up placement locations')
    locations, _ = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
    if locations is None:
      logger.debug('Could not find any blob placement locations')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
            image_id=image_id)

    # Add the Accept-Ranges header if the storage engine supports resumable
    # downloads.
    extra_headers = {}
    if store.get_supports_resumable_downloads(locations):
      logger.debug('Storage supports resumable downloads')
      extra_headers['Accept-Ranges'] = 'bytes'

    resp = make_response('')
    resp.headers.extend(headers)
    resp.headers.extend(extra_headers)
    return resp

  abort(403)


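# When the storage engine can produce a direct download URL (e.g. a pre-signed
# object-store URL), the GET handler below redirects the client to it instead of
# proxying the bytes; otherwise it streams the layer through this process, closing
# the database connection first since the transfer can be long-lived.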
@v1_bp.route('/images/<image_id>/layer', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@require_completion
@set_cache_headers
@anon_protect
def get_image_layer(namespace, repository, image_id, headers):
  permission = ReadRepositoryPermission(namespace, repository)

  logger.debug('Checking repo permissions')
  if permission.can() or model.repository_is_public(namespace, repository):
    logger.debug('Looking up placement locations and path')
    locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
    if not locations or not path:
      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
            image_id=image_id)

    try:
      logger.debug('Looking up the direct download URL for path: %s', path)
      direct_download_url = store.get_direct_download_url(locations, path)
      if direct_download_url:
        logger.debug('Returning direct download URL')
        resp = redirect(direct_download_url)
        return resp

      # Close the database handle for this process before we send the long download.
      database.close_db_filter(None)

      logger.debug('Streaming layer data')
      return Response(store.stream_read(locations, path), headers=headers)
    except (IOError, AttributeError):
      logger.exception('Image layer data not found')
      abort(404, 'Image %(image_id)s not found', issue='unknown-image',
            image_id=image_id)

  abort(403)


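# The PUT handler below reads the client's upload stream exactly once: a
# SocketReader fans each chunk out to a set of handlers (an optional temp-file copy
# for tarsum, compressed/uncompressed size counters, a BitTorrent piece hasher, and
# two checksum handlers) while the chunk is written to storage.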
@v1_bp.route('/images/<image_id>/layer', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@anon_protect
def put_image_layer(namespace, repository, image_id):
  logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  logger.debug('Retrieving image')
  if model.storage_exists(namespace, repository, image_id):
    exact_abort(409, 'Image already exists')

  v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
  if v1_metadata is None:
    abort(404)

  logger.debug('Storing layer data')
  input_stream = request.stream
  if request.headers.get('transfer-encoding') == 'chunked':
    # Careful: this may only work with WSGI servers that support chunked
    # transfer encoding (e.g. gunicorn).
    input_stream = request.environ['wsgi.input']

  # Create a socket reader to read the input stream containing the layer data.
  sr = SocketReader(input_stream)

  # Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
  # which is only needed for older versions of Docker.
  requires_tarsum = session.get('checksum_format') == 'tarsum'
  if requires_tarsum:
    tmp, tmp_hndlr = store.temp_store_handler()
    sr.add_handler(tmp_hndlr)

  # Add a handler to compute the compressed and uncompressed sizes of the layer.
  size_info, size_hndlr = gzipstream.calculate_size_handler()
  sr.add_handler(size_hndlr)

  # Add a handler to hash the chunks of the upload for torrenting.
  piece_hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])
  sr.add_handler(piece_hasher.update)

  # Add a handler which computes the checksum.
  h, sum_hndlr = checksums.simple_checksum_handler(v1_metadata.compat_json)
  sr.add_handler(sum_hndlr)

  # Add a handler which computes the content checksum only.
  ch, content_sum_hndlr = checksums.content_checksum_handler()
  sr.add_handler(content_sum_hndlr)

  # Stream write the data to storage.
  locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
  with database.CloseForLongOperation(app.config):
    try:
      store.stream_write(locations, path, sr)
    except IOError:
      logger.exception('Exception when writing image data')
      abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)

  # Save the size of the image.
  model.update_image_sizes(namespace, repository, image_id, size_info.compressed_size,
                           size_info.uncompressed_size)

  # Save the BitTorrent pieces.
  model.create_bittorrent_pieces(namespace, repository, image_id,
                                 piece_hasher.final_piece_hashes())

  # Append the computed checksum.
  csums = ['sha256:{0}'.format(h.hexdigest())]
  try:
    if requires_tarsum:
      tmp.seek(0)
      csums.append(checksums.compute_tarsum(tmp, v1_metadata.compat_json))
      tmp.close()
  except (IOError, checksums.TarError) as exc:
    logger.debug('put_image_layer: Error when computing tarsum %s', exc)

  if v1_metadata.checksum is None:
    # We don't have a checksum stored yet, so it's fine to skip the check here. We do not
    # remove the upload marker, though: the image is not downloadable yet.
    session['checksum'] = csums
    session['content_checksum'] = 'sha256:{0}'.format(ch.hexdigest())
    return make_response('true', 200)

  # Check whether the checksum provided by the client matches one of those we computed.
  if v1_metadata.checksum not in csums:
    logger.warning('put_image_layer: Wrong checksum')
    abort(400, 'Checksum mismatch; ignoring the layer for image %(image_id)s',
          issue='checksum-mismatch', image_id=image_id)

  # Mark the image as uploaded.
  _finish_image(namespace, repository, image_id)
  return make_response('true', 200)


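# Checksum-format negotiation: whichever header the client sends here determines
# session['checksum_format'] ('sha256' for X-Docker-Checksum-Payload, 'tarsum' for
# the legacy X-Docker-Checksum); put_image_layer reads that session value to decide
# whether a tarsum temp file is needed.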
@v1_bp.route('/images/<image_id>/checksum', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@anon_protect
def put_image_checksum(namespace, repository, image_id):
  logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  # Docker version < 0.10 (tarsum+sha):
  old_checksum = request.headers.get('X-Docker-Checksum')

  # Docker version >= 0.10 (sha):
  new_checksum = request.headers.get('X-Docker-Checksum-Payload')

  # Store whether we need to calculate the tarsum.
  if new_checksum:
    session['checksum_format'] = 'sha256'
  else:
    session['checksum_format'] = 'tarsum'

  checksum = new_checksum or old_checksum
  if not checksum:
    abort(400, 'Missing checksum for image %(image_id)s', issue='missing-checksum',
          image_id=image_id)

  if not session.get('checksum'):
    abort(400, 'Checksum not found in Cookie for image %(image_id)s',
          issue='missing-checksum-cookie', image_id=image_id)

  logger.debug('Looking up repo image')
  v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
  if not v1_metadata:
    abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

  logger.debug('Looking up repo layer data')
  if not v1_metadata.compat_json:
    abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)

  logger.debug('Marking image path')
  if not model.is_image_uploading(namespace, repository, image_id):
    abort(409, 'Cannot set checksum for image %(image_id)s',
          issue='image-write-error', image_id=image_id)

  logger.debug('Storing image and content checksums')
  content_checksum = session.get('content_checksum', None)

  checksum_parts = checksum.split(':')
  if len(checksum_parts) != 2:
    abort(400, 'Invalid checksum format')

  model.store_docker_v1_checksums(namespace, repository, image_id, checksum, content_checksum)

  if checksum not in session.get('checksum', []):
    logger.debug('session checksums: %s', session.get('checksum', []))
    logger.debug('client supplied checksum: %s', checksum)
    logger.debug('put_image_checksum: Wrong checksum')
    abort(400, 'Checksum mismatch for image: %(image_id)s',
          issue='checksum-mismatch', image_id=image_id)

  # Mark the image as uploaded.
  _finish_image(namespace, repository, image_id)

  return make_response('true', 200)


@v1_bp.route('/images/<image_id>/json', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@require_completion
@set_cache_headers
@anon_protect
def get_image_json(namespace, repository, image_id, headers):
  logger.debug('Checking repo permissions')
  permission = ReadRepositoryPermission(namespace, repository)
  if not permission.can() and not model.repository_is_public(namespace, repository):
    abort(403)

  logger.debug('Looking up repo image')
  v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
  if v1_metadata is None:
    flask_abort(404)

  logger.debug('Looking up repo layer size')
  size = model.get_image_size(namespace, repository, image_id)
  if size is not None:
    # Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
    # so handle this case rather than failing.
    headers['X-Docker-Size'] = str(size)

  response = make_response(v1_metadata.compat_json, 200)
  response.headers.extend(headers)
  return response


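# The ancestry endpoint below returns the flattened list of image ids from the
# requested image down to its base layer; v1 clients walk this list to fetch each
# layer's json and blob individually.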
@v1_bp.route('/images/<image_id>/ancestry', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@require_completion
@set_cache_headers
@anon_protect
def get_image_ancestry(namespace, repository, image_id, headers):
  logger.debug('Checking repo permissions')
  permission = ReadRepositoryPermission(namespace, repository)
  if not permission.can() and not model.repository_is_public(namespace, repository):
    abort(403)

  ancestry_docker_ids = model.image_ancestry(namespace, repository, image_id)
  if ancestry_docker_ids is None:
    abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)

  # We cannot use jsonify here because we are returning a list, not an object.
  response = make_response(json.dumps(ancestry_docker_ids), 200)
  response.headers.extend(headers)
  return response


@v1_bp.route('/images/<image_id>/json', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@anon_protect
def put_image_json(namespace, repository, image_id):
  logger.debug('Checking repo permissions')
  permission = ModifyRepositoryPermission(namespace, repository)
  if not permission.can():
    abort(403)

  logger.debug('Parsing image JSON')
  data = None
  try:
    uploaded_metadata = request.data
    data = json.loads(uploaded_metadata.decode('utf8'))
  except ValueError:
    pass

  if not data or not isinstance(data, dict):
    abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s',
          issue='invalid-request', image_id=image_id, json=request.data)

  if 'id' not in data:
    abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
          issue='invalid-request', image_id=image_id)

  if image_id != data['id']:
    abort(400, 'JSON data contains invalid id for image: %(image_id)s',
          issue='invalid-request', image_id=image_id)

  logger.debug('Looking up repo image')
  if not model.repository_exists(namespace, repository):
    abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
          namespace=namespace, repository=repository)

  v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
  if v1_metadata is None:
    username = get_authenticated_user() and get_authenticated_user().username
    if not username:
      username = get_granted_username()

    logger.debug('Image not found, creating or linking image with initiating user context: %s',
                 username)
    location_pref = store.preferred_locations[0]
    model.create_or_link_image(username, namespace, repository, image_id, location_pref)
    v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)

  # Create a temporary tag to prevent this image from getting garbage collected while the push
  # is in progress.
  model.create_temp_hidden_tag(namespace, repository, image_id,
                               app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])

  parent_id = data.get('parent', None)
  if parent_id:
    logger.debug('Looking up parent image')
    if model.docker_v1_metadata(namespace, repository, parent_id) is None:
      abort(400, 'Image %(image_id)s depends on non-existent parent image %(parent_id)s',
            issue='invalid-request', image_id=image_id, parent_id=parent_id)

  logger.debug('Checking if image already exists')
  if v1_metadata and not model.is_image_uploading(namespace, repository, image_id):
    exact_abort(409, 'Image already exists')

  model.update_image_uploading(namespace, repository, image_id, True)

  # If we reach this point, this is either a new image or a retry of a failed push;
  # save the metadata.
  command_list = data.get('container_config', {}).get('Cmd', None)
  command = json.dumps(command_list) if command_list else None

  logger.debug('Setting image metadata')
  model.update_docker_v1_metadata(namespace, repository, image_id, data.get('created'),
                                  data.get('comment'), command, uploaded_metadata, parent_id)
  return make_response('true', 200)