initial import for Open Source 🎉

This commit is contained in:
Jimmy Zelinskie 2019-11-12 11:09:47 -05:00
parent 1898c361f3
commit 9c0dd3b722
2048 changed files with 218743 additions and 0 deletions

View file

286
test/registry/fixtures.py Normal file
View file

@ -0,0 +1,286 @@
import copy
import logging.config
import json
import os
import shutil
from tempfile import NamedTemporaryFile
import pytest
from Crypto import Random
from flask import jsonify, g
from flask_principal import Identity
from app import storage
from data.database import (close_db_filter, configure, DerivedStorageForImage, QueueItem, Image,
TagManifest, TagManifestToManifest, Manifest, ManifestLegacyImage,
ManifestBlob, NamespaceGeoRestriction, User)
from data import model
from data.registry_model import registry_model
from endpoints.csrf import generate_csrf_token
from util.log import logfile_path
from test.registry.liveserverfixture import LiveServerExecutor
@pytest.fixture()
def registry_server_executor(app):
  """ Returns a LiveServerExecutor preloaded with helper functions for mutating
      server-side state (feature flags, config keys, images, tags, the database)
      from within the live server's own process.
  """
  def generate_csrf():
    # Mints a CSRF token inside the server process, for use by API callers.
    return generate_csrf_token()

  def set_supports_direct_download(enabled):
    # Toggles direct-download support on the 'local_us' storage engine.
    storage.put_content(['local_us'], 'supports_direct_download', 'true' if enabled else 'false')
    return 'OK'

  def delete_image(image_id):
    # Simulates image deletion by clobbering its docker image ID; the row remains.
    image = Image.get(docker_image_id=image_id)
    image.docker_image_id = 'DELETED'
    image.save()
    return 'OK'

  def get_storage_replication_entry(image_id):
    # Raises (via .get()) unless a replication queue item exists for the image's storage.
    image = Image.get(docker_image_id=image_id)
    QueueItem.select().where(QueueItem.queue_name ** ('%' + image.storage.uuid + '%')).get()
    return 'OK'

  def set_feature(feature_name, value):
    import features
    from app import app
    # Returns the previous value so callers (e.g. FeatureFlagValue) can restore it.
    old_value = features._FEATURES[feature_name].value
    features._FEATURES[feature_name].value = value
    app.config['FEATURE_%s' % feature_name] = value
    return jsonify({'old_value': old_value})

  def set_config_key(config_key, value):
    from app import app as current_app

    # Returns the previous value so callers (e.g. ConfigChange) can restore it.
    old_value = app.config.get(config_key)
    app.config[config_key] = value
    current_app.config[config_key] = value

    # Close any existing connection.
    close_db_filter(None)

    # Reload the database config.
    configure(app.config)
    return jsonify({'old_value': old_value})

  def clear_derived_cache():
    DerivedStorageForImage.delete().execute()
    return 'OK'

  def clear_uncompressed_size(image_id):
    image = model.image.get_image_by_id('devtable', 'newrepo', image_id)
    image.storage.uncompressed_size = None
    image.storage.save()
    return 'OK'

  def add_token():
    # Creates a write delegate token on devtable/newrepo and returns its full
    # token string, for use as credentials.
    another_token = model.token.create_delegate_token('devtable', 'newrepo', 'my-new-token',
                                                      'write')
    return model.token.get_full_token_string(another_token)

  def break_database():
    # Close any existing connection.
    close_db_filter(None)

    # Reload the database config with an invalid connection.
    config = copy.copy(app.config)
    config['DB_URI'] = 'sqlite:///not/a/valid/database'
    configure(config)
    return 'OK'

  def reload_app(server_hostname):
    # Close any existing connection.
    close_db_filter(None)

    # Reload the database config.
    app.config['SERVER_HOSTNAME'] = server_hostname[len('http://'):]
    configure(app.config)

    # Reload random after the process split, as it cannot be used uninitialized across forks.
    Random.atfork()

    # Required for anonymous calls to not exception.
    g.identity = Identity(None, 'none')

    if os.environ.get('DEBUGLOG') == 'true':
      logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)

    return 'OK'

  def create_app_repository(namespace, name):
    user = model.user.get_user(namespace)
    model.repository.create_repository(namespace, name, user, repo_kind='application')
    return 'OK'

  def disable_namespace(namespace):
    namespace_obj = model.user.get_namespace_user(namespace)
    namespace_obj.enabled = False
    namespace_obj.save()
    return 'OK'

  def delete_manifests():
    # Children are deleted before parents (presumably to satisfy FK constraints
    # — confirm against the schema).
    ManifestLegacyImage.delete().execute()
    ManifestBlob.delete().execute()
    Manifest.delete().execute()
    TagManifestToManifest.delete().execute()
    TagManifest.delete().execute()
    return 'OK'

  def set_geo_block_for_namespace(namespace_name, iso_country_code):
    NamespaceGeoRestriction.create(namespace=User.get(username=namespace_name),
                                   description='',
                                   unstructured_json={},
                                   restricted_region_iso_code=iso_country_code)
    return 'OK'

  executor = LiveServerExecutor()
  executor.register('generate_csrf', generate_csrf)
  executor.register('set_supports_direct_download', set_supports_direct_download)
  executor.register('delete_image', delete_image)
  executor.register('get_storage_replication_entry', get_storage_replication_entry)
  executor.register('set_feature', set_feature)
  executor.register('set_config_key', set_config_key)
  executor.register('clear_derived_cache', clear_derived_cache)
  executor.register('clear_uncompressed_size', clear_uncompressed_size)
  executor.register('add_token', add_token)
  executor.register('break_database', break_database)
  executor.register('reload_app', reload_app)
  executor.register('create_app_repository', create_app_repository)
  executor.register('disable_namespace', disable_namespace)
  executor.register('delete_manifests', delete_manifests)
  executor.register('set_geo_block_for_namespace', set_geo_block_for_namespace)
  return executor
@pytest.fixture(params=['pre_oci_model', 'oci_model'])
def data_model(request):
  """ Parametrizing fixture: yields the name of each registry data model in turn. """
  model_name = request.param
  return model_name
@pytest.fixture()
def liveserver_app(app, registry_server_executor, init_db_path, data_model):
  """ Configures the Flask app for use under the live server: selects the data
      model, attaches the executor's test blueprint, and points the app at a
      private copy of the clean test database.
  """
  # Change the data model being used.
  registry_model.set_for_testing(data_model == 'oci_model')

  registry_server_executor.apply_blueprint_to_app(app)

  is_debug = os.environ.get('DEBUG', 'false').lower() == 'true'
  if is_debug:
    app.config['DEBUG'] = True

  # Copy the clean database to a new path. We cannot share the DB created by the
  # normal app fixture, as it is already open in the local process.
  db_copy = NamedTemporaryFile(delete=True)
  db_copy.close()
  shutil.copy2(init_db_path, db_copy.name)
  app.config['DB_URI'] = 'sqlite:///' + db_copy.name
  return app
@pytest.fixture()
def app_reloader(request, liveserver, registry_server_executor):
  """ Reloads the live server's app (DB config, RNG, identity) before the test runs. """
  invoker = registry_server_executor.on(liveserver)
  invoker.reload_app(liveserver.url)
  yield
class FeatureFlagValue(object):
  """ Context manager which temporarily overrides a feature flag on a live server,
      restoring the previous value on exit.

      Usage:
        with FeatureFlagValue('ANONYMOUS_ACCESS', False, registry_server_executor.on(liveserver)):
          ... Features.ANONYMOUS_ACCESS is False in this context ...
  """
  def __init__(self, feature_flag, test_value, executor):
    self.feature_flag = feature_flag
    self.test_value = test_value
    self.executor = executor
    self.old_value = None

  def __enter__(self):
    # Remember the server's previous value so __exit__ can restore it.
    response = self.executor.set_feature(self.feature_flag, self.test_value)
    self.old_value = response.json()['old_value']

  def __exit__(self, exc_type, exc_value, exc_tb):
    self.executor.set_feature(self.feature_flag, self.old_value)
class ConfigChange(object):
  """ Context manager which temporarily overrides an app config key on a live
      server, restoring the previous value on exit.

      Usage:
        with ConfigChange('SOMEKEY', 'value', registry_server_executor.on(liveserver), liveserver):
          ... app.config['SOMEKEY'] is 'value' in this context ...
  """
  def __init__(self, config_key, test_value, executor, liveserver):
    self.config_key = config_key
    self.test_value = test_value
    self.executor = executor
    self.liveserver = liveserver
    self.old_value = None

  def __enter__(self):
    # Remember the server's previous value so __exit__ can restore it.
    response = self.executor.set_config_key(self.config_key, self.test_value)
    self.old_value = response.json()['old_value']

  def __exit__(self, exc_type, exc_value, exc_tb):
    self.executor.set_config_key(self.config_key, self.old_value)
class ApiCaller(object):
  """ Thin HTTP client for the app's API, bound to a live-server session.
      Automatically attaches a freshly minted CSRF token to every request.
  """
  def __init__(self, liveserver_session, registry_server_executor):
    self.liveserver_session = liveserver_session
    self.registry_server_executor = registry_server_executor

  def conduct_auth(self, username, password):
    """ Signs into the API with the given credentials; asserts success. """
    payload = json.dumps(dict(username=username, password=password))
    response = self.post('/api/v1/signin', data=payload,
                         headers={'Content-Type': 'application/json'})
    assert response.status_code == 200

  def _adjust_params(self, kwargs):
    # Every API call requires a CSRF token minted inside the server process.
    token = self.registry_server_executor.on_session(self.liveserver_session).generate_csrf()
    kwargs.setdefault('params', {})
    kwargs['params']['_csrf_token'] = token
    return kwargs

  def get(self, url, **kwargs):
    return self.liveserver_session.get(url, **self._adjust_params(kwargs))

  def post(self, url, **kwargs):
    return self.liveserver_session.post(url, **self._adjust_params(kwargs))

  def put(self, url, **kwargs):
    return self.liveserver_session.put(url, **self._adjust_params(kwargs))

  def delete(self, url, **kwargs):
    return self.liveserver_session.delete(url, **self._adjust_params(kwargs))

  def change_repo_visibility(self, namespace, repository, visibility):
    """ Sets the visibility of the given repository via the API. """
    endpoint = '/api/v1/repository/%s/%s/changevisibility' % (namespace, repository)
    self.post(endpoint,
              data=json.dumps(dict(visibility=visibility)),
              headers={'Content-Type': 'application/json'})
@pytest.fixture(scope="function")
def api_caller(liveserver, registry_server_executor):
  """ Returns an ApiCaller bound to a fresh session against the live server. """
  session = liveserver.new_session()
  return ApiCaller(session, registry_server_executor)

View file

@ -0,0 +1,283 @@
import inspect
import json
import multiprocessing
import socket
import socketserver
import time
from contextlib import contextmanager
from urlparse import urlparse, urljoin
import pytest
import requests
from flask import request
from flask.blueprints import Blueprint
class liveFlaskServer(object):
  """ Helper class for spawning a live Flask server for testing.
      Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py#L421
  """
  def __init__(self, app, port_value):
    # port_value is a multiprocessing.Value shared with the server subprocess,
    # through which the child reports the port the OS actually bound.
    self.app = app
    self._port_value = port_value
    self._process = None

  def get_server_url(self):
    """
    Return the url of the test server
    """
    return 'http://localhost:%s' % self._port_value.value

  def terminate_live_server(self):
    # Stops the server subprocess, if one was started.
    if self._process:
      self._process.terminate()

  def spawn_live_server(self):
    """ Starts the Flask app in a subprocess on an OS-assigned port, retrying
        until a connection succeeds or the retry budget is exhausted.
        Raises RuntimeError if the server never comes up.
    """
    self._process = None
    port_value = self._port_value

    def worker(app, port):
      # Based on solution: http://stackoverflow.com/a/27598916
      # Monkey-patch the server_bind so we can determine the port bound by Flask.
      # This handles the case where the port specified is `0`, which means that
      # the OS chooses the port. This is the only known way (currently) of getting
      # the port out of Flask once we call `run`.
      original_socket_bind = socketserver.TCPServer.server_bind

      def socket_bind_wrapper(self):
        ret = original_socket_bind(self)

        # Get the port and save it into the port_value, so the parent process
        # can read it.
        (_, port) = self.socket.getsockname()
        port_value.value = port

        # Restore the original bind; the patch is only needed for the first bind.
        socketserver.TCPServer.server_bind = original_socket_bind
        return ret

      socketserver.TCPServer.server_bind = socket_bind_wrapper
      app.run(port=port, use_reloader=False)

    retry_count = self.app.config.get('LIVESERVER_RETRY_COUNT', 3)
    started = False
    for _ in range(0, retry_count):
      if started:
        break

      self._process = multiprocessing.Process(target=worker, args=(self.app, 0))
      self._process.start()

      # We must wait for the server to start listening, but give up
      # after a specified maximum timeout
      timeout = self.app.config.get('LIVESERVER_TIMEOUT', 10)
      start_time = time.time()

      while True:
        time.sleep(0.1)

        elapsed_time = (time.time() - start_time)
        if elapsed_time > timeout:
          break

        if self._can_connect():
          self.app.config['SERVER_HOSTNAME'] = 'localhost:%s' % self._port_value.value
          started = True
          break

    if not started:
      raise RuntimeError("Failed to start the server after %d retries. " % retry_count)

  def _can_connect(self):
    """ Returns whether a TCP connection to the server's host/port currently succeeds. """
    host, port = self._get_server_address()
    if port == 0:
      # Port specified by the user was 0, and the OS has not yet assigned
      # the proper port.
      return False

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      sock.connect((host, port))
    except socket.error:
      success = False
    else:
      success = True
    finally:
      sock.close()

    return success

  def _get_server_address(self):
    """
    Gets the server address used to test the connection with a socket.
    Respects both the LIVESERVER_PORT config value and overriding
    get_server_url()
    """
    parts = urlparse(self.get_server_url())
    host = parts.hostname
    port = parts.port

    if port is None:
      # Fall back to the scheme's default port when the URL omits one.
      if parts.scheme == 'http':
        port = 80
      elif parts.scheme == 'https':
        port = 443
      else:
        raise RuntimeError("Unsupported server url scheme: %s" % parts.scheme)

    return host, port
class LiveFixtureServerSession(object):
  """ Helper class wrapping a single requests Session; resolves relative URLs
      against the live server's base URL before issuing each call.
  """
  def __init__(self, base_url):
    self.base_url = base_url
    self.session = requests.Session()

  def _get_url(self, url):
    # Resolve the (possibly relative) URL against the server's base URL.
    return urljoin(self.base_url, url)

  def get(self, url, **kwargs):
    target = self._get_url(url)
    return self.session.get(target, **kwargs)

  def post(self, url, **kwargs):
    target = self._get_url(url)
    return self.session.post(target, **kwargs)

  def put(self, url, **kwargs):
    target = self._get_url(url)
    return self.session.put(target, **kwargs)

  def delete(self, url, **kwargs):
    target = self._get_url(url)
    return self.session.delete(target, **kwargs)

  def request(self, method, url, **kwargs):
    target = self._get_url(url)
    return self.session.request(method, target, **kwargs)
class LiveFixtureServer(object):
  """ Handle for a running live server; hands out sessions for talking to it. """
  def __init__(self, url):
    self.url = url

  @contextmanager
  def session(self):
    """ Yields a session for speaking to the live server. """
    yield self.new_session()

  def new_session(self):
    """ Returns a new session for speaking to the live server. """
    return LiveFixtureServerSession(self.url)
@pytest.fixture(scope='function')
def liveserver(liveserver_app):
  """ Runs a live Flask server for the app for the duration of the test.
      Based on https://github.com/jarus/flask-testing/blob/master/flask_testing/utils.py#L421
  """
  ctx = liveserver_app.test_request_context()
  ctx.push()

  shared_port = multiprocessing.Value('i', 0)
  server = liveFlaskServer(liveserver_app, shared_port)

  try:
    server.spawn_live_server()
    yield LiveFixtureServer(server.get_server_url())
  finally:
    ctx.pop()
    server.terminate_live_server()
@pytest.fixture(scope='function')
def liveserver_session(liveserver, liveserver_app):
  """ Fixture which instantiates a liveserver and returns a single session for
      interacting with that server.
  """
  session = LiveFixtureServerSession(liveserver.url)
  return session
class LiveServerExecutor(object):
  """ Helper class which can be used to register functions to be executed in the
      same process as the live server. This is necessary because the live server
      runs in a different process and, therefore, in order to execute state changes
      outside of the server's normal flows (i.e. via code), it must be executed
      *in-process* via an HTTP call. The LiveServerExecutor class abstracts away
      all the setup for this process.

      Usage:
        def _perform_operation(first_param, second_param):
          ... do some operation in the app ...
          return 'some value'

        @pytest.fixture(scope="session")
        def my_server_executor():
          executor = LiveServerExecutor()
          executor.register('performoperation', _perform_operation)
          return executor

        @pytest.fixture()
        def liveserver_app(app, my_server_executor):
          ... other app setup here ...
          my_server_executor.apply_blueprint_to_app(app)
          return app

        def test_mytest(liveserver, my_server_executor):
          # Invokes 'performoperation' in the liveserver's process.
          my_server_executor.on(liveserver).performoperation('first', 'second')
  """
  def __init__(self):
    self.funcs = {}

  def register(self, fn_name, fn):
    """ Registers the given function under the given name. """
    self.funcs[fn_name] = fn

  def apply_blueprint_to_app(self, app):
    """ Applies a blueprint to the app, to support invocation from this executor. """
    testbp = Blueprint('testbp', __name__)

    def build_invoker(fn_name, fn):
      path = '/' + fn_name

      @testbp.route(path, methods=['POST'], endpoint=fn_name)
      def _(**kwargs):
        # The invoker POSTs a JSON body of the form {'args': [...]}.
        arg_values = request.get_json()['args']
        return fn(*arg_values)

    # Fix: use .items() instead of the Python 2-only .iteritems(); this module
    # already imports `socketserver` (the Python 3 module name), and .items()
    # works on both Python 2 and 3.
    for fn_name, fn in self.funcs.items():
      build_invoker(fn_name, fn)

    app.register_blueprint(testbp, url_prefix='/__test')

  def on(self, server):
    """ Returns an invoker for the given live server. """
    return liveServerExecutorInvoker(self.funcs, server)

  def on_session(self, server_session):
    """ Returns an invoker for the given live server session. """
    return liveServerExecutorInvoker(self.funcs, server_session)
class liveServerExecutorInvoker(object):
  """ Proxy whose attribute accesses resolve to registered executor functions;
      calling one issues a POST to the live server's /__test blueprint.
  """
  def __init__(self, funcs, server_or_session):
    self._funcs = funcs
    self._server_or_session = server_or_session

  def __getattribute__(self, name):
    # Underscore-prefixed attributes resolve normally (needed for _funcs itself).
    if name.startswith('_'):
      return object.__getattribute__(self, name)

    if name not in self._funcs:
      raise AttributeError('Unknown function: %s' % name)

    def _remote_call(*args):
      endpoint = '/__test/%s' % name
      request_headers = {'Content-Type': 'application/json'}
      body = json.dumps({'args': args})

      target = self._server_or_session
      if isinstance(target, LiveFixtureServerSession):
        return target.post(endpoint, data=body, headers=request_headers)

      with target.session() as session:
        return session.post(endpoint, data=body, headers=request_headers)

    return _remote_call

View file

@ -0,0 +1,228 @@
# -*- coding: utf-8 -*-
import random
import string
import pytest
from Crypto.PublicKey import RSA
from jwkest.jwk import RSAKey
from test.registry.fixtures import data_model
from test.registry.protocols import Image, layer_bytes_for_contents
from test.registry.protocol_v1 import V1Protocol
from test.registry.protocol_v2 import V2Protocol
@pytest.fixture(scope="session")
def basic_images():
  """ Returns basic images for push and pull testing. """
  # Note: order is from base layer down to leaf.
  parent = Image(id='parentid', bytes=layer_bytes_for_contents('parent contents'),
                 parent_id=None)
  leaf = Image(id='someid', bytes=layer_bytes_for_contents('some contents'),
               parent_id='parentid')
  return [parent, leaf]
@pytest.fixture(scope="session")
def unicode_images():
  """ Returns basic images for push and pull testing that contain unicode in the image metadata. """
  # Note: order is from base layer down to leaf.
  parent = Image(id='parentid', bytes=layer_bytes_for_contents('parent contents'),
                 parent_id=None)
  leaf_config = {'comment': u'the Pawe\xc5\x82 Kami\xc5\x84ski image',
                 'author': u'Sômé guy'}
  leaf = Image(id='someid', bytes=layer_bytes_for_contents('some contents'),
               parent_id='parentid', config=leaf_config)
  return [parent, leaf]
@pytest.fixture(scope="session")
def different_images():
  """ Returns different basic images for push and pull testing. """
  # Note: order is from base layer down to leaf.
  parent = Image(id='anotherparentid',
                 bytes=layer_bytes_for_contents('different parent contents'),
                 parent_id=None)
  leaf = Image(id='anothersomeid',
               bytes=layer_bytes_for_contents('some different contents'),
               parent_id='anotherparentid')
  return [parent, leaf]
@pytest.fixture(scope="session")
def sized_images():
  """ Returns basic images (with sizes) for push and pull testing. """
  # Note: order is from base layer down to leaf.
  parent_bytes = layer_bytes_for_contents('parent contents', mode='')
  image_bytes = layer_bytes_for_contents('some contents', mode='')

  parent = Image(id='parentid', bytes=parent_bytes, parent_id=None, size=len(parent_bytes),
                 config={'foo': 'bar'})
  leaf = Image(id='someid', bytes=image_bytes, parent_id='parentid', size=len(image_bytes),
               config={'foo': 'childbar', 'Entrypoint': ['hello']},
               created='2018-04-03T18:37:09.284840891Z')
  return [parent, leaf]
@pytest.fixture(scope="session")
def multi_layer_images():
  """ Returns complex images (with sizes) for push and pull testing. """
  # Note: order is from base layer down to leaf. Each spec is
  # (image id, internal id stored in the config, parent id, layer contents,
  #  extra files placed in the layer).
  specs = [
    ('layer1', 'layer1', None, 'layer 1 contents', {'file1': 'from-layer-1'}),
    ('layer2', 'layer2', 'layer1', 'layer 2 contents', {'file2': 'from-layer-2'}),
    ('layer3', 'layer3', 'layer2', 'layer 3 contents',
     {'file1': 'from-layer-3', 'file3': 'from-layer-3'}),
    ('layer4', 'layer4', 'layer3', 'layer 4 contents', {'file3': 'from-layer-4'}),
    ('someid', 'layer5', 'layer4', 'layer 5 contents', {'file4': 'from-layer-5'}),
  ]

  images = []
  for image_id, internal_id, parent_id, contents, other_files in specs:
    layer = layer_bytes_for_contents(contents, mode='', other_files=other_files)
    images.append(Image(id=image_id, bytes=layer, parent_id=parent_id, size=len(layer),
                        config={'internal_id': internal_id}))
  return images
@pytest.fixture(scope="session")
def remote_images():
  """ Returns images with at least one remote layer for push and pull testing. """
  # Note: order is from base layer down to leaf.
  remote = Image(id='remoteid', bytes=layer_bytes_for_contents('remote contents'),
                 parent_id=None, urls=['http://some/url'])
  parent = Image(id='parentid', bytes=layer_bytes_for_contents('parent contents'),
                 parent_id='remoteid')
  leaf = Image(id='someid', bytes=layer_bytes_for_contents('some contents'),
               parent_id='parentid')
  return [remote, parent, leaf]
@pytest.fixture(scope="session")
def images_with_empty_layer():
  """ Returns images for push and pull testing that contain an empty layer. """
  # Note: order is from base layer down to leaf. The same empty layer bytes are
  # shared by both empty images.
  empty_bytes = layer_bytes_for_contents('', empty=True)
  return [
    Image(id='parentid', bytes=layer_bytes_for_contents('parent contents'), parent_id=None),
    Image(id='emptyid', bytes=empty_bytes, parent_id='parentid', is_empty=True),
    Image(id='middleid', bytes=layer_bytes_for_contents('middle'), parent_id='emptyid'),
    Image(id='emptyid2', bytes=empty_bytes, parent_id='middleid', is_empty=True),
    Image(id='someid', bytes=layer_bytes_for_contents('some contents'), parent_id='emptyid2'),
  ]
@pytest.fixture(scope="session")
def unicode_emoji_images():
  """ Returns basic images for push and pull testing that contain unicode in the image metadata. """
  # Note: order is from base layer down to leaf.
  parent = Image(id='parentid', bytes=layer_bytes_for_contents('parent contents'),
                 parent_id=None)
  leaf_config = {'comment': u'😱',
                 'author': u'Sômé guy'}
  leaf = Image(id='someid', bytes=layer_bytes_for_contents('some contents'),
               parent_id='parentid', config=leaf_config)
  return [parent, leaf]
@pytest.fixture(scope="session")
def jwk():
  """ Session-scoped 2048-bit RSA key used by the protocols for signing. """
  rsa_key = RSA.generate(2048)
  return RSAKey(key=rsa_key)
@pytest.fixture(params=[V2Protocol])
def v2_protocol(request, jwk):
  """ Returns the V2 protocol implementation, keyed by the fixture parameter. """
  protocol_cls = request.param
  return protocol_cls(jwk)
@pytest.fixture()
def v22_protocol(request, jwk):
  """ Returns a V2 protocol configured to speak the schema2 manifest format. """
  protocol = V2Protocol(jwk, schema2=True)
  return protocol
@pytest.fixture(params=[V1Protocol])
def v1_protocol(request, jwk):
  """ Returns the V1 protocol implementation, keyed by the fixture parameter. """
  protocol_cls = request.param
  return protocol_cls(jwk)
@pytest.fixture(params=['schema1', 'schema2'])
def manifest_protocol(request, data_model, jwk):
  """ Returns a V2 protocol for manifest testing, parametrized over the manifest
      schema version. schema2 is only enabled when the OCI data model is active,
      matching the guards in the sibling `pusher`/`puller` fixtures.
  """
  # Bug fix: compare the fixture's parameter (request.param) to 'schema2', not
  # the FixtureRequest object itself — the old comparison was always False, so
  # the schema2 variant was silently never exercised.
  use_schema2 = request.param == 'schema2' and data_model == 'oci_model'
  return V2Protocol(jwk, schema2=use_schema2)
@pytest.fixture(params=['v1', 'v2_1', 'v2_2'])
def pusher(request, data_model, jwk):
  """ Returns a push protocol for each supported protocol version; schema2 only
      when the OCI data model is active.
  """
  version = request.param
  if version == 'v1':
    return V1Protocol(jwk)

  if version == 'v2_2' and data_model == 'oci_model':
    return V2Protocol(jwk, schema2=True)

  return V2Protocol(jwk)
@pytest.fixture(params=['v1', 'v2_1'])
def legacy_puller(request, data_model, jwk):
  """ Returns a pull protocol limited to the legacy (schema1-only) variants. """
  protocol_cls = V1Protocol if request.param == 'v1' else V2Protocol
  return protocol_cls(jwk)
@pytest.fixture(params=['v1', 'v2_1'])
def legacy_pusher(request, data_model, jwk):
  """ Returns a push protocol limited to the legacy (schema1-only) variants. """
  protocol_cls = V1Protocol if request.param == 'v1' else V2Protocol
  return protocol_cls(jwk)
@pytest.fixture(params=['v1', 'v2_1', 'v2_2'])
def puller(request, data_model, jwk):
  """ Returns a pull protocol for each supported protocol version; schema2 only
      when the OCI data model is active.
  """
  version = request.param
  if version == 'v1':
    return V1Protocol(jwk)

  if version == 'v2_2' and data_model == 'oci_model':
    return V2Protocol(jwk, schema2=True)

  return V2Protocol(jwk)
@pytest.fixture(params=[V1Protocol, V2Protocol])
def loginer(request, jwk):
  """ Returns each protocol implementation in turn, for login testing. """
  protocol_cls = request.param
  return protocol_cls(jwk)
@pytest.fixture(scope="session")
def random_layer_data():
  """ Returns layer bytes built from 4096 random uppercase-alphanumeric characters. """
  alphabet = string.ascii_uppercase + string.digits
  contents = ''.join(random.choice(alphabet) for _ in range(4096))
  return layer_bytes_for_contents(contents)

View file

@ -0,0 +1,263 @@
import json
from cStringIO import StringIO
from enum import Enum, unique
from digest.checksums import compute_simple, compute_tarsum
from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
PullResult)
@unique
class V1ProtocolSteps(Enum):
  """ Defines the various steps of the protocol, for matching failures. """
  PUT_IMAGES = 'put-images'          # PUT /v1/repositories/<repo>/
  GET_IMAGES = 'get-images'          # GET /v1/repositories/<repo>/images
  PUT_TAG = 'put-tag'                # PUT /v1/repositories/<repo>/tags/<tag>
  PUT_IMAGE_JSON = 'put-image-json'  # PUT /v1/images/<id>/json
  DELETE_TAG = 'delete-tag'          # DELETE /v1/repositories/<repo>/tags/<tag>
  GET_TAG = 'get-tag'                # GET /v1/repositories/<repo>/tags/<tag>
  GET_LAYER = 'get-layer'            # GET /v1/images/<id>/layer
class V1Protocol(RegistryProtocol):
FAILURE_CODES = {
V1ProtocolSteps.PUT_IMAGES: {
Failures.INVALID_AUTHENTICATION: 403,
Failures.UNAUTHENTICATED: 401,
Failures.UNAUTHORIZED: 403,
Failures.APP_REPOSITORY: 405,
Failures.SLASH_REPOSITORY: 404,
Failures.INVALID_REPOSITORY: 400,
Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
Failures.NAMESPACE_DISABLED: 400,
Failures.READ_ONLY: 405,
Failures.MIRROR_ONLY: 405,
Failures.MIRROR_MISCONFIGURED: 500,
Failures.MIRROR_ROBOT_MISSING: 400,
Failures.READONLY_REGISTRY: 405,
},
V1ProtocolSteps.GET_IMAGES: {
Failures.INVALID_AUTHENTICATION: 403,
Failures.UNAUTHENTICATED: 403,
Failures.UNAUTHORIZED: 403,
Failures.APP_REPOSITORY: 404,
Failures.ANONYMOUS_NOT_ALLOWED: 401,
Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
Failures.NAMESPACE_DISABLED: 400,
},
V1ProtocolSteps.PUT_IMAGE_JSON: {
Failures.INVALID_IMAGES: 400,
Failures.READ_ONLY: 405,
Failures.MIRROR_ONLY: 405,
Failures.MIRROR_MISCONFIGURED: 500,
Failures.MIRROR_ROBOT_MISSING: 400,
Failures.READONLY_REGISTRY: 405,
},
V1ProtocolSteps.PUT_TAG: {
Failures.MISSING_TAG: 404,
Failures.INVALID_TAG: 400,
Failures.INVALID_IMAGES: 400,
Failures.NAMESPACE_DISABLED: 400,
Failures.READ_ONLY: 405,
Failures.MIRROR_ONLY: 405,
Failures.MIRROR_MISCONFIGURED: 500,
Failures.MIRROR_ROBOT_MISSING: 400,
Failures.READONLY_REGISTRY: 405,
},
V1ProtocolSteps.GET_LAYER: {
Failures.GEO_BLOCKED: 403,
},
V1ProtocolSteps.GET_TAG: {
Failures.UNKNOWN_TAG: 404,
},
}
def __init__(self, jwk):
pass
def _auth_for_credentials(self, credentials):
if credentials is None:
return None
return credentials
def ping(self, session):
assert session.get('/v1/_ping').status_code == 200
def login(self, session, username, password, scopes, expect_success):
data = {
'username': username,
'password': password,
}
response = self.conduct(session, 'POST', '/v1/users/', json_data=data, expected_status=400)
assert (response.text == '"Username or email already exists"') == expect_success
def pull(self, session, namespace, repo_name, tag_names, images, credentials=None,
expected_failure=None, options=None):
options = options or ProtocolOptions()
auth = self._auth_for_credentials(credentials)
tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
prefix = '/v1/repositories/%s/' % self.repo_name(namespace, repo_name)
# Ping!
self.ping(session)
# GET /v1/repositories/{namespace}/{repository}/images
headers = {'X-Docker-Token': 'true'}
result = self.conduct(session, 'GET', prefix + 'images', auth=auth, headers=headers,
expected_status=(200, expected_failure, V1ProtocolSteps.GET_IMAGES))
if result.status_code != 200:
return
headers = {}
if credentials is not None:
headers['Authorization'] = 'token ' + result.headers['www-authenticate']
else:
assert not 'www-authenticate' in result.headers
# GET /v1/repositories/{namespace}/{repository}/tags
image_ids = self.conduct(session, 'GET', prefix + 'tags', headers=headers).json()
for tag_name in tag_names:
# GET /v1/repositories/{namespace}/{repository}/tags/<tag_name>
image_id_data = self.conduct(session, 'GET', prefix + 'tags/' + tag_name,
headers=headers,
expected_status=(200, expected_failure,
V1ProtocolSteps.GET_TAG))
if tag_name not in image_ids:
assert expected_failure == Failures.UNKNOWN_TAG
return None
tag_image_id = image_ids[tag_name]
assert image_id_data.json() == tag_image_id
# Retrieve the ancestry of the tagged image.
image_prefix = '/v1/images/%s/' % tag_image_id
ancestors = self.conduct(session, 'GET', image_prefix + 'ancestry', headers=headers).json()
assert len(ancestors) == len(images)
for index, image_id in enumerate(reversed(ancestors)):
# /v1/images/{imageID}/{ancestry, json, layer}
image_prefix = '/v1/images/%s/' % image_id
self.conduct(session, 'GET', image_prefix + 'ancestry', headers=headers)
result = self.conduct(session, 'GET', image_prefix + 'json', headers=headers)
assert result.json()['id'] == image_id
# Ensure we can HEAD the image layer.
self.conduct(session, 'HEAD', image_prefix + 'layer', headers=headers)
# And retrieve the layer data.
result = self.conduct(session, 'GET', image_prefix + 'layer', headers=headers,
expected_status=(200, expected_failure, V1ProtocolSteps.GET_LAYER),
options=options)
if result.status_code == 200:
assert result.content == images[index].bytes
return PullResult(manifests=None, image_ids=image_ids)
def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
expected_failure=None, options=None):
auth = self._auth_for_credentials(credentials)
tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
# Ping!
self.ping(session)
# PUT /v1/repositories/{namespace}/{repository}/
result = self.conduct(session, 'PUT',
'/v1/repositories/%s/' % self.repo_name(namespace, repo_name),
expected_status=(201, expected_failure, V1ProtocolSteps.PUT_IMAGES),
json_data={},
auth=auth)
if result.status_code != 201:
return
headers = {}
headers['Authorization'] = 'token ' + result.headers['www-authenticate']
for image in images:
assert image.urls is None
# PUT /v1/images/{imageID}/json
image_json_data = {'id': image.id}
if image.size is not None:
image_json_data['Size'] = image.size
if image.parent_id is not None:
image_json_data['parent'] = image.parent_id
if image.config is not None:
image_json_data['config'] = image.config
if image.created is not None:
image_json_data['created'] = image.created
image_json = json.dumps(image_json_data)
response = self.conduct(session, 'PUT', '/v1/images/%s/json' % image.id,
data=image_json, headers=headers,
expected_status=(200, expected_failure,
V1ProtocolSteps.PUT_IMAGE_JSON))
if response.status_code != 200:
return
# PUT /v1/images/{imageID}/checksum (old style)
old_checksum = compute_tarsum(StringIO(image.bytes), image_json)
checksum_headers = {'X-Docker-Checksum': old_checksum}
checksum_headers.update(headers)
self.conduct(session, 'PUT', '/v1/images/%s/checksum' % image.id,
headers=checksum_headers)
# PUT /v1/images/{imageID}/layer
self.conduct(session, 'PUT', '/v1/images/%s/layer' % image.id,
data=StringIO(image.bytes), headers=headers)
# PUT /v1/images/{imageID}/checksum (new style)
checksum = compute_simple(StringIO(image.bytes), image_json)
checksum_headers = {'X-Docker-Checksum-Payload': checksum}
checksum_headers.update(headers)
self.conduct(session, 'PUT', '/v1/images/%s/checksum' % image.id,
headers=checksum_headers)
# PUT /v1/repositories/{namespace}/{repository}/tags/latest
for tag_name in tag_names:
self.conduct(session, 'PUT',
'/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name),
data='"%s"' % images[-1].id,
headers=headers,
expected_status=(200, expected_failure, V1ProtocolSteps.PUT_TAG))
# PUT /v1/repositories/{namespace}/{repository}/images
self.conduct(session, 'PUT',
'/v1/repositories/%s/images' % self.repo_name(namespace, repo_name),
expected_status=204, headers=headers)
return PushResult(manifests=None, headers=headers)
def delete(self, session, namespace, repo_name, tag_names, credentials=None,
expected_failure=None, options=None):
auth = self._auth_for_credentials(credentials)
tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
# Ping!
self.ping(session)
for tag_name in tag_names:
# DELETE /v1/repositories/{namespace}/{repository}/tags/{tag}
self.conduct(session, 'DELETE',
'/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name),
auth=auth,
expected_status=(200, expected_failure, V1ProtocolSteps.DELETE_TAG))
def tag(self, session, namespace, repo_name, tag_name, image, credentials=None,
        expected_failure=None, options=None):
  """ Points the given tag at the given image via the V1 protocol. """
  auth = self._auth_for_credentials(credentials)
  url = '/v1/repositories/%s/tags/%s' % (self.repo_name(namespace, repo_name), tag_name)

  # PUT the tag; the request body is the image ID as a JSON string.
  self.conduct(session, 'PUT', url,
               data='"%s"' % image.id,
               auth=auth,
               expected_status=(200, expected_failure, V1ProtocolSteps.PUT_TAG))

View file

@ -0,0 +1,705 @@
import hashlib
import json
from enum import Enum, unique
from image.docker.schema1 import (DockerSchema1ManifestBuilder, DockerSchema1Manifest,
DOCKER_SCHEMA1_CONTENT_TYPES)
from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from image.docker.schema2.config import DockerSchema2Config
from image.docker.schema2.list import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
from image.docker.schemas import parse_manifest_from_bytes
from test.registry.protocols import (RegistryProtocol, Failures, ProtocolOptions, PushResult,
PullResult)
from util.bytes import Bytes
@unique
class V2ProtocolSteps(Enum):
  """ Defines the various steps of the protocol, for matching failures. """
  # Each member names one request in the V2 registry flow. Protocol methods
  # pass a step (together with an anticipated Failures member) so that
  # RegistryProtocol.conduct can look up the expected HTTP status code in
  # V2Protocol.FAILURE_CODES.
  AUTH = 'auth'
  BLOB_HEAD_CHECK = 'blob-head-check'
  GET_MANIFEST = 'get-manifest'
  GET_MANIFEST_LIST = 'get-manifest-list'
  PUT_MANIFEST = 'put-manifest'
  PUT_MANIFEST_LIST = 'put-manifest-list'
  MOUNT_BLOB = 'mount-blob'
  CATALOG = 'catalog'
  LIST_TAGS = 'list-tags'
  START_UPLOAD = 'start-upload'
  GET_BLOB = 'get-blob'
class V2Protocol(RegistryProtocol):
  """ Implements the Docker Registry HTTP API V2 against the test registry.

  FAILURE_CODES maps each protocol step to the HTTP status expected when a
  particular anticipated Failures member applies; steps without an entry for
  a failure fall back to the caller-supplied default status in `conduct`.
  """
  FAILURE_CODES = {
    V2ProtocolSteps.AUTH: {
      Failures.UNAUTHENTICATED: 401,
      Failures.INVALID_AUTHENTICATION: 401,
      Failures.INVALID_REGISTRY: 400,
      Failures.APP_REPOSITORY: 405,
      Failures.ANONYMOUS_NOT_ALLOWED: 401,
      Failures.INVALID_REPOSITORY: 400,
      Failures.SLASH_REPOSITORY: 400,
      Failures.NAMESPACE_DISABLED: 405,
    },
    V2ProtocolSteps.MOUNT_BLOB: {
      Failures.UNAUTHORIZED_FOR_MOUNT: 202,
      Failures.READONLY_REGISTRY: 405,
    },
    V2ProtocolSteps.GET_MANIFEST: {
      Failures.UNKNOWN_TAG: 404,
      Failures.UNAUTHORIZED: 401,
      Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
      Failures.ANONYMOUS_NOT_ALLOWED: 401,
    },
    V2ProtocolSteps.GET_BLOB: {
      Failures.GEO_BLOCKED: 403,
    },
    V2ProtocolSteps.BLOB_HEAD_CHECK: {
      Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
    },
    V2ProtocolSteps.START_UPLOAD: {
      Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
      # NOTE: the READ_ONLY entry was previously duplicated in this dict
      # (a silent duplicate-key overwrite); it is listed exactly once now.
      Failures.READ_ONLY: 401,
      Failures.MIRROR_ONLY: 401,
      Failures.MIRROR_MISCONFIGURED: 401,
      Failures.MIRROR_ROBOT_MISSING: 401,
      Failures.READONLY_REGISTRY: 405,
    },
    V2ProtocolSteps.PUT_MANIFEST: {
      Failures.DISALLOWED_LIBRARY_NAMESPACE: 400,
      Failures.MISSING_TAG: 404,
      Failures.INVALID_TAG: 404,
      Failures.INVALID_IMAGES: 400,
      Failures.INVALID_BLOB: 400,
      Failures.UNSUPPORTED_CONTENT_TYPE: 415,
      Failures.READ_ONLY: 401,
      Failures.MIRROR_ONLY: 401,
      Failures.MIRROR_MISCONFIGURED: 401,
      Failures.MIRROR_ROBOT_MISSING: 401,
      Failures.READONLY_REGISTRY: 405,
    },
    V2ProtocolSteps.PUT_MANIFEST_LIST: {
      Failures.INVALID_MANIFEST: 400,
      Failures.READ_ONLY: 401,
      Failures.MIRROR_ONLY: 401,
      Failures.MIRROR_MISCONFIGURED: 401,
      Failures.MIRROR_ROBOT_MISSING: 401,
      Failures.READONLY_REGISTRY: 405,
    }
  }
def __init__(self, jwk, schema2=False):
  """ jwk: signing key for schema1 manifests; schema2: whether to build
      schema2 manifests when pushing. """
  self.schema2 = schema2
  self.jwk = jwk
def ping(self, session):
  """ Hits the V2 base endpoint and verifies the unauthenticated challenge. """
  response = session.get('/v2/')
  # An anonymous ping must be challenged with a 401 and must advertise the
  # registry API version header.
  assert response.status_code == 401
  assert response.headers['Docker-Distribution-API-Version'] == 'registry/2.0'
def login(self, session, username, password, scopes, expect_success):
  """ Performs the V2 token login flow with the given credentials over the
      given scope(s), asserting a 2xx response on expected success and a 4xx
      response on expected failure. Returns the auth response. """
  scopes = scopes if isinstance(scopes, list) else [scopes]
  params = {
    'account': username,
    'service': 'localhost:5000',
    'scope': scopes,
  }

  auth = (username, password)
  if not username or not password:
    auth = None

  response = session.get('/v2/auth', params=params, auth=auth)

  # Use floor division so the status-class check also holds under Python 3,
  # where `/` is true division (e.g. 201 / 100 == 2.01, which != 2).
  if expect_success:
    assert response.status_code // 100 == 2
  else:
    assert response.status_code // 100 == 4

  return response
def auth(self, session, credentials, namespace, repo_name, scopes=None,
         expected_failure=None):
  """
  Performs the V2 Auth flow, returning the token (if any) and the response.

  Spec: https://docs.docker.com/registry/spec/auth/token/
  """
  scopes = scopes or []

  if credentials is not None:
    username, _ = credentials
    auth = credentials
  else:
    username = None
    auth = None

  params = {
    'account': username,
    'service': 'localhost:5000',
  }
  if scopes:
    params['scope'] = scopes

  response = self.conduct(session, 'GET', '/v2/auth', params=params, auth=auth,
                          expected_status=(200, expected_failure, V2ProtocolSteps.AUTH))

  # A token must be present unless the anticipated failure maps to an
  # AUTH-step error code.
  auth_failures = V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH]
  if expected_failure is None or not auth_failures.get(expected_failure):
    token = response.json().get('token')
    assert token is not None
    return token, response

  return None, response
def pull_list(self, session, namespace, repo_name, tag_names, manifestlist,
              credentials=None, expected_failure=None, options=None):
  """ Pulls the given tag(s), expecting each to resolve to the given schema2
      manifest list, then pulls every child manifest it references and
      verifies each against its digest. """
  options = options or ProtocolOptions()
  scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
  tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

  # Ping!
  self.ping(session)

  # Perform auth and retrieve a token.
  token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                       expected_failure=expected_failure)
  if token is None:
    # Auth failed; that is only acceptable when the anticipated failure maps
    # to an AUTH-step error code.
    assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
    return

  headers = {
    'Authorization': 'Bearer ' + token,
    'Accept': ','.join(DOCKER_SCHEMA2_CONTENT_TYPES),
  }

  for tag_name in tag_names:
    # Retrieve the manifest for the tag or digest.
    response = self.conduct(session, 'GET',
                            '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name),
                                                     tag_name),
                            expected_status=(200, expected_failure,
                                             V2ProtocolSteps.GET_MANIFEST_LIST),
                            headers=headers)
    if expected_failure is not None:
      return None

    # Parse the returned manifest list and ensure it matches.
    ct = response.headers['Content-Type']
    assert ct == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
    retrieved = parse_manifest_from_bytes(Bytes.for_string_or_unicode(response.text), ct)
    assert retrieved.schema_version == 2
    assert retrieved.is_manifest_list
    assert retrieved.digest == manifestlist.digest

    # Pull each of the manifests inside and ensure they can be retrieved.
    for manifest_digest in retrieved.child_manifest_digests():
      response = self.conduct(session, 'GET',
                              '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name),
                                                       manifest_digest),
                              expected_status=(200, expected_failure,
                                               V2ProtocolSteps.GET_MANIFEST),
                              headers=headers)
      if expected_failure is not None:
        return None

      ct = response.headers['Content-Type']
      manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(response.text), ct)
      assert not manifest.is_manifest_list
      assert manifest.digest == manifest_digest
def push_list(self, session, namespace, repo_name, tag_names, manifestlist, manifests, blobs,
              credentials=None, expected_failure=None, options=None):
  """ Pushes a schema2 manifest list: uploads all blobs, PUTs each child
      manifest by digest, then PUTs the manifest list itself under each tag.
      Returns a PushResult (its `manifests` field is None for list pushes). """
  options = options or ProtocolOptions()
  scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
  tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

  # Ping!
  self.ping(session)

  # Perform auth and retrieve a token.
  token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                       expected_failure=expected_failure)
  if token is None:
    # Auth failed; only acceptable when an AUTH-step failure was anticipated.
    assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
    return

  headers = {
    'Authorization': 'Bearer ' + token,
    'Accept': ','.join(options.accept_mimetypes) if options.accept_mimetypes is not None else '*/*',
  }

  # Push all blobs.
  if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
                          expected_failure):
    return

  # Push the individual manifests.
  for manifest in manifests:
    manifest_headers = {'Content-Type': manifest.media_type}
    manifest_headers.update(headers)
    self.conduct(session, 'PUT',
                 '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), manifest.digest),
                 data=manifest.bytes.as_encoded_str(),
                 expected_status=(202, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
                 headers=manifest_headers)

  # Push the manifest list.
  for tag_name in tag_names:
    manifest_headers = {'Content-Type': manifestlist.media_type}
    manifest_headers.update(headers)

    # Allow tests to override the content type (e.g. to exercise rejection).
    if options.manifest_content_type is not None:
      manifest_headers['Content-Type'] = options.manifest_content_type

    self.conduct(session, 'PUT',
                 '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_name),
                 data=manifestlist.bytes.as_encoded_str(),
                 expected_status=(202, expected_failure, V2ProtocolSteps.PUT_MANIFEST_LIST),
                 headers=manifest_headers)

  return PushResult(manifests=None, headers=headers)
def build_schema2(self, images, blobs, options):
  """ Builds a schema2 manifest for the given images, registering each layer's
      bytes and the serialized config blob in the `blobs` dict (keyed by
      digest). Returns the built manifest. """
  builder = DockerSchema2ManifestBuilder()
  for image in images:
    checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()

    # Remote (URL-backed) layers are referenced but never uploaded.
    if image.urls is None:
      blobs[checksum] = image.bytes

    # If invalid blob references were requested, just make it up.
    if options.manifest_invalid_blob_references:
      checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()

    if not image.is_empty:
      builder.add_layer(checksum, len(image.bytes), urls=image.urls)

  def history_for_image(image):
    # Synthetic history entry shaped like real `docker build` output.
    history = {
      'created': '2018-04-03T18:37:09.284840891Z',
      'created_by': (('/bin/sh -c #(nop) ENTRYPOINT %s' % image.config['Entrypoint'])
                     if image.config and image.config.get('Entrypoint')
                     else '/bin/sh -c #(nop) %s' % image.id),
    }

    if image.is_empty:
      history['empty_layer'] = True

    return history

  config = {
    "os": "linux",
    "rootfs": {
      "type": "layers",
      "diff_ids": []
    },
    "history": [history_for_image(image) for image in images],
  }

  # The final image's container config, if any, becomes the manifest config.
  if images[-1].config:
    config['config'] = images[-1].config

  config_json = json.dumps(config, ensure_ascii=options.ensure_ascii)
  schema2_config = DockerSchema2Config(Bytes.for_string_or_unicode(config_json))
  builder.set_config(schema2_config)

  blobs[schema2_config.digest] = schema2_config.bytes.as_encoded_str()
  return builder.build(ensure_ascii=options.ensure_ascii)
def build_schema1(self, namespace, repo_name, tag_name, images, blobs, options, arch='amd64'):
  """ Builds and signs a schema1 manifest for the given images, registering
      each layer's bytes in `blobs` keyed by digest. Returns the built,
      validated manifest. """
  builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name, arch)

  # Schema1 lists layers leaf-first, so walk the images in reverse.
  for image in reversed(images):
    assert image.urls is None

    digest = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
    blobs[digest] = image.bytes

    # If invalid blob references were requested, just make it up.
    if options.manifest_invalid_blob_references:
      digest = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()

    # Assemble the V1 compatibility metadata for this layer.
    v1_metadata = {'id': image.id, 'parent': image.parent_id}
    for key, value in (('config', image.config), ('Size', image.size),
                       ('created', image.created)):
      if value is not None:
        v1_metadata[key] = value

    builder.add_layer(digest, json.dumps(v1_metadata, ensure_ascii=options.ensure_ascii))

  # Build the manifest.
  signed = builder.build(self.jwk, ensure_ascii=options.ensure_ascii)

  # Validate it before we send it.
  DockerSchema1Manifest(signed.bytes)
  return signed
def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
         expected_failure=None, options=None):
  """ Pushes the given images under the given tag(s): authenticates, builds a
      schema1 or schema2 manifest per tag, uploads all layer blobs, then PUTs
      each manifest. Returns a PushResult with the built manifests (keyed by
      tag) and the auth headers used. """
  options = options or ProtocolOptions()
  scopes = options.scopes or ['repository:%s:push,pull' % self.repo_name(namespace, repo_name)]
  tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

  # Ping!
  self.ping(session)

  # Perform auth and retrieve a token.
  token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                       expected_failure=expected_failure)
  if token is None:
    # Auth failed; only acceptable when an AUTH-step failure was anticipated.
    assert V2Protocol.FAILURE_CODES[V2ProtocolSteps.AUTH].get(expected_failure)
    return

  headers = {
    'Authorization': 'Bearer ' + token,
    'Accept': ','.join(options.accept_mimetypes) if options.accept_mimetypes is not None else '*/*',
  }

  # Build fake manifests.
  manifests = {}
  blobs = {}
  for tag_name in tag_names:
    if self.schema2:
      manifests[tag_name] = self.build_schema2(images, blobs, options)
    else:
      manifests[tag_name] = self.build_schema1(namespace, repo_name, tag_name, images, blobs,
                                               options)

  # Push the blob data.
  if not self._push_blobs(blobs, session, namespace, repo_name, headers, options,
                          expected_failure):
    return

  # Write a manifest for each tag.
  for tag_name in tag_names:
    manifest = manifests[tag_name]

    # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
    # expect a 202 response for success.
    put_code = 404 if options.manifest_invalid_blob_references else 202
    manifest_headers = {'Content-Type': manifest.media_type}
    manifest_headers.update(headers)

    # Allow tests to override the content type (e.g. to exercise rejection).
    if options.manifest_content_type is not None:
      manifest_headers['Content-Type'] = options.manifest_content_type

    tag_or_digest = tag_name if not options.push_by_manifest_digest else manifest.digest
    self.conduct(session, 'PUT',
                 '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name), tag_or_digest),
                 data=manifest.bytes.as_encoded_str(),
                 expected_status=(put_code, expected_failure, V2ProtocolSteps.PUT_MANIFEST),
                 headers=manifest_headers)

  return PushResult(manifests=manifests, headers=headers)
def _push_blobs(self, blobs, session, namespace, repo_name, headers, options, expected_failure):
  """ Uploads every blob in `blobs` (digest -> bytes): optionally HEAD-checks
      absence first, then either cross-repo mounts the blob or uploads it
      (monolithic or chunked, per ProtocolOptions) and verifies the result.

  Returns True when all blobs were pushed; returns False (or None for an
  expected mount failure) on an anticipated early exit.
  """
  # `.items()` rather than the Python 2-only `.iteritems()` keeps this method
  # portable to Python 3; iteration behavior is unchanged.
  for blob_digest, blob_bytes in blobs.items():
    if not options.skip_head_checks:
      # Blob data should not yet exist.
      self.conduct(session, 'HEAD',
                   '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name), blob_digest),
                   expected_status=(404, expected_failure, V2ProtocolSteps.BLOB_HEAD_CHECK),
                   headers=headers)

    # Check for mounting of blobs.
    if options.mount_blobs and blob_digest in options.mount_blobs:
      self.conduct(session, 'POST',
                   '/v2/%s/blobs/uploads/' % self.repo_name(namespace, repo_name),
                   params={
                     'mount': blob_digest,
                     'from': options.mount_blobs[blob_digest],
                   },
                   expected_status=(201, expected_failure, V2ProtocolSteps.MOUNT_BLOB),
                   headers=headers)
      if expected_failure is not None:
        return
    else:
      # Start a new upload of the blob data.
      response = self.conduct(session, 'POST',
                              '/v2/%s/blobs/uploads/' % self.repo_name(namespace, repo_name),
                              expected_status=(202, expected_failure,
                                               V2ProtocolSteps.START_UPLOAD),
                              headers=headers)
      if response.status_code != 202:
        continue

      upload_uuid = response.headers['Docker-Upload-UUID']
      new_upload_location = response.headers['Location']
      assert new_upload_location.startswith('http://localhost:5000')

      # We need to make this relative just for the tests because the live server test
      # case modifies the port.
      location = response.headers['Location'][len('http://localhost:5000'):]

      # PATCH the data into the blob.
      if options.chunks_for_upload is None:
        self.conduct(session, 'PATCH', location, data=blob_bytes, expected_status=204,
                     headers=headers)
      else:
        # If chunked upload is requested, upload the data as a series of chunks, checking
        # status at every point.
        for chunk_data in options.chunks_for_upload:
          if len(chunk_data) == 3:
            (start_byte, end_byte, expected_code) = chunk_data
          else:
            (start_byte, end_byte) = chunk_data
            expected_code = 204

          patch_headers = {'Range': 'bytes=%s-%s' % (start_byte, end_byte)}
          patch_headers.update(headers)

          contents_chunk = blob_bytes[start_byte:end_byte]
          self.conduct(session, 'PATCH', location, data=contents_chunk,
                       expected_status=expected_code,
                       headers=patch_headers)
          if expected_code != 204:
            return False

          # Retrieve the upload status at each point, and ensure it is valid.
          status_url = '/v2/%s/blobs/uploads/%s' % (self.repo_name(namespace, repo_name),
                                                    upload_uuid)
          response = self.conduct(session, 'GET', status_url, expected_status=204,
                                  headers=headers)
          assert response.headers['Docker-Upload-UUID'] == upload_uuid
          assert response.headers['Range'] == "bytes=0-%s" % end_byte

      if options.cancel_blob_upload:
        self.conduct(session, 'DELETE', location, params=dict(digest=blob_digest),
                     expected_status=204, headers=headers)

        # Ensure the upload was canceled.
        status_url = '/v2/%s/blobs/uploads/%s' % (self.repo_name(namespace, repo_name),
                                                  upload_uuid)
        self.conduct(session, 'GET', status_url, expected_status=404, headers=headers)
        return False

      # Finish the blob upload with a PUT.
      response = self.conduct(session, 'PUT', location, params=dict(digest=blob_digest),
                              expected_status=201, headers=headers)
      assert response.headers['Docker-Content-Digest'] == blob_digest

      # Ensure the blob exists now.
      response = self.conduct(session, 'HEAD',
                              '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name),
                                                   blob_digest),
                              expected_status=200, headers=headers)

      assert response.headers['Docker-Content-Digest'] == blob_digest
      assert response.headers['Content-Length'] == str(len(blob_bytes))

      # And retrieve the blob data.
      if not options.skip_blob_push_checks:
        result = self.conduct(session, 'GET',
                              '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name),
                                                   blob_digest),
                              headers=headers, expected_status=200)
        assert result.content == blob_bytes

  return True
def delete(self, session, namespace, repo_name, tag_names, credentials=None,
           expected_failure=None, options=None):
  """ Deletes the given tag(s) via the V2 protocol, expecting 202 responses. """
  options = options or ProtocolOptions()
  scopes = options.scopes or ['repository:%s:*' % self.repo_name(namespace, repo_name)]
  if isinstance(tag_names, str):
    tag_names = [tag_names]

  # Make sure the registry is reachable before authenticating.
  self.ping(session)

  # Authenticate; bail out quietly when no token could be obtained.
  token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                       expected_failure=expected_failure)
  if token is None:
    return None

  headers = {'Authorization': 'Bearer ' + token}
  repo = self.repo_name(namespace, repo_name)
  for name in tag_names:
    self.conduct(session, 'DELETE',
                 '/v2/%s/manifests/%s' % (repo, name),
                 headers=headers,
                 expected_status=202)
def pull(self, session, namespace, repo_name, tag_names, images, credentials=None,
         expected_failure=None, options=None):
  """ Pulls the given tag(s) and verifies both the returned manifest and every
      layer blob against the expected `images`. Returns a PullResult with the
      manifests (keyed by tag) and, for schema1, the leaf V1 image IDs. """
  options = options or ProtocolOptions()
  scopes = options.scopes or ['repository:%s:pull' % self.repo_name(namespace, repo_name)]
  tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

  # Ping!
  self.ping(session)

  # Perform auth and retrieve a token.
  token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                       expected_failure=expected_failure)
  if token is None and not options.attempt_pull_without_token:
    return None

  headers = {}
  if token:
    headers = {
      'Authorization': 'Bearer ' + token,
    }

  if self.schema2:
    headers['Accept'] = ','.join(options.accept_mimetypes
                                 if options.accept_mimetypes is not None
                                 else DOCKER_SCHEMA2_CONTENT_TYPES)

  manifests = {}
  image_ids = {}
  for tag_name in tag_names:
    # Retrieve the manifest for the tag or digest.
    response = self.conduct(session, 'GET',
                            '/v2/%s/manifests/%s' % (self.repo_name(namespace, repo_name),
                                                     tag_name),
                            expected_status=(200, expected_failure, V2ProtocolSteps.GET_MANIFEST),
                            headers=headers)
    # A 401 must carry an auth challenge per the registry spec.
    if response.status_code == 401:
      assert 'WWW-Authenticate' in response.headers

    response.encoding = 'utf-8'
    if expected_failure is not None:
      return None

    # Ensure the manifest returned by us is valid.
    ct = response.headers['Content-Type']
    if not self.schema2:
      assert ct in DOCKER_SCHEMA1_CONTENT_TYPES

    manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(response.text), ct)
    manifests[tag_name] = manifest

    if manifest.schema_version == 1:
      image_ids[tag_name] = manifest.leaf_layer_v1_image_id

    # Verify the blobs.
    layer_index = 0
    empty_count = 0
    blob_digests = list(manifest.blob_digests)
    for image in images:
      # Schema2 omits empty layers from the blob list entirely.
      if manifest.schema_version == 2 and image.is_empty:
        empty_count += 1
        continue

      # If the layer is remote, then we expect the blob to *not* exist in the system.
      blob_digest = blob_digests[layer_index]
      expected_status = 404 if image.urls else 200
      result = self.conduct(session, 'GET',
                            '/v2/%s/blobs/%s' % (self.repo_name(namespace, repo_name),
                                                 blob_digest),
                            expected_status=(expected_status, expected_failure,
                                             V2ProtocolSteps.GET_BLOB),
                            headers=headers,
                            options=options)
      if expected_status == 200:
        assert result.content == image.bytes

      layer_index += 1

    assert (len(blob_digests) + empty_count) >= len(images)  # Schema 2 has 1 extra for config

  return PullResult(manifests=manifests, image_ids=image_ids)
def tags(self, session, namespace, repo_name, page_size=2, credentials=None, options=None,
         expected_failure=None):
  """ Lists the tags in the repository via /v2/<repo>/tags/list, following
      Link headers for pagination. `page_size=None` requests unpaginated
      results. Returns the accumulated tag names (or None on auth failure). """
  options = options or ProtocolOptions()
  scopes = options.scopes or ['repository:%s:pull' % self.repo_name(namespace, repo_name)]

  # Ping!
  self.ping(session)

  # Perform auth and retrieve a token.
  headers = {}
  if credentials is not None:
    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                         expected_failure=expected_failure)
    if token is None:
      return None

    headers = {
      'Authorization': 'Bearer ' + token,
    }

  results = []
  url = '/v2/%s/tags/list' % (self.repo_name(namespace, repo_name))

  params = {}
  if page_size is not None:
    params['n'] = page_size

  while True:
    response = self.conduct(session, 'GET', url, headers=headers, params=params,
                            expected_status=(200, expected_failure, V2ProtocolSteps.LIST_TAGS))
    data = response.json()

    # Only enforce the page-size bound when pagination was requested; comparing
    # len(...) against None always failed under Python 2 and raises under Python 3.
    if page_size is not None:
      assert len(data['tags']) <= page_size

    results.extend(data['tags'])

    if not response.headers.get('Link'):
      return results

    # Follow the Link header to the next page (strip the host prefix).
    link_url = response.headers['Link']
    v2_index = link_url.find('/v2/')
    url = link_url[v2_index:]
def catalog(self, session, page_size=2, credentials=None, options=None, expected_failure=None,
            namespace=None, repo_name=None, bearer_token=None):
  """ Lists all visible repositories via /v2/_catalog, following Link headers
      for pagination. A `bearer_token` overrides any token obtained from
      `credentials`; `page_size=None` requests unpaginated results. Returns
      the accumulated repository names (or None on auth failure). """
  options = options or ProtocolOptions()
  scopes = options.scopes or []

  # Ping!
  self.ping(session)

  # Perform auth and retrieve a token.
  headers = {}
  if credentials is not None:
    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                         expected_failure=expected_failure)
    if token is None:
      return None

    headers = {
      'Authorization': 'Bearer ' + token,
    }

  # An explicitly supplied bearer token takes precedence over credentials.
  if bearer_token is not None:
    headers = {
      'Authorization': 'Bearer ' + bearer_token,
    }

  results = []
  url = '/v2/_catalog'

  params = {}
  if page_size is not None:
    params['n'] = page_size

  while True:
    response = self.conduct(session, 'GET', url, headers=headers, params=params,
                            expected_status=(200, expected_failure, V2ProtocolSteps.CATALOG))
    data = response.json()

    # Only enforce the page-size bound when pagination was requested; comparing
    # len(...) against None always failed under Python 2 and raises under Python 3.
    if page_size is not None:
      assert len(data['repositories']) <= page_size

    results.extend(data['repositories'])

    if not response.headers.get('Link'):
      return results

    # Follow the Link header to the next page (strip the host prefix).
    link_url = response.headers['Link']
    v2_index = link_url.find('/v2/')
    url = link_url[v2_index:]

148
test/registry/protocols.py Normal file
View file

@ -0,0 +1,148 @@
import json
import tarfile
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from cStringIO import StringIO
from enum import Enum, unique
from six import add_metaclass
from image.docker.schema2 import EMPTY_LAYER_BYTES
# A synthetic image pushed/pulled by the protocol implementations. Only `id`,
# `parent_id` and `bytes` are required; the __defaults__ assignment below makes
# `size`, `config`, `created` and `urls` default to None and `is_empty` to False.
Image = namedtuple('Image', ['id', 'parent_id', 'bytes', 'size', 'config', 'created', 'urls',
                             'is_empty'])
Image.__new__.__defaults__ = (None, None, None, None, False)

# Returned by a successful push: the manifests written (keyed by tag; None for
# V1 and manifest-list pushes) and the auth headers used, for follow-up requests.
PushResult = namedtuple('PushResult', ['manifests', 'headers'])

# Returned by a successful pull: the manifests retrieved (keyed by tag) and the
# legacy V1 image IDs (keyed by tag; populated for schema1 manifests only).
PullResult = namedtuple('PullResult', ['manifests', 'image_ids'])
def layer_bytes_for_contents(contents, mode='|gz', other_files=None, empty=False):
  """ Builds an in-memory tar layer (compressed per `mode`, e.g. '|gz')
      containing `contents` under the entry name 'contents', plus any entries
      from the `other_files` mapping (name -> contents). Returns the raw layer
      bytes; when `empty` is True, returns the canonical empty layer instead. """
  if empty:
    return EMPTY_LAYER_BYTES

  layer_data = StringIO()
  tar_file = tarfile.open(fileobj=layer_data, mode='w' + mode)

  def add_file(name, contents):
    tar_file_info = tarfile.TarInfo(name=name)
    tar_file_info.type = tarfile.REGTYPE
    tar_file_info.size = len(contents)
    # Fixed mtime keeps the (uncompressed) tar contents deterministic.
    tar_file_info.mtime = 1
    tar_file.addfile(tar_file_info, StringIO(contents))

  add_file('contents', contents)

  if other_files is not None:
    # `.items()` (rather than the Python 2-only `.iteritems()`) keeps this
    # helper portable to Python 3; iteration behavior is unchanged.
    for file_name, file_contents in other_files.items():
      add_file(file_name, file_contents)

  tar_file.close()

  layer_bytes = layer_data.getvalue()
  layer_data.close()
  return layer_bytes
@unique
class Failures(Enum):
  """ Defines the various forms of expected failure. """
  # Tests pass one of these as `expected_failure` to a protocol method; the
  # protocol's FAILURE_CODES table maps (step, failure) to the HTTP status
  # the registry is expected to return.
  UNAUTHENTICATED = 'unauthenticated'
  UNAUTHORIZED = 'unauthorized'
  INVALID_AUTHENTICATION = 'invalid-authentication'
  INVALID_REGISTRY = 'invalid-registry'
  INVALID_REPOSITORY = 'invalid-repository'
  SLASH_REPOSITORY = 'slash-repository'
  APP_REPOSITORY = 'app-repository'
  UNKNOWN_TAG = 'unknown-tag'
  ANONYMOUS_NOT_ALLOWED = 'anonymous-not-allowed'
  DISALLOWED_LIBRARY_NAMESPACE = 'disallowed-library-namespace'
  MISSING_TAG = 'missing-tag'
  INVALID_TAG = 'invalid-tag'
  INVALID_MANIFEST = 'invalid-manifest'
  INVALID_IMAGES = 'invalid-images'
  UNSUPPORTED_CONTENT_TYPE = 'unsupported-content-type'
  INVALID_BLOB = 'invalid-blob'
  NAMESPACE_DISABLED = 'namespace-disabled'
  UNAUTHORIZED_FOR_MOUNT = 'unauthorized-for-mount'
  GEO_BLOCKED = 'geo-blocked'
  READ_ONLY = 'read-only'
  MIRROR_ONLY = 'mirror-only'
  MIRROR_MISCONFIGURED = 'mirror-misconfigured'
  MIRROR_ROBOT_MISSING = 'mirror-robot-missing'
  READONLY_REGISTRY = 'readonly-registry'
class ProtocolOptions(object):
  """ Mutable bag of knobs that tweak how the protocol implementations drive
      the registry (auth scopes, blob upload behavior, manifest construction,
      request shaping). Every option defaults to 'normal' behavior. """

  def __init__(self):
    # Auth: override the default scopes requested during the token flow.
    self.scopes = None

    # Blob uploads.
    self.cancel_blob_upload = False
    self.chunks_for_upload = None
    self.skip_head_checks = False
    self.skip_blob_push_checks = False
    self.mount_blobs = None

    # Manifest construction and publishing.
    self.manifest_invalid_blob_references = False
    self.manifest_content_type = None
    self.push_by_manifest_digest = False
    self.ensure_ascii = True

    # Request shaping.
    self.accept_mimetypes = None
    self.request_addr = None
    self.attempt_pull_without_token = False
@add_metaclass(ABCMeta)
class RegistryProtocol(object):
  """ Interface for protocols. """
  # Subclasses map {protocol step: {Failures member: expected HTTP status}}.
  FAILURE_CODES = {}

  @abstractmethod
  def login(self, session, username, password, scopes, expect_success):
    """ Performs the login flow with the given credentials, over the given scopes. """

  @abstractmethod
  def pull(self, session, namespace, repo_name, tag_names, images, credentials=None,
           expected_failure=None, options=None):
    """ Pulls the given tag via the given session, using the given credentials, and
        ensures the given images match.
    """

  @abstractmethod
  def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
           expected_failure=None, options=None):
    """ Pushes the specified images as the given tag via the given session, using
        the given credentials.
    """

  @abstractmethod
  def delete(self, session, namespace, repo_name, tag_names, credentials=None,
             expected_failure=None, options=None):
    """ Deletes some tags. """

  def repo_name(self, namespace, repo_name):
    """ Returns the repository path, prefixed with the namespace when one is given. """
    if namespace:
      return '%s/%s' % (namespace, repo_name)

    return repo_name

  def conduct(self, session, method, url, expected_status=200, params=None, data=None,
              json_data=None, headers=None, auth=None, options=None):
    """ Issues a single HTTP request via the session and asserts its status code.

    `expected_status` may be a plain code or a tuple of
    (default_code, anticipated Failures member, protocol step); in the latter
    case the expected code is looked up in the class's FAILURE_CODES table,
    falling back to the default. Returns the response.
    """
    # Work on a copy so the caller's headers dict is never mutated when we
    # inject Content-Type or the test-only remote-address override below.
    headers = dict(headers) if headers else {}

    if json_data is not None:
      data = json.dumps(json_data).encode('utf-8')
      headers['Content-Type'] = 'application/json'

    if options and options.request_addr:
      headers['X-Override-Remote-Addr-For-Testing'] = options.request_addr

    if isinstance(expected_status, tuple):
      expected_status, expected_failure, protocol_step = expected_status
      if expected_failure is not None:
        failures = self.__class__.FAILURE_CODES.get(protocol_step, {})
        expected_status = failures.get(expected_failure, expected_status)

    result = session.request(method, url, params=params, data=data, headers=headers, auth=auth)
    msg = "Expected response %s, got %s: %s" % (expected_status, result.status_code, result.text)
    assert result.status_code == expected_status, msg

    return result

File diff suppressed because it is too large Load diff