From 23c19bcbc1e68c2a02fafa68e1a17b79358805f5 Mon Sep 17 00:00:00 2001 From: Joseph Schorr Date: Tue, 24 Apr 2018 14:54:07 +0300 Subject: [PATCH] Implement support for registry integration tests via py.test This change implements support for registry integration tests using the new py.test-based live server test fixture. We can now parametrize the protocols we use (in prep for V2_2), and it makes the code *much* cleaner and less hacky. Note that moving the vast majority of the tests over from the existing impl will come as a followup PR --- test/fixtures.py | 2 + test/registry/fixtures.py | 154 ++++++++++++++++++ test/registry/protocol_fixtures.py | 35 ++++ test/registry/protocol_v1.py | 144 +++++++++++++++++ test/registry/protocol_v2.py | 246 +++++++++++++++++++++++++++++ test/registry/protocols.py | 89 +++++++++++ test/registry/registry_tests.py | 35 ++++ 7 files changed, 705 insertions(+) create mode 100644 test/registry/fixtures.py create mode 100644 test/registry/protocol_fixtures.py create mode 100644 test/registry/protocol_v1.py create mode 100644 test/registry/protocol_v2.py create mode 100644 test/registry/protocols.py create mode 100644 test/registry/registry_tests.py diff --git a/test/fixtures.py b/test/fixtures.py index 67a973992..b07e63a80 100644 --- a/test/fixtures.py +++ b/test/fixtures.py @@ -15,6 +15,7 @@ from data.model.user import LoginWrappedDBUser from endpoints.api import api_bp from endpoints.appr import appr_bp from endpoints.web import web +from endpoints.v1 import v1_bp from endpoints.v2 import v2_bp from endpoints.verbs import verbs as verbs_bp from endpoints.webhooks import webhooks @@ -175,6 +176,7 @@ def app(appconfig, initialized_db): app.register_blueprint(appr_bp, url_prefix='/cnr') app.register_blueprint(web, url_prefix='/') app.register_blueprint(verbs_bp, url_prefix='/c1') + app.register_blueprint(v1_bp, url_prefix='/v1') app.register_blueprint(v2_bp, url_prefix='/v2') app.register_blueprint(webhooks, url_prefix='/webhooks') diff 
@pytest.fixture()
def registry_server_executor(app):
  """ Returns a LiveServerExecutor with a set of named functions registered on it.

      Each registered function runs *inside* the live registry server process (via the
      executor's blueprint), letting tests mutate server-side state — DB rows, feature
      flags, storage behavior — across the process boundary. Every function returns a
      response body ('OK' or JSON) so the executor call can be verified.
  """
  def generate_csrf():
    # Mints a CSRF token in the server process, for endpoints that require one.
    return generate_csrf_token()

  def set_supports_direct_download(enabled):
    # Toggles the local storage engine's direct-download support via a marker key.
    storage.put_content(['local_us'], 'supports_direct_download', enabled)
    return 'OK'

  def delete_image(image_id):
    # "Deletes" an image by clobbering its docker_image_id so lookups miss it.
    image = Image.get(docker_image_id=image_id)
    image.docker_image_id = 'DELETED'
    image.save()
    return 'OK'

  def get_storage_replication_entry(image_id):
    # Asserts (via .get(), which raises when missing) that a replication queue item
    # exists for the image's storage.
    image = Image.get(docker_image_id=image_id)
    QueueItem.select().where(QueueItem.queue_name ** ('%' + image.storage.uuid + '%')).get()
    return 'OK'

  def set_feature(feature_name, value):
    # Overrides a feature flag in the server process, returning the previous value
    # so callers (e.g. FeatureFlagValue) can restore it later.
    import features
    old_value = features._FEATURES[feature_name].value
    features._FEATURES[feature_name].value = value
    return jsonify({'old_value': old_value})

  def clear_derived_cache():
    # Drops all cached derived-storage rows (e.g. squashed image caches).
    DerivedStorageForImage.delete().execute()
    return 'OK'

  def clear_uncompressed_size(image_id):
    # Clears the stored uncompressed size for an image in devtable/newrepo.
    image = model.image.get_image_by_id('devtable', 'newrepo', image_id)
    image.storage.uncompressed_size = None
    image.storage.save()
    return 'OK'

  def add_token():
    # Creates a write delegate token with a known, fixed code for token-auth tests.
    another_token = model.token.create_delegate_token('devtable', 'newrepo', 'my-new-token',
                                                      'write')
    another_token.code = 'somecooltokencode'
    another_token.save()
    return 'OK'

  def break_database():
    # Close any existing connection.
    close_db_filter(None)

    # Reload the database config with an invalid connection.
    config = copy.copy(app.config)
    config['DB_URI'] = 'sqlite:///not/a/valid/database'
    configure(config)

    return 'OK'

  def reload_app(server_hostname):
    # Close any existing connection.
    close_db_filter(None)

    # Reload the database config.
    app.config['SERVER_HOSTNAME'] = server_hostname[len('http://'):]
    configure(app.config)

    # Reload random after the process split, as it cannot be used uninitialized across forks.
    Random.atfork()

    # Required for anonymous calls to not exception.
    g.identity = Identity(None, 'none')

    if os.environ.get('DEBUGLOG') == 'true':
      logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)

    return 'OK'

  # Register every helper under its own name so tests can invoke it remotely via
  # registry_server_executor.on(liveserver).<name>(...).
  executor = LiveServerExecutor()
  executor.register('generate_csrf', generate_csrf)
  executor.register('set_supports_direct_download', set_supports_direct_download)
  executor.register('delete_image', delete_image)
  executor.register('get_storage_replication_entry', get_storage_replication_entry)
  executor.register('set_feature', set_feature)
  executor.register('clear_derived_cache', clear_derived_cache)
  executor.register('clear_uncompressed_size', clear_uncompressed_size)
  executor.register('add_token', add_token)
  executor.register('break_database', break_database)
  executor.register('reload_app', reload_app)
  return executor
@pytest.fixture()
def liveserver_app(app, registry_server_executor, init_db_path):
  """ Returns the Flask app to be run by the live server, with the executor's
      control blueprint attached and its own private copy of the test database.
  """
  registry_server_executor.apply_blueprint_to_app(app)

  if os.environ.get('DEBUG', 'false').lower() == 'true':
    app.config['DEBUG'] = True

  # Copy the clean database to a new path. We cannot share the DB created by the
  # normal app fixture, as it is already open in the local process.
  # NOTE(review): NamedTemporaryFile(delete=True).close() removes the file and only
  # reserves the name; copy2 then recreates it. Assumes a POSIX tmpdir — on Windows
  # this name-reuse pattern is racy.
  local_db_file = NamedTemporaryFile(delete=True)
  local_db_file.close()

  shutil.copy2(init_db_path, local_db_file.name)
  app.config['DB_URI'] = 'sqlite:///{0}'.format(local_db_file.name)
  return app


@pytest.fixture()
def app_reloader(liveserver, registry_server_executor):
  """ Reloads the app inside the live-server process, pointing its SERVER_HOSTNAME
      at the live server's actual URL, before the test body runs.
  """
  registry_server_executor.on(liveserver).reload_app(liveserver.url)
  yield


class FeatureFlagValue(object):
  """ Helper object which temporarily sets the value of a feature flag.

  Usage:

  with FeatureFlagValue('ANONYMOUS_ACCESS', False, registry_server_executor.on(liveserver)):
    ... Features.ANONYMOUS_ACCESS is False in this context ...
  """

  def __init__(self, feature_flag, test_value, executor):
    # feature_flag: name of the flag to override in the server process.
    # test_value: value the flag holds while inside the `with` block.
    # executor: bound executor (registry_server_executor.on(liveserver)).
    self.feature_flag = feature_flag
    self.test_value = test_value
    self.executor = executor

    self.old_value = None

  def __enter__(self):
    # set_feature returns the previous value so we can restore it on exit.
    result = self.executor.set_feature(self.feature_flag, self.test_value)
    self.old_value = result.json['old_value']

  def __exit__(self, type, value, traceback):
    # Always restore the pre-context value, even if the body raised.
    self.executor.set_feature(self.feature_flag, self.old_value)
@pytest.fixture(scope="session")
def basic_images():
  """ Returns a two-image chain (base layer first, leaf last) for push/pull tests. """
  def _make(image_id, contents, parent_id):
    # All test images carry no explicit size; the layer bytes are a one-file tar.gz.
    return Image(id=image_id, bytes=layer_bytes_for_contents(contents),
                 parent_id=parent_id, size=None)

  # Note: order is from base layer down to leaf.
  return [
    _make('parentid', 'parent contents', None),
    _make('someid', 'some contents', 'parentid'),
  ]


@pytest.fixture(scope="session")
def jwk():
  """ Session-wide RSA signing key used to sign schema1 manifests. """
  generated = RSA.generate(2048)
  return RSAKey(key=generated)


@pytest.fixture(params=[V1Protocol, V2Protocol])
def pusher(request, jwk):
  """ Parametrized fixture yielding a push-capable protocol (V1 and V2 in turn). """
  protocol_class = request.param
  return protocol_class(jwk)


@pytest.fixture(params=[V1Protocol, V2Protocol])
def puller(request, jwk):
  """ Parametrized fixture yielding a pull-capable protocol (V1 and V2 in turn). """
  protocol_class = request.param
  return protocol_class(jwk)
class V1Protocol(RegistryProtocol):
  """ Implementation of the Docker Registry V1 push/pull protocol for tests. """

  # Maps a protocol step to the HTTP status expected for each kind of failure.
  FAILURE_CODES = {
    V1ProtocolSteps.PUT_IMAGES: {
      Failures.UNAUTHENTICATED: 401,
      Failures.UNAUTHORIZED: 403,
    },
    V1ProtocolSteps.GET_IMAGES: {
      Failures.UNAUTHENTICATED: 403,
      Failures.UNAUTHORIZED: 403,
    },
  }

  def __init__(self, jwk):
    # V1 does not sign manifests; the key is accepted only for interface parity
    # with V2Protocol so both can be constructed identically by the fixtures.
    pass

  def _auth_for_credentials(self, credentials):
    """ Returns the requests-style basic-auth tuple for the credentials (or None). """
    if credentials is None:
      return None

    return credentials

  def ping(self, session):
    """ Verifies the V1 ping endpoint is live. """
    assert session.get('/v1/_ping').status_code == 200

  def pull(self, session, namespace, repo_name, tag_names, images, credentials=None,
           expected_failure=None, options=None):
    """ Pulls the given tag(s) and verifies the returned layers match `images`
        (ordered from base layer down to leaf). Returns a PullResult, or None when
        an expected failure occurred.
    """
    options = options or ProtocolOptions()
    auth = self._auth_for_credentials(credentials)
    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names
    prefix = '/v1/repositories/%s/%s/' % (namespace, repo_name)

    # Ping!
    self.ping(session)

    # GET /v1/repositories/{namespace}/{repository}/images
    self.conduct(session, 'GET', prefix + 'images', auth=auth,
                 expected_status=(200, expected_failure, V1ProtocolSteps.GET_IMAGES))
    if expected_failure is not None:
      return

    # GET /v1/repositories/{namespace}/{repository}/tags
    tags_result = self.conduct(session, 'GET', prefix + 'tags', auth=auth).json()
    assert len(tags_result) == len(tag_names)

    if not options.munge_shas:
      # Ensure every requested tag points at a known image ID.
      # (Previously only the hard-coded 'latest' tag was checked.)
      known_ids = {image.id for image in images}
      for tag_name in tag_names:
        assert tags_result[tag_name] in known_ids

    # Retrieve the ancestry of the first requested tag's image.
    tag_image_id = tags_result[tag_names[0]]
    image_prefix = '/v1/images/%s/' % tag_image_id
    ancestors = self.conduct(session, 'GET', image_prefix + 'ancestry', auth=auth).json()

    # Ancestry is returned leaf-first; reverse so index 0 lines up with the base image.
    assert len(ancestors) == len(images)
    for index, image_id in enumerate(reversed(ancestors)):
      # /v1/images/{imageID}/{ancestry, json, layer}
      image_prefix = '/v1/images/%s/' % image_id
      self.conduct(session, 'GET', image_prefix + 'ancestry', auth=auth)

      result = self.conduct(session, 'GET', image_prefix + 'json', auth=auth)
      assert result.json()['id'] == image_id

      # Ensure we can HEAD the image layer.
      self.conduct(session, 'HEAD', image_prefix + 'layer', auth=auth)

      # And retrieve the layer data.
      result = self.conduct(session, 'GET', image_prefix + 'layer', auth=auth)
      assert result.content == images[index].bytes

    return PullResult(manifests=None)

  def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
           expected_failure=None, options=None):
    """ Pushes the given images (base layer first) and tags the leaf image under
        each of tag_names. Returns a PushResult, or None on an expected failure.
    """
    # Normalize options for parity with pull() and V2Protocol; V1 push currently
    # consults no option flags, but callers may pass them uniformly.
    options = options or ProtocolOptions()
    auth = self._auth_for_credentials(credentials)
    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

    # Ping!
    self.ping(session)

    # PUT /v1/repositories/{namespace}/{repository}/
    result = self.conduct(session, 'PUT', '/v1/repositories/%s/%s/' % (namespace, repo_name),
                          expected_status=(201, expected_failure, V1ProtocolSteps.PUT_IMAGES),
                          json_data={},
                          auth=auth)

    if expected_failure is not None:
      return

    # The server hands back a session token in www-authenticate; all subsequent
    # push calls authenticate with it.
    headers = {}
    headers['Authorization'] = 'token ' + result.headers['www-authenticate']

    for image in images:
      # PUT /v1/images/{imageID}/json
      image_json_data = {'id': image.id}
      if image.size is not None:
        image_json_data['Size'] = image.size

      if image.parent_id is not None:
        image_json_data['parent'] = image.parent_id

      self.conduct(session, 'PUT', '/v1/images/%s/json' % image.id,
                   json_data=image_json_data, headers=headers)

      # PUT /v1/images/{imageID}/layer
      self.conduct(session, 'PUT', '/v1/images/%s/layer' % image.id,
                   data=StringIO(image.bytes), headers=headers)

      # PUT /v1/images/{imageID}/checksum
      checksum = compute_simple(StringIO(image.bytes), json.dumps(image_json_data))
      checksum_headers = {'X-Docker-Checksum-Payload': checksum}
      checksum_headers.update(headers)

      self.conduct(session, 'PUT', '/v1/images/%s/checksum' % image.id,
                   headers=checksum_headers)

    # PUT /v1/repositories/{namespace}/{repository}/tags/{tag} — each tag points at
    # the leaf (last) image.
    for tag_name in tag_names:
      self.conduct(session, 'PUT',
                   '/v1/repositories/%s/%s/tags/%s' % (namespace, repo_name, tag_name),
                   data='"%s"' % images[-1].id,
                   headers=headers)

    # PUT /v1/repositories/{namespace}/{repository}/images
    self.conduct(session, 'PUT', '/v1/repositories/%s/%s/images' % (namespace, repo_name),
                 expected_status=204, headers=headers)

    return PushResult(checksums=None, manifests=None)
@unique
class V2ProtocolSteps(Enum):
  """ Defines the various steps of the protocol, for matching failures. """
  AUTH = 'auth'
  BLOB_HEAD_CHECK = 'blob-head-check'
  GET_MANIFEST = 'get-manifest'


class V2Protocol(RegistryProtocol):
  """ Implementation of the Docker Registry V2 push/pull protocol for tests. """

  # Maps a protocol step to the HTTP status expected for each kind of failure.
  FAILURE_CODES = {
    V2ProtocolSteps.AUTH: {
      Failures.UNAUTHENTICATED: 401,
      Failures.UNAUTHORIZED: 403,
      Failures.INVALID_REGISTRY: 400,
      Failures.APP_REPOSITORY: 405,
    },
    V2ProtocolSteps.GET_MANIFEST: {
      Failures.UNKNOWN_TAG: 404,
    },
  }

  def __init__(self, jwk):
    # Key used to sign the schema1 manifests built during push.
    self.jwk = jwk

  def ping(self, session):
    """ Verifies the V2 base endpoint: unauthenticated requests must get a 401
        carrying the registry API version header.
    """
    result = session.get('/v2/')
    assert result.status_code == 401
    assert result.headers['Docker-Distribution-API-Version'] == 'registry/2.0'

  def auth(self, session, credentials, namespace, repository, scopes=None,
           expected_failure=None):
    """
    Performs the V2 Auth flow, returning the token (if any) and the response.

    Spec: https://docs.docker.com/registry/spec/auth/token/
    """
    scopes = scopes or []
    auth = None
    # Fix: `username` was previously unbound when credentials is None, raising a
    # NameError on every anonymous auth attempt.
    username = None
    if credentials is not None:
      username, _ = credentials
      auth = credentials

    # NOTE: requests drops params whose value is None, so anonymous calls simply
    # omit the `account` parameter.
    params = {
      'account': username,
      'service': 'localhost:5000',
    }

    if scopes:
      params['scope'] = 'repository:%s/%s:%s' % (namespace, repository, ','.join(scopes))

    response = self.conduct(session, 'GET', '/v2/auth', params=params, auth=auth,
                            expected_status=(200, expected_failure, V2ProtocolSteps.AUTH))

    if expected_failure is None:
      assert response.json().get('token') is not None
      return response.json().get('token'), response

    return None, response

  def push(self, session, namespace, repo_name, tag_names, images, credentials=None,
           expected_failure=None, options=None):
    """ Pushes the given images (base layer first) and writes a signed schema1
        manifest under each tag in tag_names. Returns a PushResult, or None when
        an expected failure short-circuits the flow.
    """
    options = options or ProtocolOptions()
    scopes = options.scopes or ['push', 'pull']
    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

    # Ping!
    self.ping(session)

    # Perform auth and retrieve a token.
    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                         expected_failure=expected_failure)
    if token is None:
      return

    headers = {
      'Authorization': 'Bearer ' + token,
    }

    # Build fake manifests, one per tag, layering leaf-first as schema1 requires.
    manifests = {}
    for tag_name in tag_names:
      builder = DockerSchema1ManifestBuilder(namespace, repo_name, tag_name)

      for image in reversed(images):
        checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()

        # If invalid blob references were requested, just make it up.
        if options.manifest_invalid_blob_references:
          checksum = 'sha256:' + hashlib.sha256('notarealthing').hexdigest()

        builder.add_layer(checksum, json.dumps({'id': image.id, 'parent': image.parent_id}))

      # Build the manifest.
      manifests[tag_name] = builder.build(self.jwk)

    # Push the layer data.
    checksums = {}
    for image in reversed(images):
      checksum = 'sha256:' + hashlib.sha256(image.bytes).hexdigest()
      checksums[image.id] = checksum

      # Layer data should not yet exist.
      self.conduct(session, 'HEAD', '/v2/%s/%s/blobs/%s' % (namespace, repo_name, checksum),
                   expected_status=(404, expected_failure, V2ProtocolSteps.BLOB_HEAD_CHECK),
                   headers=headers)

      # Start a new upload of the layer data.
      response = self.conduct(session, 'POST', '/v2/%s/%s/blobs/uploads/' % (namespace, repo_name),
                              expected_status=202,
                              headers=headers)

      upload_uuid = response.headers['Docker-Upload-UUID']
      new_upload_location = response.headers['Location']
      assert new_upload_location.startswith('http://localhost:5000')

      # We need to make this relative just for the tests because the live server test
      # case modifies the port.
      location = response.headers['Location'][len('http://localhost:5000'):]

      # PATCH the image data into the layer.
      if options.chunks_for_upload is None:
        self.conduct(session, 'PATCH', location, data=image.bytes, expected_status=204,
                     headers=headers)
      else:
        # If chunked upload is requested, upload the data as a series of chunks, checking
        # status at every point.
        for chunk_data in options.chunks_for_upload:
          if len(chunk_data) == 3:
            (start_byte, end_byte, expected_code) = chunk_data
          else:
            (start_byte, end_byte) = chunk_data
            expected_code = 204

          patch_headers = {'Range': 'bytes=%s-%s' % (start_byte, end_byte)}
          patch_headers.update(headers)

          contents_chunk = image.bytes[start_byte:end_byte]
          self.conduct(session, 'PATCH', location, data=contents_chunk,
                       expected_status=expected_code,
                       headers=patch_headers)
          if expected_code != 204:
            return

          # Retrieve the upload status at each point, and ensure it is valid.
          status_url = '/v2/%s/%s/blobs/uploads/%s' % (namespace, repo_name, upload_uuid)
          response = self.conduct(session, 'GET', status_url, expected_status=204, headers=headers)
          assert response.headers['Docker-Upload-UUID'] == upload_uuid
          assert response.headers['Range'] == "bytes=0-%s" % end_byte

      if options.cancel_blob_upload:
        self.conduct(session, 'DELETE', location, params=dict(digest=checksum), expected_status=204,
                     headers=headers)

        # Ensure the upload was canceled.
        status_url = '/v2/%s/%s/blobs/uploads/%s' % (namespace, repo_name, upload_uuid)
        self.conduct(session, 'GET', status_url, expected_status=404, headers=headers)
        return

      # Finish the layer upload with a PUT.
      response = self.conduct(session, 'PUT', location, params=dict(digest=checksum),
                              expected_status=201, headers=headers)
      assert response.headers['Docker-Content-Digest'] == checksum

      # Ensure the layer exists now.
      response = self.conduct(session, 'HEAD',
                              '/v2/%s/%s/blobs/%s' % (namespace, repo_name, checksum),
                              expected_status=200, headers=headers)

      assert response.headers['Docker-Content-Digest'] == checksum
      assert response.headers['Content-Length'] == str(len(image.bytes))

      # And retrieve the layer data.
      result = self.conduct(session, 'GET', '/v2/%s/%s/blobs/%s' % (namespace, repo_name, checksum),
                            headers=headers, expected_status=200)
      assert result.content == image.bytes

    # Write a manifest for each tag.
    for tag_name in tag_names:
      manifest = manifests[tag_name]

      # Write the manifest. If we expect it to be invalid, we expect a 404 code. Otherwise, we
      # expect a 202 response for success.
      put_code = 404 if options.manifest_invalid_blob_references else 202
      manifest_headers = {'Content-Type': 'application/json'}
      manifest_headers.update(headers)

      self.conduct(session, 'PUT', '/v2/%s/%s/manifests/%s' % (namespace, repo_name, tag_name),
                   data=manifest.bytes, expected_status=put_code,
                   headers=manifest_headers)

    return PushResult(checksums=checksums, manifests=manifests)

  def pull(self, session, namespace, repo_name, tag_names, images, credentials=None,
           expected_failure=None, options=None):
    """ Pulls each tag's manifest, validates it, and verifies every referenced blob
        against `images`. Returns a PullResult, or None on an expected failure.
    """
    options = options or ProtocolOptions()
    scopes = options.scopes or ['pull']
    tag_names = [tag_names] if isinstance(tag_names, str) else tag_names

    # Ping!
    self.ping(session)

    # Perform auth and retrieve a token.
    token, _ = self.auth(session, credentials, namespace, repo_name, scopes=scopes,
                         expected_failure=expected_failure)
    if token is None:
      return None

    headers = {
      'Authorization': 'Bearer ' + token,
    }

    manifests = {}
    for tag_name in tag_names:
      # Retrieve the manifest for the tag or digest.
      response = self.conduct(session, 'GET',
                              '/v2/%s/%s/manifests/%s' % (namespace, repo_name, tag_name),
                              expected_status=(200, expected_failure, V2ProtocolSteps.GET_MANIFEST),
                              headers=headers)
      if expected_failure is not None:
        return None

      # Ensure the manifest returned by us is valid.
      manifest = DockerSchema1Manifest(response.text)
      manifests[tag_name] = manifest

      # Verify the layers.
      for index, layer in enumerate(manifest.layers):
        result = self.conduct(session, 'GET',
                              '/v2/%s/%s/blobs/%s' % (namespace, repo_name, layer.digest),
                              expected_status=200,
                              headers=headers)
        assert result.content == images[index].bytes

    return PullResult(manifests=manifests)
# Shared data holders passed between the protocol implementations and the tests.
Image = namedtuple('Image', ['id', 'parent_id', 'size', 'bytes'])
PushResult = namedtuple('PushResult', ['checksums', 'manifests'])
PullResult = namedtuple('PullResult', ['manifests'])


def layer_bytes_for_contents(contents):
  """ Returns the raw bytes of a tar.gz layer containing a single file named
      'contents' whose body is the given string.
  """
  buf = StringIO()

  entry = tarfile.TarInfo(name='contents')
  entry.type = tarfile.REGTYPE
  entry.size = len(contents)
  entry.mtime = 1

  archive = tarfile.open(fileobj=buf, mode='w|gz')
  archive.addfile(entry, StringIO(contents))
  archive.close()

  serialized = buf.getvalue()
  buf.close()
  return serialized


@unique
class Failures(Enum):
  """ Defines the various forms of expected failure. """
  UNAUTHENTICATED = 'unauthenticated'
  UNAUTHORIZED = 'unauthorized'
  INVALID_REGISTRY = 'invalid-registry'
  APP_REPOSITORY = 'app-repository'
  UNKNOWN_TAG = 'unknown-tag'


class ProtocolOptions(object):
  """ Mutable bag of knobs that alter how a protocol push/pull is conducted. """

  def __init__(self):
    self.munge_shas = False                        # Skip exact image-ID checks on pull.
    self.scopes = None                             # Override the auth scopes requested.
    self.cancel_blob_upload = False                # Cancel each blob upload after starting it.
    self.manifest_invalid_blob_references = False  # Reference made-up blobs in the manifest.
    self.chunks_for_upload = None                  # (start, end[, expected_code]) chunk specs.
""" + FAILURE_CODES = {} + + @abstractmethod + def pull(self, session, namespace, repo_name, tag_names, images, credentials=None, + expected_failure=None, options=None): + """ Pulls the given tag via the given session, using the given credentials, and + ensures the given images match. + """ + + @abstractmethod + def push(self, session, namespace, repo_name, tag_names, images, credentials=None, + expected_failure=None, options=None): + """ Pushes the specified images as the given tag via the given session, using + the given credentials. + """ + + def conduct(self, session, method, url, expected_status=200, params=None, data=None, + json_data=None, headers=None, auth=None): + if json_data is not None: + data = json.dumps(json_data) + headers = headers or {} + headers['Content-Type'] = 'application/json' + + if isinstance(expected_status, tuple): + expected_status, expected_failure, protocol_step = expected_status + if expected_failure is not None: + failures = self.__class__.FAILURE_CODES.get(protocol_step, {}) + expected_status = failures.get(expected_failure, expected_status) + + result = session.request(method, url, params=params, data=data, headers=headers, auth=auth) + msg = "Expected response %s, got %s" % (expected_status, result.status_code) + assert result.status_code == expected_status, msg + return result diff --git a/test/registry/registry_tests.py b/test/registry/registry_tests.py new file mode 100644 index 000000000..69d23c6f5 --- /dev/null +++ b/test/registry/registry_tests.py @@ -0,0 +1,35 @@ +from test.fixtures import * +from test.registry.liveserverfixture import * +from test.registry.fixtures import * +from test.registry.protocol_fixtures import * + +from test.registry.protocols import Failures + + +def test_basic_push_pull(pusher, puller, basic_images, liveserver_session, app_reloader): + """ Test: Basic push and pull of an image to a new repository. """ + credentials = ('devtable', 'password') + + # Push a new repository. 
def test_basic_push_pull(pusher, puller, basic_images, liveserver_session, app_reloader):
  """ Test: Basic push and pull of an image to a new repository. """
  creds = ('devtable', 'password')

  # Push the images under a brand-new repository.
  pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
              credentials=creds)

  # Pull them back and verify the content round-trips.
  puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
              credentials=creds)


def test_push_invalid_credentials(pusher, basic_images, liveserver_session, app_reloader):
  """ Test: Ensure we get auth errors when trying to push with invalid credentials. """
  bad_creds = ('devtable', 'notcorrectpassword')

  pusher.push(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
              credentials=bad_creds, expected_failure=Failures.UNAUTHENTICATED)


def test_pull_invalid_credentials(puller, basic_images, liveserver_session, app_reloader):
  """ Test: Ensure we get auth errors when trying to pull with invalid credentials. """
  bad_creds = ('devtable', 'notcorrectpassword')

  puller.pull(liveserver_session, 'devtable', 'newrepo', 'latest', basic_images,
              credentials=bad_creds, expected_failure=Failures.UNAUTHENTICATED)