-{% endif %}
- {% block content %}{% endblock %}
-{% if with_base_template %}
+ {% block content %}{% endblock %}
+
@@ -73,4 +71,3 @@
{% endif %}
-{% endif %}
diff --git a/emails/confirmemail.html b/emails/confirmemail.html
index 11ea31d00..55b1db157 100644
--- a/emails/confirmemail.html
+++ b/emails/confirmemail.html
@@ -7,13 +7,13 @@
- This email address was used to register user {{ username }}. |
+ This email address was used to register user {{ username }}. |
- Once you confirm this email, you’ll be able to access your {{ app_title }} account. |
+ Once you confirm this email you’ll be able to access your {{ app_title }} account. |
diff --git a/emails/logsexported.html b/emails/logsexported.html
deleted file mode 100644
index 945ddedcc..000000000
--- a/emails/logsexported.html
+++ /dev/null
@@ -1,44 +0,0 @@
-{% extends "base.html" %}
-
-{% block content %}
-
-Usage Logs Export has completed
-Export ID: {{ export_id }}
-
-
-{% if status == 'success' %}
-
-
- The exported logs information can be found at {{ exported_data_url }} and will remain accessible for {{ exported_data_expiration }} seconds before being deleted. |
-
-
-{% elif status == 'failed' %}
-
-
- The attempt to export the logs in the specified range has failed. This operation will be retried up to 3 times. Please contact support if this problem persists. |
-
-
-{% elif status == 'timedout' %}
-
-
- The attempt to export the logs in the specified range has timed out. Please contact support if this problem persists. |
-
-
-{% elif status == 'invalidrequest' %}
-
-
- The attempt to export the logs failed due to an invalid request. Please contact support if this problem persists. |
-
-
-{% endif %}
-
-
-
- If you did not initiate this operation, please delete this e-mail. |
-
-
-
-Best Wishes,
-The {{ app_title }} Team
-
-{% endblock %}
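
The deleted logsexported.html above extends base.html and picks its body from the status variable ('success', 'failed', 'timedout', 'invalidrequest'). A minimal sketch of the same branching with an inline Jinja2 template (the one-line template text is illustrative, not the original markup):

from jinja2 import Template

# Condensed stand-in for the four status branches in logsexported.html.
TEMPLATE = Template(
    "Export {{ export_id }}: "
    "{% if status == 'success' %}logs available at {{ exported_data_url }}"
    "{% elif status == 'failed' %}export failed and will be retried up to 3 times"
    "{% elif status == 'timedout' %}export timed out"
    "{% else %}invalid request{% endif %}")

print(TEMPLATE.render(export_id='abc123', status='success',
                      exported_data_url='https://example.com/logs.tar.gz'))
# Export abc123: logs available at https://example.com/logs.tar.gz
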
diff --git a/endpoints/api/__init__.py b/endpoints/api/__init__.py
index 8dcabe6a3..6a9369d8d 100644
--- a/endpoints/api/__init__.py
+++ b/endpoints/api/__init__.py
@@ -1,63 +1,56 @@
import logging
import datetime
+import json
from calendar import timegm
from email.utils import formatdate
from functools import partial, wraps
-from flask import Blueprint, request, session
+from enum import Enum
+from flask import Blueprint, Response, request, make_response, jsonify, session, url_for
from flask_restful import Resource, abort, Api, reqparse
from flask_restful.utils.cors import crossdomain
from jsonschema import validate, ValidationError
-from app import app, metric_queue, authentication
+from app import app, metric_queue
+from data import model
from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission,
AdministerRepositoryPermission, UserReadPermission,
UserAdminPermission)
from auth import scopes
-from auth.auth_context import (get_authenticated_context, get_authenticated_user,
- get_validated_oauth_token)
-from auth.decorators import process_oauth
-from data import model as data_model
-from data.logs_model import logs_model
-from data.database import RepositoryState
+from auth.auth_context import get_authenticated_user, get_validated_oauth_token
+from auth.process import process_oauth
from endpoints.csrf import csrf_protect
-from endpoints.exception import (Unauthorized, InvalidRequest, InvalidResponse,
- FreshLoginRequired, NotFound)
-from endpoints.decorators import check_anon_protection, require_xhr_from_browser, check_readonly
+from endpoints.exception import (ApiException, Unauthorized, InvalidRequest, InvalidResponse,
+ FreshLoginRequired)
+from endpoints.decorators import check_anon_protection
from util.metrics.metricqueue import time_decorator
from util.names import parse_namespace_repository
from util.pagination import encrypt_page_token, decrypt_page_token
-from util.request import get_request_ip
-from __init__models_pre_oci import pre_oci_model as model
-
logger = logging.getLogger(__name__)
api_bp = Blueprint('api', __name__)
-
-
-CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With']
-
-class ApiExceptionHandlingApi(Api):
- @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS)
- def handle_error(self, error):
- return super(ApiExceptionHandlingApi, self).handle_error(error)
-
-
-api = ApiExceptionHandlingApi()
+api = Api()
api.init_app(api_bp)
api.decorators = [csrf_protect(),
- crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS),
- process_oauth, time_decorator(api_bp.name, metric_queue),
- require_xhr_from_browser]
+ crossdomain(origin='*', headers=['Authorization', 'Content-Type']),
+ process_oauth, time_decorator(api_bp.name, metric_queue)]
+@api_bp.app_errorhandler(ApiException)
+@crossdomain(origin='*', headers=['Authorization', 'Content-Type'])
+def handle_api_error(error):
+ response = Response(json.dumps(error.to_dict()), error.status_code, mimetype='application/json')
+ if error.status_code == 401:
+ response.headers['WWW-Authenticate'] = ('Bearer error="%s" error_description="%s"' %
+ (error.error_type.value, error.error_description))
+ return response
+
def resource(*urls, **kwargs):
def wrapper(api_resource):
if not api_resource:
return None
- api_resource.registered = True
api.add_resource(api_resource, *urls, **kwargs)
return api_resource
return wrapper
@@ -65,11 +58,6 @@ def resource(*urls, **kwargs):
def show_if(value):
def f(inner):
- if hasattr(inner, 'registered') and inner.registered:
- msg = ('API endpoint %s is already registered; please switch the ' +
- '@show_if to be *below* the @resource decorator')
- raise Exception(msg % inner)
-
if not value:
return None
@@ -79,11 +67,6 @@ def show_if(value):
def hide_if(value):
def f(inner):
- if hasattr(inner, 'registered') and inner.registered:
- msg = ('API endpoint %s is already registered; please switch the ' +
- '@hide_if to be *below* the @resource decorator')
- raise Exception(msg % inner)
-
if value:
return None
@@ -207,39 +190,14 @@ def parse_repository_name(func):
class ApiResource(Resource):
- registered = False
- method_decorators = [check_anon_protection, check_readonly]
+ method_decorators = [check_anon_protection]
def options(self):
return None, 200
class RepositoryParamResource(ApiResource):
- method_decorators = [check_anon_protection, parse_repository_name, check_readonly]
-
-
-def disallow_for_app_repositories(func):
- @wraps(func)
- def wrapped(self, namespace_name, repository_name, *args, **kwargs):
- # Lookup the repository with the given namespace and name and ensure it is not an application
- # repository.
- if model.is_app_repository(namespace_name, repository_name):
- abort(501)
-
- return func(self, namespace_name, repository_name, *args, **kwargs)
-
- return wrapped
-
-
-def disallow_for_non_normal_repositories(func):
- @wraps(func)
- def wrapped(self, namespace_name, repository_name, *args, **kwargs):
- repo = data_model.repository.get_repository(namespace_name, repository_name)
- if repo and repo.state != RepositoryState.NORMAL:
- abort(503, message='Repository is in read only or mirror mode: %s' % repo.state)
-
- return func(self, namespace_name, repository_name, *args, **kwargs)
- return wrapped
+ method_decorators = [check_anon_protection, parse_repository_name]
def require_repo_permission(permission_class, scope, allow_public=False):
@@ -252,7 +210,7 @@ def require_repo_permission(permission_class, scope, allow_public=False):
permission = permission_class(namespace, repository)
if (permission.can() or
(allow_public and
- model.repository_is_public(namespace, repository))):
+ model.repository.repository_is_public(namespace, repository))):
return func(self, namespace, repository, *args, **kwargs)
raise Unauthorized()
return wrapped
@@ -310,7 +268,8 @@ def require_fresh_login(func):
if not user:
raise Unauthorized()
- if get_validated_oauth_token():
+ oauth_token = get_validated_oauth_token()
+ if oauth_token:
return func(*args, **kwargs)
logger.debug('Checking fresh login for user %s', user.username)
@@ -318,8 +277,7 @@ def require_fresh_login(func):
last_login = session.get('login_time', datetime.datetime.min)
valid_span = datetime.datetime.now() - datetime.timedelta(minutes=10)
- if (not user.password_hash or last_login >= valid_span or
- not authentication.supports_fresh_login):
+ if not user.password_hash or last_login >= valid_span:
return func(*args, **kwargs)
raise FreshLoginRequired()
@@ -336,19 +294,7 @@ def require_scope(scope_object):
return wrapper
-def max_json_size(max_size):
- def wrapper(func):
- @wraps(func)
- def wrapped(self, *args, **kwargs):
- if request.is_json and len(request.get_data()) > max_size:
- raise InvalidRequest()
-
- return func(self, *args, **kwargs)
- return wrapped
- return wrapper
-
-
-def validate_json_request(schema_name, optional=False):
+def validate_json_request(schema_name):
def wrapper(func):
@add_method_metadata('request_schema', schema_name)
@wraps(func)
@@ -357,13 +303,12 @@ def validate_json_request(schema_name, optional=False):
try:
json_data = request.get_json()
if json_data is None:
- if not optional:
- raise InvalidRequest('Missing JSON body')
- else:
- validate(json_data, schema)
+ raise InvalidRequest('Missing JSON body')
+
+ validate(json_data, schema)
return func(self, *args, **kwargs)
except ValidationError as ex:
- raise InvalidRequest(str(ex))
+ raise InvalidRequest(ex.message)
return wrapped
return wrapper
@@ -372,13 +317,12 @@ def request_error(exception=None, **kwargs):
data = kwargs.copy()
message = 'Request error.'
if exception:
- message = str(exception)
-
+ message = exception.message
message = data.pop('message', message)
raise InvalidRequest(message, data)
-def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None):
+def log_action(kind, user_or_orgname, metadata=None, repo=None):
if not metadata:
metadata = {}
@@ -389,15 +333,8 @@ def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None):
metadata['oauth_token_application'] = oauth_token.application.name
performer = get_authenticated_user()
-
- if repo_name is not None:
- repo = data_model.repository.get_repository(user_or_orgname, repo_name)
-
- logs_model.log_action(kind, user_or_orgname,
- repository=repo,
- performer=performer,
- ip=get_request_ip(),
- metadata=metadata)
+ model.log.log_action(kind, user_or_orgname, performer=performer, ip=request.remote_addr,
+ metadata=metadata, repository=repo)
def define_json_response(schema_name):
@@ -412,14 +349,13 @@ def define_json_response(schema_name):
try:
validate(resp, schema)
except ValidationError as ex:
- raise InvalidResponse(str(ex))
+ raise InvalidResponse(ex.message)
return resp
return wrapped
return wrapper
-import endpoints.api.appspecifictokens
import endpoints.api.billing
import endpoints.api.build
import endpoints.api.discovery
@@ -444,5 +380,4 @@ import endpoints.api.team
import endpoints.api.trigger
import endpoints.api.user
import endpoints.api.secscan
-import endpoints.api.signing
-import endpoints.api.mirror
+
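
The hunk above folds the ApiExceptionHandlingApi subclass into a blueprint-level app_errorhandler that serializes ApiException to JSON and attaches an RFC 6750-style WWW-Authenticate challenge on 401s. A runnable sketch of that pattern (DemoException and its fields are stand-ins, not Quay's actual ApiException):

import json
from flask import Blueprint, Flask, Response

bp = Blueprint('api', __name__)

class DemoException(Exception):
    # Hypothetical stand-in for ApiException: an HTTP status plus a
    # machine-readable error type and human-readable description.
    def __init__(self, status_code, error_type, error_description):
        super(DemoException, self).__init__(error_description)
        self.status_code = status_code
        self.error_type = error_type
        self.error_description = error_description

    def to_dict(self):
        return {'type': self.error_type, 'detail': self.error_description}

@bp.app_errorhandler(DemoException)
def handle_api_error(error):
    # Serialize the exception body as JSON, preserving its status code.
    response = Response(json.dumps(error.to_dict()), error.status_code,
                        mimetype='application/json')
    if error.status_code == 401:
        # Bearer challenge so OAuth clients can tell auth failures apart.
        response.headers['WWW-Authenticate'] = ('Bearer error="%s" error_description="%s"' %
                                                (error.error_type, error.error_description))
    return response

app = Flask(__name__)
app.register_blueprint(bp)
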
diff --git a/endpoints/api/__init__models_interface.py b/endpoints/api/__init__models_interface.py
deleted file mode 100644
index 974d9e0e1..000000000
--- a/endpoints/api/__init__models_interface.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-from six import add_metaclass
-
-
-@add_metaclass(ABCMeta)
-class InitDataInterface(object):
- """
- Interface that represents all data store interactions required by __init__.
- """
-
- @abstractmethod
- def is_app_repository(self, namespace_name, repository_name):
- """
-
- Args:
- namespace_name: namespace or user
- repository_name: repository
-
- Returns:
- Boolean
- """
- pass
-
- @abstractmethod
- def repository_is_public(self, namespace_name, repository_name):
- """
-
- Args:
- namespace_name: namespace or user
- repository_name: repository
-
- Returns:
- Boolean
- """
- pass
-
- @abstractmethod
- def log_action(self, kind, namespace_name, repository_name, performer, ip, metadata):
- """
-
- Args:
- kind: type of log
- user_or_orgname: name of user or organization
- performer: user doing the action
- ip: originating ip
- metadata: metadata
- repository: repository the action is related to
-
- Returns:
- None
- """
- pass
-
diff --git a/endpoints/api/__init__models_pre_oci.py b/endpoints/api/__init__models_pre_oci.py
deleted file mode 100644
index f14e7267c..000000000
--- a/endpoints/api/__init__models_pre_oci.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from __init__models_interface import InitDataInterface
-
-from data import model
-from data.logs_model import logs_model
-
-class PreOCIModel(InitDataInterface):
- def is_app_repository(self, namespace_name, repository_name):
- return model.repository.get_repository(namespace_name, repository_name,
- kind_filter='application') is not None
-
- def repository_is_public(self, namespace_name, repository_name):
- return model.repository.repository_is_public(namespace_name, repository_name)
-
- def log_action(self, kind, namespace_name, repository_name, performer, ip, metadata):
- repository = model.repository.get_repository(namespace_name, repository_name)
- logs_model.log_action(kind, namespace_name, performer=performer, ip=ip, metadata=metadata,
- repository=repository)
-
-pre_oci_model = PreOCIModel()
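
Both deleted modules follow the same interface-plus-implementation pattern: an ABCMeta interface declares the data-store calls the package needs, and a pre_oci_model singleton implements them against the legacy model. A condensed sketch of the pattern, using six.add_metaclass as the deleted code does (the is_public method and its hard-coded answer are illustrative only):

from abc import ABCMeta, abstractmethod

from six import add_metaclass

@add_metaclass(ABCMeta)
class DataInterface(object):
    """ Declares the data-store interactions a package depends on. """

    @abstractmethod
    def is_public(self, namespace_name, repository_name):
        """ Returns whether the named repository is publicly visible. """

class PreOCIModel(DataInterface):
    def is_public(self, namespace_name, repository_name):
        # In Quay this would delegate to data.model.repository; hard-coded
        # here to keep the sketch self-contained.
        return (namespace_name, repository_name) == ('library', 'hello-world')

pre_oci_model = PreOCIModel()
print(pre_oci_model.is_public('library', 'hello-world'))  # True
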
diff --git a/endpoints/api/appspecifictokens.py b/endpoints/api/appspecifictokens.py
deleted file mode 100644
index 1e886c385..000000000
--- a/endpoints/api/appspecifictokens.py
+++ /dev/null
@@ -1,133 +0,0 @@
-""" Manages app specific tokens for the current user. """
-
-import logging
-import math
-
-from datetime import timedelta
-from flask import request
-
-import features
-
-from app import app
-from auth.auth_context import get_authenticated_user
-from data import model
-from endpoints.api import (ApiResource, nickname, resource, validate_json_request,
- log_action, require_user_admin, require_fresh_login,
- path_param, NotFound, format_date, show_if, query_param, parse_args,
- truthy_bool)
-from util.timedeltastring import convert_to_timedelta
-
-logger = logging.getLogger(__name__)
-
-
-def token_view(token, include_code=False):
- data = {
- 'uuid': token.uuid,
- 'title': token.title,
- 'last_accessed': format_date(token.last_accessed),
- 'created': format_date(token.created),
- 'expiration': format_date(token.expiration),
- }
-
- if include_code:
- data.update({
- 'token_code': model.appspecifictoken.get_full_token_string(token),
- })
-
- return data
-
-
-# The default window to use when looking up tokens that will be expiring.
-_DEFAULT_TOKEN_EXPIRATION_WINDOW = '4w'
-
-
-@resource('/v1/user/apptoken')
-@show_if(features.APP_SPECIFIC_TOKENS)
-class AppTokens(ApiResource):
- """ Lists all app specific tokens for a user """
- schemas = {
- 'NewToken': {
- 'type': 'object',
- 'required': [
- 'title',
- ],
- 'properties': {
- 'title': {
- 'type': 'string',
- 'description': 'The user-defined title for the token',
- },
- }
- },
- }
-
- @require_user_admin
- @nickname('listAppTokens')
- @parse_args()
- @query_param('expiring', 'If true, only returns those tokens expiring soon', type=truthy_bool)
- def get(self, parsed_args):
- """ Lists the app specific tokens for the user. """
- expiring = parsed_args['expiring']
- if expiring:
- expiration = app.config.get('APP_SPECIFIC_TOKEN_EXPIRATION')
- token_expiration = convert_to_timedelta(expiration or _DEFAULT_TOKEN_EXPIRATION_WINDOW)
- seconds = math.ceil(token_expiration.total_seconds() * 0.1) or 1
- soon = timedelta(seconds=seconds)
- tokens = model.appspecifictoken.get_expiring_tokens(get_authenticated_user(), soon)
- else:
- tokens = model.appspecifictoken.list_tokens(get_authenticated_user())
-
- return {
- 'tokens': [token_view(token, include_code=False) for token in tokens],
- 'only_expiring': expiring,
- }
-
- @require_user_admin
- @require_fresh_login
- @nickname('createAppToken')
- @validate_json_request('NewToken')
- def post(self):
- """ Create a new app specific token for user. """
- title = request.get_json()['title']
- token = model.appspecifictoken.create_token(get_authenticated_user(), title)
-
- log_action('create_app_specific_token', get_authenticated_user().username,
- {'app_specific_token_title': token.title,
- 'app_specific_token': token.uuid})
-
- return {
- 'token': token_view(token, include_code=True),
- }
-
-
-@resource('/v1/user/apptoken/<token_uuid>')
-@show_if(features.APP_SPECIFIC_TOKENS)
-@path_param('token_uuid', 'The uuid of the app specific token')
-class AppToken(ApiResource):
- """ Provides operations on an app specific token """
- @require_user_admin
- @require_fresh_login
- @nickname('getAppToken')
- def get(self, token_uuid):
- """ Returns a specific app token for the user. """
- token = model.appspecifictoken.get_token_by_uuid(token_uuid, owner=get_authenticated_user())
- if token is None:
- raise NotFound()
-
- return {
- 'token': token_view(token, include_code=True),
- }
-
- @require_user_admin
- @require_fresh_login
- @nickname('revokeAppToken')
- def delete(self, token_uuid):
- """ Revokes a specific app token for the user. """
- token = model.appspecifictoken.revoke_token_by_uuid(token_uuid, owner=get_authenticated_user())
- if token is None:
- raise NotFound()
-
- log_action('revoke_app_specific_token', get_authenticated_user().username,
- {'app_specific_token_title': token.title,
- 'app_specific_token': token.uuid})
-
- return '', 204
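
In the deleted AppTokens.get handler, "expiring soon" means within 10% of the configured token lifetime (default '4w'), floored at one second. The arithmetic in isolation, with a simplified stand-in for util.timedeltastring.convert_to_timedelta:

import math
from datetime import timedelta

def convert_to_timedelta(value):
    # Simplified stand-in: supports only the '<n>w' (weeks) form used
    # by the default window; the real helper parses more unit suffixes.
    assert value.endswith('w')
    return timedelta(weeks=int(value[:-1]))

_DEFAULT_TOKEN_EXPIRATION_WINDOW = '4w'

token_expiration = convert_to_timedelta(_DEFAULT_TOKEN_EXPIRATION_WINDOW)
seconds = math.ceil(token_expiration.total_seconds() * 0.1) or 1
soon = timedelta(seconds=seconds)
print(soon)  # 2 days, 19:12:00 -- tokens expiring within this window are flagged
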
diff --git a/endpoints/api/billing.py b/endpoints/api/billing.py
index db7158d12..5e12b8f6b 100644
--- a/endpoints/api/billing.py
+++ b/endpoints/api/billing.py
@@ -32,7 +32,7 @@ def get_namespace_plan(namespace):
# TODO: Can we cache this or make it faster somehow?
try:
cus = billing.Customer.retrieve(namespace_user.stripe_id)
- except stripe.error.APIConnectionError:
+ except stripe.APIConnectionError:
abort(503, message='Cannot contact Stripe')
if not cus.subscription:
@@ -55,7 +55,7 @@ def lookup_allowed_private_repos(namespace):
def carderror_response(e):
- return {'carderror': str(e)}, 402
+ return {'carderror': e.message}, 402
def get_card(user):
@@ -66,7 +66,7 @@ def get_card(user):
if user.stripe_id:
try:
cus = billing.Customer.retrieve(user.stripe_id)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
abort(503, message='Cannot contact Stripe')
if cus and cus.default_card:
@@ -93,18 +93,18 @@ def set_card(user, token):
if user.stripe_id:
try:
cus = billing.Customer.retrieve(user.stripe_id)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
abort(503, message='Cannot contact Stripe')
if cus:
try:
cus.card = token
cus.save()
- except stripe.error.CardError as exc:
+ except stripe.CardError as exc:
return carderror_response(exc)
- except stripe.error.InvalidRequestError as exc:
+ except stripe.InvalidRequestError as exc:
return carderror_response(exc)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
return carderror_response(e)
return get_card(user)
@@ -127,8 +127,8 @@ def get_invoices(customer_id):
}
try:
- invoices = billing.Invoice.list(customer=customer_id, count=12)
- except stripe.error.APIConnectionError as e:
+ invoices = billing.Invoice.all(customer=customer_id, count=12)
+ except stripe.APIConnectionError as e:
abort(503, message='Cannot contact Stripe')
return {
@@ -139,7 +139,7 @@ def get_invoices(customer_id):
def get_invoice_fields(user):
try:
cus = billing.Customer.retrieve(user.stripe_id)
- except stripe.error.APIConnectionError:
+ except stripe.APIConnectionError:
abort(503, message='Cannot contact Stripe')
if not 'metadata' in cus:
@@ -329,7 +329,7 @@ class UserPlan(ApiResource):
if user.stripe_id:
try:
cus = billing.Customer.retrieve(user.stripe_id)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
abort(503, message='Cannot contact Stripe')
if cus.subscription:
@@ -398,7 +398,7 @@ class OrganizationPlan(ApiResource):
if organization.stripe_id:
try:
cus = billing.Customer.retrieve(organization.stripe_id)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
abort(503, message='Cannot contact Stripe')
if cus.subscription:
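
Every hunk in billing.py above swaps stripe.error.* exceptions for top-level stripe.* names (and Invoice.list for Invoice.all): newer stripe-python releases keep the exception classes in the stripe.error module, while older releases exposed them at the package top level. A version-tolerant lookup, assuming only that one of the two locations exists:

import stripe

try:
    # Newer stripe-python: exceptions live under stripe.error.
    APIConnectionError = stripe.error.APIConnectionError
except AttributeError:
    # Older releases exported them at the top level.
    APIConnectionError = stripe.APIConnectionError

def retrieve_customer(stripe_id):
    # Mirrors the endpoints above: connectivity failures become a
    # "Cannot contact Stripe" error rather than an unhandled 500.
    try:
        return stripe.Customer.retrieve(stripe_id)
    except APIConnectionError:
        raise RuntimeError('Cannot contact Stripe')
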
diff --git a/endpoints/api/build.py b/endpoints/api/build.py
index d7fb55ae1..ae97571eb 100644
--- a/endpoints/api/build.py
+++ b/endpoints/api/build.py
@@ -1,33 +1,31 @@
""" Create, list, cancel and get status/logs of repository builds. """
-import datetime
-import hashlib
-import json
-import logging
-import os
-from flask import request
from urlparse import urlparse
-import features
+import logging
+import json
+import datetime
+import hashlib
+
+from flask import request
from app import userfiles as user_files, build_logs, log_archive, dockerfile_build_queue
-from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission,
- AdministerRepositoryPermission, AdministerOrganizationPermission,
- SuperUserPermission)
from buildtrigger.basehandler import BuildTriggerHandler
-from data import database
-from data import model
-from data.buildlogs import BuildStatusRetrievalError
from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
require_repo_read, require_repo_write, validate_json_request,
ApiResource, internal_only, format_date, api, path_param,
- require_repo_admin, abort, disallow_for_app_repositories,
- disallow_for_non_normal_repositories)
-from endpoints.building import (start_build, PreparedBuild, MaximumBuildsQueuedException,
- BuildTriggerDisabledException)
+ require_repo_admin, abort)
from endpoints.exception import Unauthorized, NotFound, InvalidRequest
+from endpoints.building import start_build, PreparedBuild, MaximumBuildsQueuedException
+from data import database
+from data import model
+from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission,
+ AdministerRepositoryPermission, AdministerOrganizationPermission,
+ SuperUserPermission)
+
+from data.buildlogs import BuildStatusRetrievalError
from util.names import parse_robot_username
-from util.request import get_request_ip
+
logger = logging.getLogger(__name__)
@@ -53,7 +51,6 @@ def user_view(user):
'is_robot': user.robot,
}
-
def trigger_view(trigger, can_read=False, can_admin=False, for_build=False):
if trigger and trigger.uuid:
build_trigger = BuildTriggerHandler.get_handler(trigger)
@@ -72,8 +69,6 @@ def trigger_view(trigger, can_read=False, can_admin=False, for_build=False):
'config': build_trigger.config if can_admin else {},
'can_invoke': can_admin,
- 'enabled': trigger.enabled,
- 'disabled_reason': trigger.disabled_reason.name if trigger.disabled_reason else None,
}
if not for_build and can_admin and trigger.pull_robot:
@@ -138,8 +133,6 @@ def build_status_view(build_obj):
'display_name': build_obj.display_name,
'status': status or {},
'subdirectory': job_config.get('build_subdir', ''),
- 'dockerfile_path': job_config.get('build_subdir', ''),
- 'context': job_config.get('context', ''),
'tags': job_config.get('docker_tags', []),
'manual_user': job_config.get('manual_user', None),
'is_writer': can_write,
@@ -154,10 +147,9 @@ def build_status_view(build_obj):
'error': error,
}
- if can_write or features.READER_BUILD_LOGS:
+ if can_write:
if build_obj.resource_key is not None:
- resp['archive_url'] = user_files.get_file_url(build_obj.resource_key,
- get_request_ip(), requires_cors=True)
+ resp['archive_url'] = user_files.get_file_url(build_obj.resource_key, requires_cors=True)
elif job_config.get('archive_url', None):
resp['archive_url'] = job_config['archive_url']
@@ -183,15 +175,7 @@ class RepositoryBuildList(RepositoryParamResource):
},
'subdirectory': {
'type': 'string',
- 'description': 'Subdirectory in which the Dockerfile can be found. You can only specify this or dockerfile_path',
- },
- 'dockerfile_path': {
- 'type': 'string',
- 'description': 'Path to a dockerfile. You can only specify this or subdirectory.',
- },
- 'context': {
- 'type': 'string',
- 'description': 'Pass in the context for the dockerfile. This is optional.',
+ 'description': 'Subdirectory in which the Dockerfile can be found',
},
'pull_robot': {
'type': 'string',
@@ -216,7 +200,6 @@ class RepositoryBuildList(RepositoryParamResource):
@query_param('limit', 'The maximum number of builds to return', type=int, default=5)
@query_param('since', 'Returns all builds since the given unix timecode', type=int, default=None)
@nickname('getRepoBuilds')
- @disallow_for_app_repositories
def get(self, namespace, repository, parsed_args):
""" Get the list of repository builds. """
limit = parsed_args.get('limit', 5)
@@ -232,8 +215,6 @@ class RepositoryBuildList(RepositoryParamResource):
@require_repo_write
@nickname('requestRepoBuild')
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@validate_json_request('RepositoryBuildRequest')
def post(self, namespace, repository):
""" Request that a repository be built and pushed from the specified input. """
@@ -260,7 +241,7 @@ class RepositoryBuildList(RepositoryParamResource):
if scheme != 'http' and scheme != 'https':
raise InvalidRequest('Invalid Archive URL: Must be http or https')
- context, subdir = self.get_dockerfile_context(request_json)
+ subdir = request_json['subdirectory'] if 'subdirectory' in request_json else ''
tags = request_json.get('docker_tags', ['latest'])
pull_robot_name = request_json.get('pull_robot', None)
@@ -295,12 +276,9 @@ class RepositoryBuildList(RepositoryParamResource):
if repo is None:
raise NotFound()
- try:
- build_name = (user_files.get_file_checksum(dockerfile_id)
- if dockerfile_id
- else hashlib.sha224(archive_url).hexdigest()[0:7])
- except IOError:
- raise InvalidRequest('File %s could not be found or is invalid' % dockerfile_id)
+ build_name = (user_files.get_file_checksum(dockerfile_id)
+ if dockerfile_id
+ else hashlib.sha224(archive_url).hexdigest()[0:7])
prepared = PreparedBuild()
prepared.build_name = build_name
@@ -308,15 +286,13 @@ class RepositoryBuildList(RepositoryParamResource):
prepared.archive_url = archive_url
prepared.tags = tags
prepared.subdirectory = subdir
- prepared.context = context
prepared.is_manual = True
prepared.metadata = {}
+
try:
build_request = start_build(repo, prepared, pull_robot_name=pull_robot_name)
except MaximumBuildsQueuedException:
abort(429, message='Maximum queued build rate exceeded.')
- except BuildTriggerDisabledException:
- abort(400, message='Build trigger is disabled')
resp = build_status_view(build_request)
repo_string = '%s/%s' % (namespace, repository)
@@ -326,29 +302,8 @@ class RepositoryBuildList(RepositoryParamResource):
}
return resp, 201, headers
- @staticmethod
- def get_dockerfile_context(request_json):
- context = request_json['context'] if 'context' in request_json else os.path.sep
- if 'dockerfile_path' in request_json:
- subdir = request_json['dockerfile_path']
- if 'context' not in request_json:
- context = os.path.dirname(subdir)
- return context, subdir
- if 'subdirectory' in request_json:
- subdir = request_json['subdirectory']
- context = subdir
- if not subdir.endswith(os.path.sep):
- subdir += os.path.sep
- subdir += 'Dockerfile'
- else:
- if context.endswith(os.path.sep):
- subdir = context + 'Dockerfile'
- else:
- subdir = context + os.path.sep + 'Dockerfile'
-
- return context, subdir
@resource('/v1/repository/<apirepopath:repository>/build/<build_uuid>')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@@ -357,7 +312,6 @@ class RepositoryBuildResource(RepositoryParamResource):
""" Resource for dealing with repository builds. """
@require_repo_read
@nickname('getRepoBuild')
- @disallow_for_app_repositories
def get(self, namespace, repository, build_uuid):
""" Returns information about a build. """
try:
@@ -372,8 +326,6 @@ class RepositoryBuildResource(RepositoryParamResource):
@require_repo_admin
@nickname('cancelRepoBuild')
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
def delete(self, namespace, repository, build_uuid):
""" Cancels a repository build. """
try:
@@ -397,7 +349,6 @@ class RepositoryBuildStatus(RepositoryParamResource):
""" Resource for dealing with repository build status. """
@require_repo_read
@nickname('getRepoBuildStatus')
- @disallow_for_app_repositories
def get(self, namespace, repository, build_uuid):
""" Return the status for the builds specified by the build uuids. """
build = model.build.get_repository_build(build_uuid)
@@ -412,7 +363,7 @@ def get_logs_or_log_url(build):
# If the logs have been archived, just return a URL of the completed archive
if build.logs_archived:
return {
- 'logs_url': log_archive.get_file_url(build.uuid, get_request_ip(), requires_cors=True)
+ 'logs_url': log_archive.get_file_url(build.uuid, requires_cors=True)
}
start = int(request.args.get('start', 0))
@@ -436,15 +387,10 @@ def get_logs_or_log_url(build):
@path_param('build_uuid', 'The UUID of the build')
class RepositoryBuildLogs(RepositoryParamResource):
""" Resource for loading repository build logs. """
- @require_repo_read
+ @require_repo_write
@nickname('getRepoBuildLogs')
- @disallow_for_app_repositories
def get(self, namespace, repository, build_uuid):
""" Return the build logs for the build specified by the build uuid. """
- can_write = ModifyRepositoryPermission(namespace, repository).can()
- if not features.READER_BUILD_LOGS and not can_write:
- raise Unauthorized()
-
build = model.build.get_repository_build(build_uuid)
if (not build or build.repository.name != repository or
build.repository.namespace_user.username != namespace):
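
The deleted get_dockerfile_context helper above derives both a build context and a Dockerfile path from whichever of context, dockerfile_path, or subdirectory the request supplies. The same logic as a standalone function, with probes showing each branch (POSIX path separators assumed for the printed results):

import os

def get_dockerfile_context(request_json):
    # The context defaults to the filesystem root unless overridden.
    context = request_json['context'] if 'context' in request_json else os.path.sep
    if 'dockerfile_path' in request_json:
        subdir = request_json['dockerfile_path']
        if 'context' not in request_json:
            # Infer the context from the Dockerfile's directory.
            context = os.path.dirname(subdir)
        return context, subdir

    if 'subdirectory' in request_json:
        # Legacy form: the subdirectory doubles as the context.
        subdir = request_json['subdirectory']
        context = subdir
        if not subdir.endswith(os.path.sep):
            subdir += os.path.sep
        subdir += 'Dockerfile'
    else:
        if context.endswith(os.path.sep):
            subdir = context + 'Dockerfile'
        else:
            subdir = context + os.path.sep + 'Dockerfile'

    return context, subdir

print(get_dockerfile_context({'dockerfile_path': '/app/Dockerfile.prod'}))  # ('/app', '/app/Dockerfile.prod')
print(get_dockerfile_context({'subdirectory': 'app'}))                      # ('app', 'app/Dockerfile')
print(get_dockerfile_context({}))                                           # ('/', '/Dockerfile')
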
diff --git a/endpoints/api/discovery.py b/endpoints/api/discovery.py
index 66e7c74a3..c688feb4e 100644
--- a/endpoints/api/discovery.py
+++ b/endpoints/api/discovery.py
@@ -1,4 +1,3 @@
-# TODO to extract the discovery stuff into a util at the top level and then use it both here and config_app discovery.py
""" API discovery information. """
import re
@@ -13,13 +12,12 @@ from app import app
from auth import scopes
from endpoints.api import (ApiResource, resource, method_metadata, nickname, truthy_bool,
parse_args, query_param)
-from endpoints.decorators import anon_allowed
logger = logging.getLogger(__name__)
-PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>')
+PARAM_REGEX = re.compile(r'<([\w]+:)?([\w]+)>')
TYPE_CONVERTER = {
@@ -328,7 +326,6 @@ class DiscoveryResource(ApiResource):
@parse_args()
@query_param('internal', 'Whether to include internal APIs.', type=truthy_bool, default=False)
@nickname('discovery')
- @anon_allowed
def get(self, parsed_args):
""" List all of the API endpoints available in the swagger API format."""
return swagger_route_data(parsed_args['internal'])
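
The PARAM_REGEX change above narrows which Flask URL parameters the discovery scanner recognizes: the new r'<([\w]+:)?([\w]+)>' only matches bare converters such as <int:limit>, while the removed r'<([^:>]+:)*([\w]+)>' also matches parameterized converters like <regex("..."):digest>. A quick demonstration:

import re

NARROW = re.compile(r'<([\w]+:)?([\w]+)>')   # bare converters only
WIDE = re.compile(r'<([^:>]+:)*([\w]+)>')    # parameterized converters too

for rule in ['/build/<build_uuid>',
             '/manifest/<regex("sha256:[0-9a-f]{64}"):digest>']:
    print(rule, bool(NARROW.search(rule)), bool(WIDE.search(rule)))
# /build/<build_uuid> True True
# /manifest/<regex("sha256:[0-9a-f]{64}"):digest> False True
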
diff --git a/endpoints/api/error.py b/endpoints/api/error.py
index bfa80efe2..23d95ed32 100644
--- a/endpoints/api/error.py
+++ b/endpoints/api/error.py
@@ -7,7 +7,7 @@ from endpoints.exception import NotFound, ApiErrorType, ERROR_DESCRIPTION
def error_view(error_type):
return {
- 'type': url_for('api.error', error_type=error_type, _external=True),
+ 'type': url_for('error', error_type=error_type, _external=True),
'title': error_type,
'description': ERROR_DESCRIPTION[error_type]
}
diff --git a/endpoints/api/globalmessages.py b/endpoints/api/globalmessages.py
index 43ea58083..b27683a17 100644
--- a/endpoints/api/globalmessages.py
+++ b/endpoints/api/globalmessages.py
@@ -6,10 +6,10 @@ from flask import request
import features
from auth import scopes
from auth.permissions import SuperUserPermission
+from data import model
from endpoints.api import (ApiResource, resource, nickname,
require_fresh_login, verify_not_prod, validate_json_request,
require_scope, show_if,)
-from globalmessages_models_pre_oci import pre_oci_model as model
@resource('/v1/messages')
@@ -58,11 +58,6 @@ class GlobalUserMessages(ApiResource):
'message': {
'type': 'object',
'description': 'A single message',
- 'required': [
- 'content',
- 'media_type',
- 'severity',
- ],
'properties': {
'content': {
'type': 'string',
@@ -88,7 +83,7 @@ class GlobalUserMessages(ApiResource):
def get(self):
""" Return a super users messages """
return {
- 'messages': [m.to_dict() for m in model.get_all_messages()],
+ 'messages': [message_view(m) for m in model.message.get_messages()],
}
@require_fresh_login
@@ -102,10 +97,7 @@ class GlobalUserMessages(ApiResource):
abort(404)
if SuperUserPermission().can():
- message_req = request.get_json()['message']
- message = model.create_message(message_req['severity'], message_req['media_type'], message_req['content'])
- if message is None:
- abort(400)
+ model.message.create([request.get_json()['message']])
return make_response('', 201)
abort(403)
@@ -122,7 +114,16 @@ class GlobalUserMessage(ApiResource):
def delete(self, uuid):
""" Delete a message """
if SuperUserPermission().can():
- model.delete_message(uuid)
+ model.message.delete_message([uuid])
return make_response('', 204)
abort(403)
+
+
+def message_view(message):
+ return {
+ 'uuid': message.uuid,
+ 'content': message.content,
+ 'severity': message.severity,
+ 'media_type': message.media_type.name,
+ }
diff --git a/endpoints/api/globalmessages_models_interface.py b/endpoints/api/globalmessages_models_interface.py
deleted file mode 100644
index 679462c1d..000000000
--- a/endpoints/api/globalmessages_models_interface.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-class GlobalMessage(
- namedtuple('GlobalMessage', [
- 'uuid',
- 'content',
- 'severity',
- 'media_type_name',
- ])):
-
- def to_dict(self):
- return {
- 'uuid': self.uuid,
- 'content': self.content,
- 'severity': self.severity,
- 'media_type': self.media_type_name,
- }
-
-
-
-@add_metaclass(ABCMeta)
-class GlobalMessageDataInterface(object):
- """
- Data interface for globalmessages API
- """
-
- @abstractmethod
- def get_all_messages(self):
- """
-
- Returns:
- list(GlobalMessage)
- """
-
- @abstractmethod
- def create_message(self, severity, media_type_name, content):
- """
-
- Returns:
- GlobalMessage or None
- """
-
- @abstractmethod
- def delete_message(self, uuid):
- """
-
- Returns:
- void
- """
-
-
\ No newline at end of file
diff --git a/endpoints/api/globalmessages_models_pre_oci.py b/endpoints/api/globalmessages_models_pre_oci.py
deleted file mode 100644
index d9a623f1b..000000000
--- a/endpoints/api/globalmessages_models_pre_oci.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from globalmessages_models_interface import GlobalMessageDataInterface, GlobalMessage
-from data import model
-
-
-class GlobalMessagePreOCI(GlobalMessageDataInterface):
-
- def get_all_messages(self):
- messages = model.message.get_messages()
- return [self._message(m) for m in messages]
-
- def create_message(self, severity, media_type_name, content):
- message = {
- 'severity': severity,
- 'media_type': media_type_name,
- 'content': content
- }
- messages = model.message.create([message])
- return self._message(messages[0])
-
- def delete_message(self, uuid):
- model.message.delete_message([uuid])
-
- def _message(self, message_obj):
- if message_obj is None:
- return None
- return GlobalMessage(
- uuid=message_obj.uuid,
- content=message_obj.content,
- severity=message_obj.severity,
- media_type_name=message_obj.media_type.name,
- )
-
-pre_oci_model = GlobalMessagePreOCI()
\ No newline at end of file
diff --git a/endpoints/api/image.py b/endpoints/api/image.py
index 3a9dcd82c..0d6e59425 100644
--- a/endpoints/api/image.py
+++ b/endpoints/api/image.py
@@ -1,58 +1,85 @@
""" List and lookup repository images. """
+
import json
-from data.registry_model import registry_model
+from collections import defaultdict
from endpoints.api import (resource, nickname, require_repo_read, RepositoryParamResource,
- path_param, disallow_for_app_repositories, format_date)
+ format_date, path_param)
from endpoints.exception import NotFound
+from data import model
-def image_dict(image, with_history=False, with_tags=False):
- parsed_command = None
- if image.command:
- try:
- parsed_command = json.loads(image.command)
- except (ValueError, TypeError):
- parsed_command = {'error': 'Could not parse command'}
+def image_view(image, image_map, include_ancestors=True):
+ command = image.command
+
+ def docker_id(aid):
+ if aid not in image_map:
+ return ''
+
+ return image_map[aid].docker_image_id
image_data = {
'id': image.docker_image_id,
'created': format_date(image.created),
'comment': image.comment,
- 'command': parsed_command,
- 'size': image.image_size,
- 'uploading': image.uploading,
- 'sort_index': len(image.parents),
+ 'command': json.loads(command) if command else None,
+ 'size': image.storage.image_size,
+ 'uploading': image.storage.uploading,
+ 'sort_index': len(image.ancestors),
}
- if with_tags:
- image_data['tags'] = [tag.name for tag in image.tags]
+ if include_ancestors:
+ # Calculate the ancestors string, with the DBID's replaced with the docker IDs.
+ ancestors = [docker_id(a) for a in image.ancestor_id_list()]
+ image_data['ancestors'] = '/{0}/'.format('/'.join(ancestors))
- if with_history:
- image_data['history'] = [image_dict(parent) for parent in image.parents]
-
- # Calculate the ancestors string, with the DBID's replaced with the docker IDs.
- parent_docker_ids = [parent_image.docker_image_id for parent_image in image.parents]
- image_data['ancestors'] = '/{0}/'.format('/'.join(parent_docker_ids))
return image_data
+def historical_image_view(image, image_map):
+ ancestors = [image_map[a] for a in image.ancestor_id_list()]
+ normal_view = image_view(image, image_map)
+ normal_view['history'] = [image_view(parent, image_map, False) for parent in ancestors]
+ return normal_view
+
+
@resource('/v1/repository/<apirepopath:repository>/image/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class RepositoryImageList(RepositoryParamResource):
""" Resource for listing repository images. """
-
@require_repo_read
@nickname('listRepositoryImages')
- @disallow_for_app_repositories
def get(self, namespace, repository):
""" List the images for the specified repository. """
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ repo = model.repository.get_repository(namespace, repository)
+ if not repo:
raise NotFound()
- images = registry_model.get_legacy_images(repo_ref)
- return {'images': [image_dict(image, with_tags=True) for image in images]}
+ all_images = model.image.get_repository_images_without_placements(repo)
+ all_tags = model.tag.list_repository_tags(namespace, repository)
+
+ tags_by_docker_id = defaultdict(list)
+ found_image_ids = set()
+
+ for tag in all_tags:
+ tags_by_docker_id[tag.image.docker_image_id].append(tag.name)
+ found_image_ids.add(tag.image.id)
+ found_image_ids.update(tag.image.ancestor_id_list())
+
+ image_map = {}
+ filtered_images = []
+ for image in all_images:
+ if image.id in found_image_ids:
+ image_map[image.id] = image
+ filtered_images.append(image)
+
+ def add_tags(image_json):
+ image_json['tags'] = tags_by_docker_id[image_json['id']]
+ return image_json
+
+ return {
+ 'images': [add_tags(image_view(image, image_map)) for image in filtered_images]
+ }
@resource('/v1/repository/<apirepopath:repository>/image/<image_id>')
@@ -60,18 +87,18 @@ class RepositoryImageList(RepositoryParamResource):
@path_param('image_id', 'The Docker image ID')
class RepositoryImage(RepositoryParamResource):
""" Resource for handling repository images. """
-
@require_repo_read
@nickname('getImage')
- @disallow_for_app_repositories
def get(self, namespace, repository, image_id):
""" Get the information available for the specified image. """
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ image = model.image.get_repo_image_extended(namespace, repository, image_id)
+ if not image:
raise NotFound()
- image = registry_model.get_legacy_image(repo_ref, image_id, include_parents=True)
- if image is None:
- raise NotFound()
+ # Lookup all the ancestor images for the image.
+ image_map = {}
+ for current_image in model.image.get_parent_images(namespace, repository, image):
+ image_map[current_image.id] = current_image
+
+ return historical_image_view(image, image_map)
- return image_dict(image, with_history=True)
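
The restored image_view above rebuilds the legacy 'ancestors' string by translating each ancestor's database ID into its Docker image ID. A minimal sketch of that translation with stub rows (the Img namedtuple stands in for the Peewee model, which stores ancestors as a '/'-delimited string of database IDs):

from collections import namedtuple

Img = namedtuple('Img', ['id', 'docker_image_id', 'ancestors'])

def ancestor_id_list(image):
    # '/1/2/' -> [1, 2]
    return [int(a) for a in image.ancestors.split('/') if a]

base = Img(1, 'aaa111', '/')
middle = Img(2, 'bbb222', '/1/')
leaf = Img(3, 'ccc333', '/1/2/')
image_map = {img.id: img for img in (base, middle, leaf)}

def docker_id(aid):
    # Missing ancestors render as empty segments rather than failing.
    return image_map[aid].docker_image_id if aid in image_map else ''

ancestors = [docker_id(a) for a in ancestor_id_list(leaf)]
print('/{0}/'.format('/'.join(ancestors)))  # /aaa111/bbb222/
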
diff --git a/endpoints/api/logs.py b/endpoints/api/logs.py
index 1760a2e9b..05d340d20 100644
--- a/endpoints/api/logs.py
+++ b/endpoints/api/logs.py
@@ -1,70 +1,115 @@
""" Access usage logs for organizations or repositories. """
+
+import json
+
from datetime import datetime, timedelta
+from dateutil.relativedelta import relativedelta
-from flask import request
-
-import features
-
-from app import app, export_action_logs_queue, avatar
-from auth.permissions import AdministerOrganizationPermission
-from auth.auth_context import get_authenticated_user
-from auth import scopes
-from data.logs_model import logs_model
-from data.registry_model import registry_model
from endpoints.api import (resource, nickname, ApiResource, query_param, parse_args,
RepositoryParamResource, require_repo_admin, related_user_resource,
- format_date, require_user_admin, path_param, require_scope, page_support,
- validate_json_request, InvalidRequest, show_if)
+ format_date, require_user_admin, path_param, require_scope, page_support)
from endpoints.exception import Unauthorized, NotFound
-
+from auth.permissions import AdministerOrganizationPermission
+from auth.auth_context import get_authenticated_user
+from data import model, database
+from auth import scopes
+from app import avatar
+from tzlocal import get_localzone
LOGS_PER_PAGE = 20
SERVICE_LEVEL_LOG_KINDS = set(['service_key_create', 'service_key_approve', 'service_key_delete',
'service_key_modify', 'service_key_extend', 'service_key_rotate'])
+def log_view(log, kinds):
+ view = {
+ 'kind': kinds[log.kind_id],
+ 'metadata': json.loads(log.metadata_json),
+ 'ip': log.ip,
+ 'datetime': format_date(log.datetime),
+ }
-def _parse_datetime(dt_string):
- if not dt_string:
- return None
+ if log.performer and log.performer.username:
+ view['performer'] = {
+ 'kind': 'user',
+ 'name': log.performer.username,
+ 'is_robot': log.performer.robot,
+ 'avatar': avatar.get_data_for_user(log.performer)
+ }
- try:
- return datetime.strptime(dt_string + ' UTC', '%m/%d/%Y %Z')
- except ValueError:
- return None
+ return view
+
+def aggregated_log_view(log, kinds, start_time):
+ # Because we aggregate based on the day of the month in SQL, we only have that information.
+ # Therefore, create a synthetic date based on the day and the month of the start time.
+ # Logs are allowed for a maximum period of one week, so this calculation should always work.
+ synthetic_date = datetime(start_time.year, start_time.month, int(log.day), tzinfo=get_localzone())
+ if synthetic_date.day < start_time.day:
+ synthetic_date = synthetic_date + relativedelta(months=1)
+
+ view = {
+ 'kind': kinds[log.kind_id],
+ 'count': log.count,
+ 'datetime': format_date(synthetic_date),
+ }
+
+ return view
+
+def _validate_logs_arguments(start_time, end_time, performer_name):
+ performer = None
+ if performer_name:
+ performer = model.user.get_user(performer_name)
+
+ if start_time:
+ try:
+ start_time = datetime.strptime(start_time + ' UTC', '%m/%d/%Y %Z')
+ except ValueError:
+ start_time = None
+
+ if not start_time:
+ start_time = datetime.today() - timedelta(7) # One week
+
+ if end_time:
+ try:
+ end_time = datetime.strptime(end_time + ' UTC', '%m/%d/%Y %Z')
+ end_time = end_time + timedelta(days=1)
+ except ValueError:
+ end_time = None
+
+ if not end_time:
+ end_time = datetime.today()
+
+ return (start_time, end_time, performer)
-def _validate_logs_arguments(start_time, end_time):
- start_time = _parse_datetime(start_time) or (datetime.today() - timedelta(days=1))
- end_time = _parse_datetime(end_time) or datetime.today()
- end_time = end_time + timedelta(days=1)
- return start_time, end_time
+def get_logs(start_time, end_time, performer_name=None, repository=None, namespace=None,
+ page_token=None, ignore=None):
+ (start_time, end_time, performer) = _validate_logs_arguments(start_time, end_time, performer_name)
+ kinds = model.log.get_log_entry_kinds()
+ logs_query = model.log.get_logs_query(start_time, end_time, performer=performer,
+ repository=repository, namespace=namespace,
+ ignore=ignore)
+ logs, next_page_token = model.modelutil.paginate(logs_query, database.LogEntry, descending=True,
+ page_token=page_token, limit=LOGS_PER_PAGE)
-def _get_logs(start_time, end_time, performer_name=None, repository_name=None, namespace_name=None,
- page_token=None, filter_kinds=None):
- (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
- log_entry_page = logs_model.lookup_logs(start_time, end_time, performer_name, repository_name,
- namespace_name, filter_kinds, page_token,
- app.config['ACTION_LOG_MAX_PAGE'])
- include_namespace = namespace_name is None and repository_name is None
return {
'start_time': format_date(start_time),
'end_time': format_date(end_time),
- 'logs': [log.to_dict(avatar, include_namespace) for log in log_entry_page.logs],
- }, log_entry_page.next_page_token
+ 'logs': [log_view(log, kinds) for log in logs],
+ }, next_page_token
-def _get_aggregate_logs(start_time, end_time, performer_name=None, repository=None, namespace=None,
- filter_kinds=None):
- (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
- aggregated_logs = logs_model.get_aggregated_log_counts(start_time, end_time,
- performer_name=performer_name,
- repository_name=repository,
- namespace_name=namespace,
- filter_kinds=filter_kinds)
+def get_aggregate_logs(start_time, end_time, performer_name=None, repository=None, namespace=None,
+ ignore=None):
+ (start_time, end_time, performer) = _validate_logs_arguments(start_time, end_time, performer_name)
+
+ kinds = model.log.get_log_entry_kinds()
+ aggregated_logs = model.log.get_aggregated_logs(start_time, end_time, performer=performer,
+ repository=repository, namespace=namespace,
+ ignore=ignore)
return {
- 'aggregated': [log.to_dict() for log in aggregated_logs]
+ 'aggregated': [aggregated_log_view(log, kinds, start_time) for log in aggregated_logs]
}
@@ -72,35 +117,33 @@ def _get_aggregate_logs(start_time, end_time, performer_name=None, repository=No
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class RepositoryLogs(RepositoryParamResource):
""" Resource for fetching logs for the specific repository. """
-
@require_repo_admin
@nickname('listRepoLogs')
@parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('starttime', 'Earliest time from which to get logs (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs (%m/%d/%Y %Z)', type=str)
+ @query_param('page', 'The page number for the logs', type=int, default=1)
@page_support()
def get(self, namespace, repository, page_token, parsed_args):
""" List the logs for the specified repository. """
- if registry_model.lookup_repository(namespace, repository) is None:
+ repo = model.repository.get_repository(namespace, repository)
+ if not repo:
raise NotFound()
start_time = parsed_args['starttime']
end_time = parsed_args['endtime']
- return _get_logs(start_time, end_time,
- repository_name=repository,
- page_token=page_token,
- namespace_name=namespace)
+ return get_logs(start_time, end_time, repository=repo, page_token=page_token,
+ ignore=SERVICE_LEVEL_LOG_KINDS)
@resource('/v1/user/logs')
class UserLogs(ApiResource):
""" Resource for fetching logs for the current user. """
-
@require_user_admin
@nickname('listUserLogs')
@parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
@query_param('performer', 'Username for which to filter logs.', type=str)
@page_support()
def get(self, parsed_args, page_token):
@@ -110,11 +153,8 @@ class UserLogs(ApiResource):
end_time = parsed_args['endtime']
user = get_authenticated_user()
- return _get_logs(start_time, end_time,
- performer_name=performer_name,
- namespace_name=user.username,
- page_token=page_token,
- filter_kinds=SERVICE_LEVEL_LOG_KINDS)
+ return get_logs(start_time, end_time, performer_name=performer_name, namespace=user.username,
+ page_token=page_token, ignore=SERVICE_LEVEL_LOG_KINDS)
@resource('/v1/organization/<orgname>/logs')
@@ -122,12 +162,12 @@ class UserLogs(ApiResource):
@related_user_resource(UserLogs)
class OrgLogs(ApiResource):
""" Resource for fetching logs for the entire organization. """
-
@nickname('listOrgLogs')
@parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
@query_param('performer', 'Username for which to filter logs.', type=str)
+ @query_param('page', 'The page number for the logs', type=int, default=1)
@page_support()
@require_scope(scopes.ORG_ADMIN)
def get(self, orgname, page_token, parsed_args):
@@ -138,47 +178,41 @@ class OrgLogs(ApiResource):
start_time = parsed_args['starttime']
end_time = parsed_args['endtime']
- return _get_logs(start_time, end_time,
- namespace_name=orgname,
- performer_name=performer_name,
- page_token=page_token)
+ return get_logs(start_time, end_time, namespace=orgname, performer_name=performer_name,
+ page_token=page_token, ignore=SERVICE_LEVEL_LOG_KINDS)
raise Unauthorized()
@resource('/v1/repository/<apirepopath:repository>/aggregatelogs')
-@show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL)
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class RepositoryAggregateLogs(RepositoryParamResource):
""" Resource for fetching aggregated logs for the specific repository. """
-
@require_repo_admin
@nickname('getAggregateRepoLogs')
@parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('starttime', 'Earliest time from which to get logs (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs (%m/%d/%Y %Z)', type=str)
def get(self, namespace, repository, parsed_args):
""" Returns the aggregated logs for the specified repository. """
- if registry_model.lookup_repository(namespace, repository) is None:
+ repo = model.repository.get_repository(namespace, repository)
+ if not repo:
raise NotFound()
start_time = parsed_args['starttime']
end_time = parsed_args['endtime']
- return _get_aggregate_logs(start_time, end_time,
- repository=repository,
- namespace=namespace)
+ return get_aggregate_logs(start_time, end_time, repository=repo,
+ ignore=SERVICE_LEVEL_LOG_KINDS)
@resource('/v1/user/aggregatelogs')
-@show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL)
class UserAggregateLogs(ApiResource):
""" Resource for fetching aggregated logs for the current user. """
-
@require_user_admin
@nickname('getAggregateUserLogs')
@parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
@query_param('performer', 'Username for which to filter logs.', type=str)
def get(self, parsed_args):
""" Returns the aggregated logs for the current user. """
@@ -187,23 +221,19 @@ class UserAggregateLogs(ApiResource):
end_time = parsed_args['endtime']
user = get_authenticated_user()
- return _get_aggregate_logs(start_time, end_time,
- performer_name=performer_name,
- namespace=user.username,
- filter_kinds=SERVICE_LEVEL_LOG_KINDS)
+ return get_aggregate_logs(start_time, end_time, performer_name=performer_name,
+ namespace=user.username, ignore=SERVICE_LEVEL_LOG_KINDS)
@resource('/v1/organization/<orgname>/aggregatelogs')
-@show_if(features.AGGREGATED_LOG_COUNT_RETRIEVAL)
@path_param('orgname', 'The name of the organization')
@related_user_resource(UserLogs)
class OrgAggregateLogs(ApiResource):
""" Resource for fetching aggregate logs for the entire organization. """
-
@nickname('getAggregateOrgLogs')
@parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
+ @query_param('starttime', 'Earliest time from which to get logs. (%m/%d/%Y %Z)', type=str)
+ @query_param('endtime', 'Latest time to which to get logs. (%m/%d/%Y %Z)', type=str)
@query_param('performer', 'Username for which to filter logs.', type=str)
@require_scope(scopes.ORG_ADMIN)
def get(self, orgname, parsed_args):
@@ -214,131 +244,7 @@ class OrgAggregateLogs(ApiResource):
start_time = parsed_args['starttime']
end_time = parsed_args['endtime']
- return _get_aggregate_logs(start_time, end_time,
- namespace=orgname,
- performer_name=performer_name)
-
- raise Unauthorized()
-
-
-EXPORT_LOGS_SCHEMA = {
- 'type': 'object',
- 'description': 'Configuration for an export logs operation',
- 'properties': {
- 'callback_url': {
- 'type': 'string',
- 'description': 'The callback URL to invoke with a link to the exported logs',
- },
- 'callback_email': {
- 'type': 'string',
- 'description': 'The e-mail address at which to e-mail a link to the exported logs',
- },
- },
-}
-
-
-def _queue_logs_export(start_time, end_time, options, namespace_name, repository_name=None):
- callback_url = options.get('callback_url')
- if callback_url:
- if not callback_url.startswith('https://') and not callback_url.startswith('http://'):
- raise InvalidRequest('Invalid callback URL')
-
- callback_email = options.get('callback_email')
- if callback_email:
- if callback_email.find('@') < 0:
- raise InvalidRequest('Invalid callback e-mail')
-
- (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
- export_id = logs_model.queue_logs_export(start_time, end_time, export_action_logs_queue,
- namespace_name, repository_name, callback_url,
- callback_email)
- if export_id is None:
- raise InvalidRequest('Invalid export request')
-
- return export_id
-
-
-@resource('/v1/repository/<apirepopath:repository>/exportlogs')
-@show_if(features.LOG_EXPORT)
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-class ExportRepositoryLogs(RepositoryParamResource):
- """ Resource for exporting the logs for the specific repository. """
- schemas = {
- 'ExportLogs': EXPORT_LOGS_SCHEMA
- }
-
- @require_repo_admin
- @nickname('exportRepoLogs')
- @parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @validate_json_request('ExportLogs')
- def post(self, namespace, repository, parsed_args):
- """ Queues an export of the logs for the specified repository. """
- if registry_model.lookup_repository(namespace, repository) is None:
- raise NotFound()
-
- start_time = parsed_args['starttime']
- end_time = parsed_args['endtime']
- export_id = _queue_logs_export(start_time, end_time, request.get_json(), namespace,
- repository_name=repository)
- return {
- 'export_id': export_id,
- }
-
-
-@resource('/v1/user/exportlogs')
-@show_if(features.LOG_EXPORT)
-class ExportUserLogs(ApiResource):
- """ Resource for exporting the logs for the current user repository. """
- schemas = {
- 'ExportLogs': EXPORT_LOGS_SCHEMA
- }
-
- @require_user_admin
- @nickname('exportUserLogs')
- @parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @validate_json_request('ExportLogs')
- def post(self, parsed_args):
- """ Returns the aggregated logs for the current user. """
- start_time = parsed_args['starttime']
- end_time = parsed_args['endtime']
-
- user = get_authenticated_user()
- export_id = _queue_logs_export(start_time, end_time, request.get_json(), user.username)
- return {
- 'export_id': export_id,
- }
-
-
-@resource('/v1/organization/<orgname>/exportlogs')
-@show_if(features.LOG_EXPORT)
-@path_param('orgname', 'The name of the organization')
-@related_user_resource(ExportUserLogs)
-class ExportOrgLogs(ApiResource):
- """ Resource for exporting the logs for an entire organization. """
- schemas = {
- 'ExportLogs': EXPORT_LOGS_SCHEMA
- }
-
- @nickname('exportOrgLogs')
- @parse_args()
- @query_param('starttime', 'Earliest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @query_param('endtime', 'Latest time for logs. Format: "%m/%d/%Y" in UTC.', type=str)
- @require_scope(scopes.ORG_ADMIN)
- @validate_json_request('ExportLogs')
- def post(self, orgname, parsed_args):
- """ Exports the logs for the specified organization. """
- permission = AdministerOrganizationPermission(orgname)
- if permission.can():
- start_time = parsed_args['starttime']
- end_time = parsed_args['endtime']
-
- export_id = _queue_logs_export(start_time, end_time, request.get_json(), orgname)
- return {
- 'export_id': export_id,
- }
+ return get_aggregate_logs(start_time, end_time, namespace=orgname,
+ performer_name=performer_name, ignore=SERVICE_LEVEL_LOG_KINDS)
raise Unauthorized()
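
The deleted export endpoints all funneled through _queue_logs_export, which checked that an optional callback URL is http(s) and an optional callback e-mail contains an '@' before queueing. A minimal standalone sketch of those checks, with InvalidRequest standing in for the API's exception type:

class InvalidRequest(Exception):
    """ Stand-in for the endpoints.exception.InvalidRequest used above. """

def validate_export_options(options):
    # Mirrors the checks the removed _queue_logs_export performed before
    # queueing the export.
    callback_url = options.get('callback_url')
    if callback_url and not callback_url.startswith(('https://', 'http://')):
        raise InvalidRequest('Invalid callback URL')

    callback_email = options.get('callback_email')
    if callback_email and '@' not in callback_email:
        raise InvalidRequest('Invalid callback e-mail')

validate_export_options({'callback_url': 'https://example.com/hook'})  # passes
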
diff --git a/endpoints/api/manifest.py b/endpoints/api/manifest.py
index 1370fa743..0307ecaba 100644
--- a/endpoints/api/manifest.py
+++ b/endpoints/api/manifest.py
@@ -1,105 +1,29 @@
""" Manage the manifests of a repository. """
-import json
-import logging
+from app import label_validator
from flask import request
-
-from app import label_validator, storage
-from data.model import InvalidLabelKeyException, InvalidMediaTypeException
-from data.registry_model import registry_model
-from digest import digest_tools
from endpoints.api import (resource, nickname, require_repo_read, require_repo_write,
RepositoryParamResource, log_action, validate_json_request,
- path_param, parse_args, query_param, abort, api,
- disallow_for_app_repositories, format_date,
- disallow_for_non_normal_repositories)
-from endpoints.api.image import image_dict
+ path_param, parse_args, query_param, truthy_bool, abort, api)
from endpoints.exception import NotFound
-from util.validation import VALID_LABEL_KEY_REGEX
+from data import model
+from digest import digest_tools
BASE_MANIFEST_ROUTE = '/v1/repository//manifest/'
MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN)
ALLOWED_LABEL_MEDIA_TYPES = ['text/plain', 'application/json']
-
-logger = logging.getLogger(__name__)
-
-def _label_dict(label):
- return {
+def label_view(label):
+ view = {
'id': label.uuid,
'key': label.key,
'value': label.value,
- 'source_type': label.source_type_name,
- 'media_type': label.media_type_name,
+ 'source_type': label.source_type.name,
+ 'media_type': label.media_type.name,
}
-
-def _layer_dict(manifest_layer, index):
- # NOTE: The `command` in the layer is either a JSON string of an array (schema 1) or
- # a single string (schema 2). The block below normalizes it to have the same format.
- command = None
- if manifest_layer.command:
- try:
- command = json.loads(manifest_layer.command)
- except (TypeError, ValueError):
- command = [manifest_layer.command]
-
- return {
- 'index': index,
- 'compressed_size': manifest_layer.compressed_size,
- 'is_remote': manifest_layer.is_remote,
- 'urls': manifest_layer.urls,
- 'command': command,
- 'comment': manifest_layer.comment,
- 'author': manifest_layer.author,
- 'blob_digest': str(manifest_layer.blob_digest),
- 'created_datetime': format_date(manifest_layer.created_datetime),
- }
-
-
-def _manifest_dict(manifest):
- image = None
- if manifest.legacy_image_if_present is not None:
- image = image_dict(manifest.legacy_image, with_history=True)
-
- layers = None
- if not manifest.is_manifest_list:
- layers = registry_model.list_manifest_layers(manifest, storage)
- if layers is None:
- logger.debug('Missing layers for manifest `%s`', manifest.digest)
- abort(404)
-
- return {
- 'digest': manifest.digest,
- 'is_manifest_list': manifest.is_manifest_list,
- 'manifest_data': manifest.internal_manifest_bytes.as_unicode(),
- 'image': image,
- 'layers': ([_layer_dict(lyr.layer_info, idx) for idx, lyr in enumerate(layers)]
- if layers else None),
- }
-
-
-@resource(MANIFEST_DIGEST_ROUTE)
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@path_param('manifestref', 'The digest of the manifest')
-class RepositoryManifest(RepositoryParamResource):
- """ Resource for retrieving a specific repository manifest. """
- @require_repo_read
- @nickname('getRepoManifest')
- @disallow_for_app_repositories
- def get(self, namespace_name, repository_name, manifestref):
- repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
- if repo_ref is None:
- raise NotFound()
-
- manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref,
- include_legacy_image=True)
- if manifest is None:
- raise NotFound()
-
- return _manifest_dict(manifest)
-
+ return view
@resource(MANIFEST_DIGEST_ROUTE + '/labels')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@@ -125,9 +49,9 @@ class RepositoryManifestLabels(RepositoryParamResource):
'description': 'The value for the label',
},
'media_type': {
- 'type': ['string', 'null'],
+ 'type': ['string'],
'description': 'The media type for this label',
- 'enum': ALLOWED_LABEL_MEDIA_TYPES + [None],
+ 'enum': ALLOWED_LABEL_MEDIA_TYPES,
},
},
},
@@ -135,80 +59,51 @@ class RepositoryManifestLabels(RepositoryParamResource):
@require_repo_read
@nickname('listManifestLabels')
- @disallow_for_app_repositories
@parse_args()
@query_param('filter', 'If specified, only labels matching the given prefix will be returned',
type=str, default=None)
- def get(self, namespace_name, repository_name, manifestref, parsed_args):
- repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
- if repo_ref is None:
- raise NotFound()
-
- manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
- if manifest is None:
- raise NotFound()
-
- labels = registry_model.list_manifest_labels(manifest, parsed_args['filter'])
- if labels is None:
+ def get(self, namespace, repository, manifestref, parsed_args):
+ try:
+ tag_manifest = model.tag.load_manifest_by_digest(namespace, repository, manifestref)
+ except model.DataModelException:
raise NotFound()
+ labels = model.label.list_manifest_labels(tag_manifest, prefix_filter=parsed_args['filter'])
return {
- 'labels': [_label_dict(label) for label in labels]
+ 'labels': [label_view(label) for label in labels]
}
@require_repo_write
@nickname('addManifestLabel')
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@validate_json_request('AddLabel')
- def post(self, namespace_name, repository_name, manifestref):
+ def post(self, namespace, repository, manifestref):
""" Adds a new label into the tag manifest. """
+ try:
+ tag_manifest = model.tag.load_manifest_by_digest(namespace, repository, manifestref)
+ except model.DataModelException:
+ raise NotFound()
+
label_data = request.get_json()
# Check for any reserved prefixes.
if label_validator.has_reserved_prefix(label_data['key']):
abort(400, message='Label has a reserved prefix')
- repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
- if repo_ref is None:
- raise NotFound()
-
- manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
- if manifest is None:
- raise NotFound()
-
- label = None
- try:
- label = registry_model.create_manifest_label(manifest,
- label_data['key'],
- label_data['value'],
- 'api',
- label_data['media_type'])
- except InvalidLabelKeyException:
-      message = ('Label is of an invalid format or missing; please ' +
-                 'use the %s format for labels' % VALID_LABEL_KEY_REGEX)
- abort(400, message=message)
- except InvalidMediaTypeException:
-      message = 'Media type is invalid; please use a valid media type: text/plain or application/json'
- abort(400, message=message)
-
- if label is None:
- raise NotFound()
-
+ label = model.label.create_manifest_label(tag_manifest, label_data['key'],
+ label_data['value'], 'api',
+ media_type_name=label_data['media_type'])
metadata = {
'id': label.uuid,
- 'key': label.key,
- 'value': label.value,
+ 'key': label_data['key'],
+ 'value': label_data['value'],
'manifest_digest': manifestref,
- 'media_type': label.media_type_name,
- 'namespace': namespace_name,
- 'repo': repository_name,
+ 'media_type': label_data['media_type'],
}
- log_action('manifest_label_add', namespace_name, metadata, repo_name=repository_name)
+ log_action('manifest_label_add', namespace, metadata, repo=tag_manifest.tag.repository)
- resp = {'label': _label_dict(label)}
- repo_string = '%s/%s' % (namespace_name, repository_name)
+ resp = {'label': label_view(label)}
+ repo_string = '%s/%s' % (namespace, repository)
headers = {
'Location': api.url_for(ManageRepositoryManifestLabel, repository=repo_string,
manifestref=manifestref, labelid=label.uuid),
@@ -224,39 +119,30 @@ class ManageRepositoryManifestLabel(RepositoryParamResource):
""" Resource for managing the labels on a specific repository manifest. """
@require_repo_read
@nickname('getManifestLabel')
- @disallow_for_app_repositories
- def get(self, namespace_name, repository_name, manifestref, labelid):
+ def get(self, namespace, repository, manifestref, labelid):
""" Retrieves the label with the specific ID under the manifest. """
- repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
- if repo_ref is None:
+ try:
+ tag_manifest = model.tag.load_manifest_by_digest(namespace, repository, manifestref)
+ except model.DataModelException:
raise NotFound()
- manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
- if manifest is None:
- raise NotFound()
-
- label = registry_model.get_manifest_label(manifest, labelid)
+ label = model.label.get_manifest_label(labelid, tag_manifest)
if label is None:
raise NotFound()
- return _label_dict(label)
+ return label_view(label)
@require_repo_write
@nickname('deleteManifestLabel')
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
- def delete(self, namespace_name, repository_name, manifestref, labelid):
+ def delete(self, namespace, repository, manifestref, labelid):
""" Deletes an existing label from a manifest. """
- repo_ref = registry_model.lookup_repository(namespace_name, repository_name)
- if repo_ref is None:
+ try:
+ tag_manifest = model.tag.load_manifest_by_digest(namespace, repository, manifestref)
+ except model.DataModelException:
raise NotFound()
- manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref)
- if manifest is None:
- raise NotFound()
-
- deleted = registry_model.delete_manifest_label(manifest, labelid)
+ deleted = model.label.delete_manifest_label(labelid, tag_manifest)
if deleted is None:
raise NotFound()
@@ -264,10 +150,9 @@ class ManageRepositoryManifestLabel(RepositoryParamResource):
'id': labelid,
'key': deleted.key,
'value': deleted.value,
- 'manifest_digest': manifestref,
- 'namespace': namespace_name,
- 'repo': repository_name,
+ 'manifest_digest': manifestref
}
- log_action('manifest_label_delete', namespace_name, metadata, repo_name=repository_name)
+ log_action('manifest_label_delete', namespace, metadata, repo=tag_manifest.tag.repository)
return '', 204
+
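
The NOTE removed along with _layer_dict documents a real quirk: schema 1 manifests store a layer's command as a JSON-encoded array, while schema 2 stores a plain string. A self-contained sketch of that normalization, with illustrative inputs:

import json

def normalize_command(command):
    # Schema 1: JSON array string; schema 2: plain string. Normalize both
    # to a list of strings, or None when no command is recorded.
    if not command:
        return None
    try:
        return json.loads(command)
    except (TypeError, ValueError):
        return [command]

assert normalize_command('["/bin/sh", "-c", "echo hi"]') == ['/bin/sh', '-c', 'echo hi']
assert normalize_command('/bin/sh -c echo hi') == ['/bin/sh -c echo hi']
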
diff --git a/endpoints/api/mirror.py b/endpoints/api/mirror.py
deleted file mode 100644
index 9c898c7f5..000000000
--- a/endpoints/api/mirror.py
+++ /dev/null
@@ -1,435 +0,0 @@
-# -*- coding: utf-8 -*-
-import logging
-
-from email.utils import parsedate_tz, mktime_tz
-from datetime import datetime
-
-from jsonschema import ValidationError
-from flask import request
-
-import features
-
-from auth.auth_context import get_authenticated_user
-from data import model
-from data.database import RepoMirrorRuleType
-from endpoints.api import (RepositoryParamResource, nickname, path_param, require_repo_admin,
- resource, validate_json_request, define_json_response, show_if,
- format_date)
-from endpoints.exception import NotFound
-from util.audit import track_and_log, wrap_repository
-from util.names import parse_robot_username
-
-
-common_properties = {
- 'is_enabled': {
- 'type': 'boolean',
- 'description': 'Used to enable or disable synchronizations.',
- },
- 'external_reference': {
- 'type': 'string',
- 'description': 'Location of the external repository.'
- },
- 'external_registry_username': {
- 'type': ['string', 'null'],
- 'description': 'Username used to authenticate with external registry.',
- },
- 'external_registry_password': {
- 'type': ['string', 'null'],
- 'description': 'Password used to authenticate with external registry.',
- },
- 'sync_start_date': {
- 'type': 'string',
- 'description': 'Determines the next time this repository is ready for synchronization.',
- },
- 'sync_interval': {
- 'type': 'integer',
- 'minimum': 0,
- 'description': 'Number of seconds after next_start_date to begin synchronizing.'
- },
- 'robot_username': {
- 'type': 'string',
- 'description': 'Username of robot which will be used for image pushes.'
- },
- 'root_rule': {
- 'type': 'object',
- 'description': 'Tag mirror rule',
- 'required': [
- 'rule_kind',
- 'rule_value'
- ],
- 'properties': {
- 'rule_kind': {
- 'type': 'string',
- 'description': 'The kind of rule type',
- 'enum': ['tag_glob_csv'],
- },
- 'rule_value': {
- 'type': 'array',
- 'description': 'Array of tag patterns',
- 'items': {
- 'type': 'string'
- }
- }
- },
- 'description': 'A list of glob-patterns used to determine which tags should be synchronized.'
- },
- 'external_registry_config': {
- 'type': 'object',
- 'properties': {
- 'verify_tls': {
- 'type': 'boolean',
- 'description': (
- 'Determines whether HTTPs is required and the certificate is verified when '
- 'communicating with the external repository.'
- ),
- },
- 'proxy': {
- 'type': 'object',
- 'description': 'Proxy configuration for use during synchronization.',
- 'properties': {
- 'https_proxy': {
- 'type': ['string', 'null'],
- 'description': 'Value for HTTPS_PROXY environment variable during sync.'
- },
- 'http_proxy': {
- 'type': ['string', 'null'],
- 'description': 'Value for HTTP_PROXY environment variable during sync.'
- },
- 'no_proxy': {
- 'type': ['string', 'null'],
- 'description': 'Value for NO_PROXY environment variable during sync.'
- }
- }
- }
- }
- }
-}
-
-
-@resource('/v1/repository//mirror/sync-now')
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@show_if(features.REPO_MIRROR)
-class RepoMirrorSyncNowResource(RepositoryParamResource):
- """ A resource for managing RepoMirrorConfig.sync_status """
-
- @require_repo_admin
- @nickname('syncNow')
- def post(self, namespace_name, repository_name):
- """ Update the sync_status for a given Repository's mirroring configuration. """
- repo = model.repository.get_repository(namespace_name, repository_name)
- if not repo:
- raise NotFound()
-
- mirror = model.repo_mirror.get_mirror(repository=repo)
- if not mirror:
- raise NotFound()
-
- if mirror and model.repo_mirror.update_sync_status_to_sync_now(mirror):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="sync_status", to="SYNC_NOW")
- return '', 204
-
- raise NotFound()
-
-
-@resource('/v1/repository//mirror/sync-cancel')
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@show_if(features.REPO_MIRROR)
-class RepoMirrorSyncCancelResource(RepositoryParamResource):
- """ A resource for managing RepoMirrorConfig.sync_status """
-
- @require_repo_admin
- @nickname('syncCancel')
- def post(self, namespace_name, repository_name):
- """ Update the sync_status for a given Repository's mirroring configuration. """
- repo = model.repository.get_repository(namespace_name, repository_name)
- if not repo:
- raise NotFound()
-
- mirror = model.repo_mirror.get_mirror(repository=repo)
- if not mirror:
- raise NotFound()
-
- if mirror and model.repo_mirror.update_sync_status_to_cancel(mirror):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="sync_status", to="SYNC_CANCEL")
- return '', 204
-
- raise NotFound()
-
-
-@resource('/v1/repository//mirror')
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@show_if(features.REPO_MIRROR)
-class RepoMirrorResource(RepositoryParamResource):
- """
- Resource for managing repository mirroring.
- """
- schemas = {
- 'CreateMirrorConfig': {
- 'description': 'Create the repository mirroring configuration.',
- 'type': 'object',
- 'required': [
- 'external_reference',
- 'sync_interval',
- 'sync_start_date',
- 'root_rule'
- ],
- 'properties': common_properties
- },
- 'UpdateMirrorConfig': {
- 'description': 'Update the repository mirroring configuration.',
- 'type': 'object',
- 'properties': common_properties
- },
- 'ViewMirrorConfig': {
- 'description': 'View the repository mirroring configuration.',
- 'type': 'object',
- 'required': [
- 'is_enabled',
- 'mirror_type',
- 'external_reference',
- 'external_registry_username',
- 'external_registry_config',
- 'sync_interval',
- 'sync_start_date',
- 'sync_expiration_date',
- 'sync_retries_remaining',
- 'sync_status',
- 'root_rule',
- 'robot_username',
- ],
- 'properties': common_properties
- }
- }
-
- @require_repo_admin
- @define_json_response('ViewMirrorConfig')
- @nickname('getRepoMirrorConfig')
- def get(self, namespace_name, repository_name):
- """ Return the Mirror configuration for a given Repository. """
- repo = model.repository.get_repository(namespace_name, repository_name)
- if not repo:
- raise NotFound()
-
- mirror = model.repo_mirror.get_mirror(repo)
- if not mirror:
- raise NotFound()
-
- # Transformations
- rules = mirror.root_rule.rule_value
- username = self._decrypt_username(mirror.external_registry_username)
- sync_start_date = self._dt_to_string(mirror.sync_start_date)
- sync_expiration_date = self._dt_to_string(mirror.sync_expiration_date)
- robot = mirror.internal_robot.username if mirror.internal_robot is not None else None
-
- return {
- 'is_enabled': mirror.is_enabled,
- 'mirror_type': mirror.mirror_type.name,
- 'external_reference': mirror.external_reference,
- 'external_registry_username': username,
- 'external_registry_config': mirror.external_registry_config or {},
- 'sync_interval': mirror.sync_interval,
- 'sync_start_date': sync_start_date,
- 'sync_expiration_date': sync_expiration_date,
- 'sync_retries_remaining': mirror.sync_retries_remaining,
- 'sync_status': mirror.sync_status.name,
- 'root_rule': {
- 'rule_kind': 'tag_glob_csv',
- 'rule_value': rules
- },
- 'robot_username': robot,
- }
-
- @require_repo_admin
- @nickname('createRepoMirrorConfig')
- @validate_json_request('CreateMirrorConfig')
- def post(self, namespace_name, repository_name):
- """ Create a RepoMirrorConfig for a given Repository. """
- # TODO: Tidy up this function
- # TODO: Specify only the data we want to pass on when creating the RepoMirrorConfig. Avoid
- # the possibility of data injection.
-
- repo = model.repository.get_repository(namespace_name, repository_name)
- if not repo:
- raise NotFound()
-
- if model.repo_mirror.get_mirror(repo):
-      return {'detail': 'Mirror configuration already exists for repository %s/%s' % (
- namespace_name, repository_name)}, 409
-
- data = request.get_json()
-
- data['sync_start_date'] = self._string_to_dt(data['sync_start_date'])
-
- rule = model.repo_mirror.create_rule(repo, data['root_rule']['rule_value'])
- del data['root_rule']
-
- # Verify the robot is part of the Repository's namespace
- robot = self._setup_robot_for_mirroring(namespace_name, repository_name, data['robot_username'])
- del data['robot_username']
-
- mirror = model.repo_mirror.enable_mirroring_for_repository(repo, root_rule=rule,
- internal_robot=robot, **data)
- if mirror:
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_reference', to=data['external_reference'])
- return '', 201
- else:
- # TODO: Determine appropriate Response
- return {'detail': 'RepoMirrorConfig already exists for this repository.'}, 409
-
- @require_repo_admin
- @validate_json_request('UpdateMirrorConfig')
- @nickname('changeRepoMirrorConfig')
- def put(self, namespace_name, repository_name):
- """ Allow users to modifying the repository's mirroring configuration. """
- values = request.get_json()
-
- repo = model.repository.get_repository(namespace_name, repository_name)
- if not repo:
- raise NotFound()
-
- mirror = model.repo_mirror.get_mirror(repo)
- if not mirror:
- raise NotFound()
-
- if 'is_enabled' in values:
- if values['is_enabled'] == True:
- if model.repo_mirror.enable_mirror(repo):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='is_enabled', to=True)
- if values['is_enabled'] == False:
- if model.repo_mirror.disable_mirror(repo):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='is_enabled', to=False)
-
- if 'external_reference' in values:
- if values['external_reference'] == '':
- return {'detail': 'Empty string is an invalid repository location.'}, 400
- if model.repo_mirror.change_remote(repo, values['external_reference']):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_reference', to=values['external_reference'])
-
- if 'robot_username' in values:
- robot_username = values['robot_username']
- robot = self._setup_robot_for_mirroring(namespace_name, repository_name, robot_username)
- if model.repo_mirror.set_mirroring_robot(repo, robot):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='robot_username', to=robot_username)
-
- if 'sync_start_date' in values:
- try:
- sync_start_date = self._string_to_dt(values['sync_start_date'])
- except ValueError as e:
- return {'detail': 'Incorrect DateTime format for sync_start_date.'}, 400
- if model.repo_mirror.change_sync_start_date(repo, sync_start_date):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='sync_start_date', to=sync_start_date)
-
- if 'sync_interval' in values:
- if model.repo_mirror.change_sync_interval(repo, values['sync_interval']):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='sync_interval', to=values['sync_interval'])
-
- if 'external_registry_username' in values and 'external_registry_password' in values:
- username = values['external_registry_username']
- password = values['external_registry_password']
- if username is None and password is not None:
- return {'detail': 'Unable to delete username while setting a password.'}, 400
- if model.repo_mirror.change_credentials(repo, username, password):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_username', to=username)
- if password is None:
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_password', to=None)
- else:
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_password', to="********")
-
- elif 'external_registry_username' in values:
- username = values['external_registry_username']
- if model.repo_mirror.change_username(repo, username):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='external_registry_username', to=username)
-
- # Do not allow specifying a password without setting a username
- if 'external_registry_password' in values and 'external_registry_username' not in values:
- return {'detail': 'Unable to set a new password without also specifying a username.'}, 400
-
- if 'external_registry_config' in values:
- external_registry_config = values.get('external_registry_config', {})
-
- if 'verify_tls' in external_registry_config:
- updates = {'verify_tls': external_registry_config['verify_tls']}
- if model.repo_mirror.change_external_registry_config(repo, updates):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='verify_tls', to=external_registry_config['verify_tls'])
-
- if 'proxy' in external_registry_config:
- proxy_values = external_registry_config.get('proxy', {})
-
- if 'http_proxy' in proxy_values:
- updates = {'proxy': {'http_proxy': proxy_values['http_proxy']}}
- if model.repo_mirror.change_external_registry_config(repo, updates):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='http_proxy', to=proxy_values['http_proxy'])
-
- if 'https_proxy' in proxy_values:
- updates = {'proxy': {'https_proxy': proxy_values['https_proxy']}}
- if model.repo_mirror.change_external_registry_config(repo, updates):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='https_proxy', to=proxy_values['https_proxy'])
-
- if 'no_proxy' in proxy_values:
- updates = {'proxy': {'no_proxy': proxy_values['no_proxy']}}
- if model.repo_mirror.change_external_registry_config(repo, updates):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed='no_proxy', to=proxy_values['no_proxy'])
-
- if 'root_rule' in values:
-
- if values['root_rule']['rule_kind'] != "tag_glob_csv":
- raise ValidationError('validation failed: rule_kind must be "tag_glob_csv"')
-
- if model.repo_mirror.change_rule(repo, RepoMirrorRuleType.TAG_GLOB_CSV, values['root_rule']['rule_value']):
- track_and_log('repo_mirror_config_changed', wrap_repository(repo), changed="mirror_rule", to=values['root_rule']['rule_value'])
-
- return '', 201
-
- def _setup_robot_for_mirroring(self, namespace_name, repo_name, robot_username):
- """ Validate robot exists and give write permissions. """
- robot = model.user.lookup_robot(robot_username)
- assert robot.robot
-
- namespace, _ = parse_robot_username(robot_username)
- if namespace != namespace_name:
- raise model.DataModelException('Invalid robot')
-
- # Ensure the robot specified has access to the repository. If not, grant it.
- permissions = model.permission.get_user_repository_permissions(robot, namespace_name, repo_name)
- if not permissions or permissions[0].role.name == 'read':
- model.permission.set_user_repo_permission(robot.username, namespace_name, repo_name, 'write')
-
- return robot
-
- def _string_to_dt(self, string):
- """ Convert String to correct DateTime format. """
- if string is None:
- return None
-
- """
- # TODO: Use RFC2822. This doesn't work consistently.
- # TODO: Move this to same module as `format_date` once fixed.
- tup = parsedate_tz(string)
- if len(tup) == 8:
- tup = tup + (0,) # If TimeZone is omitted, assume UTC
- ts = mktime_tz(tup)
- dt = datetime.fromtimestamp(ts, pytz.UTC)
- return dt
- """
- assert isinstance(string, (str, unicode))
- dt = datetime.strptime(string, "%Y-%m-%dT%H:%M:%SZ")
- return dt
-
- def _dt_to_string(self, dt):
- """ Convert DateTime to correctly formatted String."""
- if dt is None:
- return None
-
- """
- # TODO: Use RFC2822. Need to make it work bi-directionally.
- return format_date(dt)
- """
-
- assert isinstance(dt, datetime)
- string = dt.isoformat() + 'Z'
- return string
-
- def _decrypt_username(self, username):
- if username is None:
- return None
- return username.decrypt()
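
The two datetime helpers deleted above pin sync_start_date to a naive ISO-8601 UTC string rather than the RFC 2822 format the TODOs mention. A round-trip sketch of that convention (written for Python 3, whereas the deleted code was Python 2 and also accepted unicode):

from datetime import datetime

def string_to_dt(value):
    # Parse the 'YYYY-MM-DDTHH:MM:SSZ' convention used by the mirror API.
    if value is None:
        return None
    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')

def dt_to_string(dt):
    if dt is None:
        return None
    return dt.isoformat() + 'Z'

assert dt_to_string(string_to_dt('2020-01-02T03:04:05Z')) == '2020-01-02T03:04:05Z'
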
diff --git a/endpoints/api/organization.py b/endpoints/api/organization.py
index e53bba6b9..854451454 100644
--- a/endpoints/api/organization.py
+++ b/endpoints/api/organization.py
@@ -1,15 +1,12 @@
""" Manage organizations, members and OAuth applications. """
import logging
-import recaptcha2
from flask import request
import features
-from active_migration import ActiveDataMigration, ERTMigrationFlags
-from app import (billing as stripe, avatar, all_queues, authentication, namespace_gc_queue,
- ip_resolver, app)
+from app import billing as stripe, avatar, all_queues
from endpoints.api import (resource, nickname, ApiResource, validate_json_request, request_error,
related_user_resource, internal_only, require_user_admin, log_action,
show_if, path_param, require_scope, require_fresh_login)
@@ -21,8 +18,6 @@ from auth.auth_context import get_authenticated_user
from auth import scopes
from data import model
from data.billing import get_plan
-from util.names import parse_robot_username
-from util.request import get_request_ip
logger = logging.getLogger(__name__)
@@ -38,8 +33,6 @@ def team_view(orgname, team):
'repo_count': team.repo_count,
'member_count': team.member_count,
-
- 'is_synced': team.is_synced,
}
@@ -63,8 +56,6 @@ def org_view(o, teams):
if is_admin:
view['invoice_email'] = o.invoice_email
view['invoice_email_address'] = o.invoice_email_address
- view['tag_expiration_s'] = o.removed_tag_expiration_s
- view['is_free_account'] = o.stripe_id is None
return view
@@ -88,10 +79,6 @@ class OrganizationList(ApiResource):
'type': 'string',
'description': 'Organization contact email',
},
- 'recaptcha_response': {
- 'type': 'string',
-        'description': 'The recaptcha response code for verification (may be disabled)',
- },
},
},
}
@@ -120,23 +107,9 @@ class OrganizationList(ApiResource):
if features.MAILING and not org_data.get('email'):
raise request_error(message='Email address is required')
- # If recaptcha is enabled, then verify the user is a human.
- if features.RECAPTCHA:
- recaptcha_response = org_data.get('recaptcha_response', '')
- result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
- recaptcha_response,
- get_request_ip())
-
- if not result['success']:
- return {
- 'message': 'Are you a bot? If not, please revalidate the captcha.'
- }, 400
-
- is_possible_abuser = ip_resolver.is_ip_possible_threat(get_request_ip())
try:
model.organization.create_organization(org_data['name'], org_data.get('email'), user,
- email_required=features.MAILING,
- is_possible_abuser=is_possible_abuser)
+ email_required=features.MAILING)
return 'Created', 201
except model.DataModelException as ex:
raise request_error(exception=ex)
@@ -164,15 +137,16 @@ class Organization(ApiResource):
'type': ['string', 'null'],
'description': 'The email address at which to receive invoices',
},
- 'tag_expiration_s': {
+ 'tag_expiration': {
'type': 'integer',
+ 'maximum': 2592000,
'minimum': 0,
- 'description': 'The number of seconds for tag expiration',
},
},
},
}
+ @require_scope(scopes.ORG_ADMIN)
@nickname('getOrganization')
def get(self, orgname):
""" Get the details for the specified organization """
@@ -183,8 +157,7 @@ class Organization(ApiResource):
teams = None
if OrganizationMemberPermission(orgname).can():
- has_syncing = features.TEAM_SYNCING and bool(authentication.federated_service)
- teams = model.team.get_teams_within_org(org, has_syncing)
+ teams = model.team.get_teams_within_org(org)
return org_view(org, teams)
@@ -220,9 +193,9 @@ class Organization(ApiResource):
logger.debug('Changing email address for organization: %s', org.username)
model.user.update_email(org, new_email)
- if features.CHANGE_TAG_EXPIRATION and 'tag_expiration_s' in org_data:
- logger.debug('Changing organization tag expiration to: %ss', org_data['tag_expiration_s'])
- model.user.change_user_tag_expiration(org, org_data['tag_expiration_s'])
+ if 'tag_expiration' in org_data:
+ logger.debug('Changing organization tag expiration to: %ss', org_data['tag_expiration'])
+ model.user.change_user_tag_expiration(org, org_data['tag_expiration'])
teams = model.team.get_teams_within_org(org)
return org_view(org, teams)
@@ -241,7 +214,7 @@ class Organization(ApiResource):
except model.InvalidOrganizationException:
raise NotFound()
- model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue)
+ model.user.delete_user(org, all_queues)
return '', 204
raise Unauthorized()
@@ -286,55 +259,6 @@ class OrgPrivateRepositories(ApiResource):
raise Unauthorized()
-@resource('/v1/organization//collaborators')
-@path_param('orgname', 'The name of the organization')
-class OrganizationCollaboratorList(ApiResource):
- """ Resource for listing outside collaborators of an organization.
-
- Collaborators are users that do not belong to any team in the
-      organization, but who have direct permissions on one or more
- repositories belonging to the organization.
- """
-
- @require_scope(scopes.ORG_ADMIN)
- @nickname('getOrganizationCollaborators')
- def get(self, orgname):
- """ List outside collaborators of the specified organization. """
- permission = AdministerOrganizationPermission(orgname)
- if not permission.can():
- raise Unauthorized()
-
- try:
- org = model.organization.get_organization(orgname)
- except model.InvalidOrganizationException:
- raise NotFound()
-
- all_perms = model.permission.list_organization_member_permissions(org)
- membership = model.team.list_organization_members_by_teams(org)
-
- org_members = set(m.user.username for m in membership)
-
- collaborators = {}
- for perm in all_perms:
- username = perm.user.username
-
- # Only interested in non-member permissions.
- if username in org_members:
- continue
-
- if username not in collaborators:
- collaborators[username] = {
- 'kind': 'user',
- 'name': username,
- 'avatar': avatar.get_data_for_user(perm.user),
- 'repositories': [],
- }
-
- collaborators[username]['repositories'].append(perm.repository.name)
-
- return {'collaborators': collaborators.values()}
-
-
@resource('/v1/organization//members')
@path_param('orgname', 'The name of the organization')
class OrganizationMemberList(ApiResource):
@@ -415,14 +339,7 @@ class OrganizationMember(ApiResource):
# Lookup the user's information in the organization.
teams = list(model.team.get_user_teams_within_org(membername, organization))
if not teams:
- # 404 if the user is not a robot under the organization, as that means the referenced
- # user or robot is not a member of this organization.
- if not member.robot:
- raise NotFound()
-
- namespace, _ = parse_robot_username(member.username)
- if namespace != orgname:
- raise NotFound()
+ raise NotFound()
repo_permissions = model.permission.list_organization_member_permissions(organization, member)
@@ -494,23 +411,14 @@ class ApplicationInformation(ApiResource):
def app_view(application):
is_admin = AdministerOrganizationPermission(application.organization.username).can()
- client_secret = None
- if is_admin:
- # TODO(remove-unenc): Remove legacy lookup.
- client_secret = None
- if application.secure_client_secret is not None:
- client_secret = application.secure_client_secret.decrypt()
- if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and client_secret is None:
- client_secret = application.client_secret
-
- assert (client_secret is not None) == is_admin
return {
'name': application.name,
'description': application.description,
'application_uri': application.application_uri,
+
'client_id': application.client_id,
- 'client_secret': client_secret,
+ 'client_secret': application.client_secret if is_admin else None,
'redirect_uri': application.redirect_uri if is_admin else None,
'avatar_email': application.avatar_email if is_admin else None,
}
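
The recaptcha gate removed from OrganizationList.post follows the usual server-side pattern: verify the client's response token before creating anything. A sketch of that check using recaptcha2.verify with the same call shape as the deleted lines (requires the recaptcha2 package; the key and IP arguments here are illustrative):

import recaptcha2

def is_human(secret_key, recaptcha_response, remote_ip):
    # recaptcha2.verify returns a dict whose 'success' key the removed
    # endpoint code inspected before proceeding.
    result = recaptcha2.verify(secret_key, recaptcha_response, remote_ip)
    return result['success']
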
diff --git a/endpoints/api/permission.py b/endpoints/api/permission.py
index e85c6480e..f07d87b2f 100644
--- a/endpoints/api/permission.py
+++ b/endpoints/api/permission.py
@@ -4,27 +4,55 @@ import logging
from flask import request
+from app import avatar
from endpoints.api import (resource, nickname, require_repo_admin, RepositoryParamResource,
log_action, request_error, validate_json_request, path_param)
from endpoints.exception import NotFound
-from permission_models_pre_oci import pre_oci_model as model
-from permission_models_interface import DeleteException, SaveException
+from data import model
+
logger = logging.getLogger(__name__)
+def role_view(repo_perm_obj):
+ return {
+ 'role': repo_perm_obj.role.name,
+ }
+
+def wrap_role_view_user(role_json, user):
+ role_json['name'] = user.username
+ role_json['is_robot'] = user.robot
+ if not user.robot:
+ role_json['avatar'] = avatar.get_data_for_user(user)
+ return role_json
+
+
+def wrap_role_view_org(role_json, user, org_members):
+ role_json['is_org_member'] = user.robot or user.username in org_members
+ return role_json
+
+
+def wrap_role_view_team(role_json, team):
+ role_json['name'] = team.name
+ role_json['avatar'] = avatar.get_data_for_team(team)
+ return role_json
+
+
@resource('/v1/repository//permissions/team/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class RepositoryTeamPermissionList(RepositoryParamResource):
""" Resource for repository team permissions. """
@require_repo_admin
@nickname('listRepoTeamPermissions')
- def get(self, namespace_name, repository_name):
+ def get(self, namespace, repository):
""" List all team permission. """
- repo_perms = model.get_repo_permissions_by_team(namespace_name, repository_name)
+ repo_perms = model.permission.get_all_repo_teams(namespace, repository)
+
+ def wrapped_role_view(repo_perm):
+ return wrap_role_view_team(role_view(repo_perm), repo_perm.team)
return {
- 'permissions': {repo_perm.team_name: repo_perm.to_dict()
+ 'permissions': {repo_perm.team.name: wrapped_role_view(repo_perm)
for repo_perm in repo_perms}
}
@@ -35,10 +63,38 @@ class RepositoryUserPermissionList(RepositoryParamResource):
""" Resource for repository user permissions. """
@require_repo_admin
@nickname('listRepoUserPermissions')
- def get(self, namespace_name, repository_name):
+ def get(self, namespace, repository):
""" List all user permissions. """
- perms = model.get_repo_permissions_by_user(namespace_name, repository_name)
- return {'permissions': {p.username: p.to_dict() for p in perms}}
+ # Lookup the organization (if any).
+ org = None
+ try:
+ org = model.organization.get_organization(namespace) # Will raise an error if not org
+ except model.InvalidOrganizationException:
+ # This repository isn't under an org
+ pass
+
+ # Determine how to wrap the role(s).
+ def wrapped_role_view(repo_perm):
+ return wrap_role_view_user(role_view(repo_perm), repo_perm.user)
+
+ role_view_func = wrapped_role_view
+
+ if org:
+ org_members = model.organization.get_organization_member_set(namespace)
+ current_func = role_view_func
+
+ def wrapped_role_org_view(repo_perm):
+ return wrap_role_view_org(current_func(repo_perm), repo_perm.user,
+ org_members)
+
+ role_view_func = wrapped_role_org_view
+
+ # Load and return the permissions.
+ repo_perms = model.user.get_all_repo_users(namespace, repository)
+ return {
+ 'permissions': {perm.user.username: role_view_func(perm)
+ for perm in repo_perms}
+ }
@resource('/v1/repository//permissions/user//transitive')
@@ -49,16 +105,19 @@ class RepositoryUserTransitivePermission(RepositoryParamResource):
or via a team. """
@require_repo_admin
@nickname('getUserTransitivePermission')
- def get(self, namespace_name, repository_name, username):
+ def get(self, namespace, repository, username):
""" Get the fetch the permission for the specified user. """
-
- roles = model.get_repo_roles(username, namespace_name, repository_name)
-
- if not roles:
+ user = model.user.get_user(username)
+ if not user:
raise NotFound
-
+
+ repo = model.repository.get_repository(namespace, repository)
+ if not repo:
+ raise NotFound
+
+ permissions = list(model.permission.get_user_repo_permissions(user, repo))
return {
- 'permissions': [r.to_dict() for r in roles]
+ 'permissions': [role_view(permission) for permission in permissions]
}
@@ -90,48 +149,68 @@ class RepositoryUserPermission(RepositoryParamResource):
@require_repo_admin
@nickname('getUserPermissions')
- def get(self, namespace_name, repository_name, username):
- """ Get the permission for the specified user. """
- logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)
- perm = model.get_repo_permission_for_user(username, namespace_name, repository_name)
- return perm.to_dict()
+ def get(self, namespace, repository, username):
+ """ Get the Fetch the permission for the specified user. """
+ logger.debug('Get repo: %s/%s permissions for user %s', namespace, repository, username)
+ perm = model.permission.get_user_reponame_permission(username, namespace, repository)
+ perm_view = wrap_role_view_user(role_view(perm), perm.user)
+
+ try:
+ model.organization.get_organization(namespace)
+ org_members = model.organization.get_organization_member_set(namespace)
+ perm_view = wrap_role_view_org(perm_view, perm.user, org_members)
+ except model.InvalidOrganizationException:
+ # This repository is not part of an organization
+ pass
+
+ return perm_view
@require_repo_admin
@nickname('changeUserPermissions')
@validate_json_request('UserPermission')
- def put(self, namespace_name, repository_name, username): # Also needs to respond to post
+ def put(self, namespace, repository, username): # Also needs to respond to post
""" Update the perimssions for an existing repository. """
new_permission = request.get_json()
logger.debug('Setting permission to: %s for user %s', new_permission['role'], username)
try:
- perm = model.set_repo_permission_for_user(username, namespace_name, repository_name,
- new_permission['role'])
- resp = perm.to_dict()
- except SaveException as ex:
+ perm = model.permission.set_user_repo_permission(username, namespace, repository,
+ new_permission['role'])
+ except model.DataModelException as ex:
raise request_error(exception=ex)
- log_action('change_repo_permission', namespace_name,
- {'username': username, 'repo': repository_name,
- 'namespace': namespace_name,
- 'role': new_permission['role']},
- repo_name=repository_name)
+ perm_view = wrap_role_view_user(role_view(perm), perm.user)
- return resp, 200
+ try:
+ model.organization.get_organization(namespace)
+ org_members = model.organization.get_organization_member_set(namespace)
+ perm_view = wrap_role_view_org(perm_view, perm.user, org_members)
+ except model.InvalidOrganizationException:
+ # This repository is not part of an organization
+ pass
+ except model.DataModelException as ex:
+ raise request_error(exception=ex)
+
+ log_action('change_repo_permission', namespace,
+ {'username': username, 'repo': repository,
+ 'role': new_permission['role']},
+ repo=model.repository.get_repository(namespace, repository))
+
+ return perm_view, 200
@require_repo_admin
@nickname('deleteUserPermissions')
- def delete(self, namespace_name, repository_name, username):
+ def delete(self, namespace, repository, username):
""" Delete the permission for the user. """
try:
- model.delete_repo_permission_for_user(username, namespace_name, repository_name)
- except DeleteException as ex:
+ model.permission.delete_user_permission(username, namespace, repository)
+ except model.DataModelException as ex:
raise request_error(exception=ex)
- log_action('delete_repo_permission', namespace_name,
- {'username': username, 'repo': repository_name, 'namespace': namespace_name},
- repo_name=repository_name)
+ log_action('delete_repo_permission', namespace,
+ {'username': username, 'repo': repository},
+ repo=model.repository.get_repository(namespace, repository))
return '', 204
@@ -164,46 +243,39 @@ class RepositoryTeamPermission(RepositoryParamResource):
@require_repo_admin
@nickname('getTeamPermissions')
- def get(self, namespace_name, repository_name, teamname):
+ def get(self, namespace, repository, teamname):
""" Fetch the permission for the specified team. """
- logger.debug('Get repo: %s/%s permissions for team %s', namespace_name, repository_name, teamname)
- role = model.get_repo_role_for_team(teamname, namespace_name, repository_name)
- return role.to_dict()
+ logger.debug('Get repo: %s/%s permissions for team %s', namespace, repository, teamname)
+ perm = model.permission.get_team_reponame_permission(teamname, namespace, repository)
+ return role_view(perm)
@require_repo_admin
@nickname('changeTeamPermissions')
@validate_json_request('TeamPermission')
- def put(self, namespace_name, repository_name, teamname):
+ def put(self, namespace, repository, teamname):
""" Update the existing team permission. """
new_permission = request.get_json()
logger.debug('Setting permission to: %s for team %s', new_permission['role'], teamname)
- try:
- perm = model.set_repo_permission_for_team(teamname, namespace_name, repository_name,
- new_permission['role'])
- resp = perm.to_dict()
- except SaveException as ex:
- raise request_error(exception=ex)
-
+ perm = model.permission.set_team_repo_permission(teamname, namespace, repository,
+ new_permission['role'])
- log_action('change_repo_permission', namespace_name,
- {'team': teamname, 'repo': repository_name,
+ log_action('change_repo_permission', namespace,
+ {'team': teamname, 'repo': repository,
'role': new_permission['role']},
- repo_name=repository_name)
- return resp, 200
+ repo=model.repository.get_repository(namespace, repository))
+
+ return wrap_role_view_team(role_view(perm), perm.team), 200
@require_repo_admin
@nickname('deleteTeamPermissions')
- def delete(self, namespace_name, repository_name, teamname):
+ def delete(self, namespace, repository, teamname):
""" Delete the permission for the specified team. """
- try:
- model.delete_repo_permission_for_team(teamname, namespace_name, repository_name)
- except DeleteException as ex:
- raise request_error(exception=ex)
-
- log_action('delete_repo_permission', namespace_name,
- {'team': teamname, 'repo': repository_name},
- repo_name=repository_name)
+ model.permission.delete_team_permission(teamname, namespace, repository)
+
+ log_action('delete_repo_permission', namespace,
+ {'team': teamname, 'repo': repository},
+ repo=model.repository.get_repository(namespace, repository))
return '', 204
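
The rewritten RepositoryUserPermissionList.get layers view wrappers with closures, binding the previous stage to current_func before defining the next wrapper so the closure does not capture itself. A distilled, self-contained version of that composition pattern:

def compose_views(*wrappers):
    # Chain wrappers left to right, binding the previous stage explicitly
    # (the same late-binding precaution the endpoint takes with current_func).
    def identity(value):
        return value
    func = identity
    for wrapper in wrappers:
        func = (lambda outer, prev: lambda value: outer(prev(value)))(wrapper, func)
    return func

view = compose_views(lambda d: dict(d, name='alice'),
                     lambda d: dict(d, is_org_member=True))
assert view({'role': 'read'}) == {'role': 'read', 'name': 'alice', 'is_org_member': True}
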
diff --git a/endpoints/api/permission_models_interface.py b/endpoints/api/permission_models_interface.py
deleted file mode 100644
index 49c24744c..000000000
--- a/endpoints/api/permission_models_interface.py
+++ /dev/null
@@ -1,208 +0,0 @@
-import sys
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-
-class SaveException(Exception):
- def __init__(self, other):
- self.traceback = sys.exc_info()
- super(SaveException, self).__init__(str(other))
-
-class DeleteException(Exception):
- def __init__(self, other):
- self.traceback = sys.exc_info()
- super(DeleteException, self).__init__(str(other))
-
-
-class Role(namedtuple('Role', ['role_name'])):
- def to_dict(self):
- return {
- 'role': self.role_name,
- }
-
-class UserPermission(namedtuple('UserPermission', [
- 'role_name',
- 'username',
- 'is_robot',
- 'avatar',
- 'is_org_member',
- 'has_org',
- ])):
-
- def to_dict(self):
- perm_dict = {
- 'role': self.role_name,
- 'name': self.username,
- 'is_robot': self.is_robot,
- 'avatar': self.avatar,
- }
- if self.has_org:
- perm_dict['is_org_member'] = self.is_org_member
- return perm_dict
-
-
-class RobotPermission(namedtuple('RobotPermission', [
- 'role_name',
- 'username',
- 'is_robot',
- 'is_org_member',
-])):
-
- def to_dict(self, user=None, team=None, org_members=None):
- return {
- 'role': self.role_name,
- 'name': self.username,
- 'is_robot': True,
- 'is_org_member': self.is_org_member,
- }
-
-
-class TeamPermission(namedtuple('TeamPermission', [
- 'role_name',
- 'team_name',
- 'avatar',
-])):
-
- def to_dict(self):
- return {
- 'role': self.role_name,
- 'name': self.team_name,
- 'avatar': self.avatar,
- }
-
-@add_metaclass(ABCMeta)
-class PermissionDataInterface(object):
- """
- Data interface used by permissions API
- """
-
- @abstractmethod
- def get_repo_permissions_by_user(self, namespace_name, repository_name):
- """
-
- Args:
- namespace_name: string
- repository_name: string
-
- Returns:
- list(UserPermission)
- """
-
- @abstractmethod
- def get_repo_roles(self, username, namespace_name, repository_name):
- """
-
- Args:
- username: string
- namespace_name: string
- repository_name: string
-
- Returns:
- list(Role) or None
- """
-
- @abstractmethod
- def get_repo_permission_for_user(self, username, namespace_name, repository_name):
- """
-
- Args:
- username: string
- namespace_name: string
- repository_name: string
-
- Returns:
- UserPermission
- """
-
- @abstractmethod
- def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
- """
-
- Args:
- username: string
- namespace_name: string
- repository_name: string
- role_name: string
-
- Returns:
- UserPermission
-
- Raises:
- SaveException
- """
-
- @abstractmethod
- def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
- """
-
- Args:
- username: string
- namespace_name: string
- repository_name: string
-
- Returns:
- void
-
- Raises:
- DeleteException
- """
-
- @abstractmethod
- def get_repo_permissions_by_team(self, namespace_name, repository_name):
- """
-
- Args:
- namespace_name: string
- repository_name: string
-
- Returns:
- list(TeamPermission)
- """
-
- @abstractmethod
- def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
- """
-
- Args:
- team_name: string
- namespace_name: string
- repository_name: string
-
- Returns:
- Role
- """
-
- @abstractmethod
- def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
- """
-
- Args:
- team_name: string
- namespace_name: string
- repository_name: string
- permission: string
-
- Returns:
- TeamPermission
-
- Raises:
- SaveException
- """
-
- @abstractmethod
- def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
- """
-
- Args:
- team_name: string
- namespace_name: string
- repository_name: string
-
- Returns:
- TeamPermission
-
- Raises:
- DeleteException
- """
\ No newline at end of file
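
The deleted interface declares its contract through six.add_metaclass for Python 2 compatibility; on Python 3 the same abstract interface can be written directly against abc. A trimmed sketch of the equivalent declaration (one representative method shown):

from abc import ABC, abstractmethod

class PermissionDataInterface(ABC):
    """ Data interface used by the permissions API. """

    @abstractmethod
    def get_repo_permissions_by_user(self, namespace_name, repository_name):
        """ Return a list of UserPermission for the repository. """
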
diff --git a/endpoints/api/permission_models_pre_oci.py b/endpoints/api/permission_models_pre_oci.py
deleted file mode 100644
index 1f19cad10..000000000
--- a/endpoints/api/permission_models_pre_oci.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from app import avatar
-from data import model
-from permission_models_interface import PermissionDataInterface, UserPermission, TeamPermission, Role, SaveException, DeleteException
-
-
-class PreOCIModel(PermissionDataInterface):
- """
- PreOCIModel implements the data model for Permission using a database schema
- before it was changed to support the OCI specification.
- """
-
- def get_repo_permissions_by_user(self, namespace_name, repository_name):
- org = None
- try:
- org = model.organization.get_organization(namespace_name) # Will raise an error if not org
- except model.InvalidOrganizationException:
- # This repository isn't under an org
- pass
-
- # Load the permissions.
- repo_perms = model.user.get_all_repo_users(namespace_name, repository_name)
-
- if org:
- users_filter = {perm.user for perm in repo_perms}
- org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
-
- def is_org_member(user):
- if not org:
- return False
-
- return user.robot or user.username in org_members
-
- return [self._user_permission(perm, org is not None, is_org_member(perm.user)) for perm in repo_perms]
-
- def get_repo_roles(self, username, namespace_name, repository_name):
- user = model.user.get_user(username)
- if not user:
- return None
-
- repo = model.repository.get_repository(namespace_name, repository_name)
- if not repo:
- return None
-
- return [self._role(r) for r in model.permission.get_user_repo_permissions(user, repo)]
-
- def get_repo_permission_for_user(self, username, namespace_name, repository_name):
- perm = model.permission.get_user_reponame_permission(username, namespace_name, repository_name)
- org = None
- try:
- org = model.organization.get_organization(namespace_name)
- org_members = model.organization.get_organization_member_set(org, users_filter={perm.user})
- is_org_member = perm.user.robot or perm.user.username in org_members
- except model.InvalidOrganizationException:
- # This repository is not part of an organization
- is_org_member = False
-
- return self._user_permission(perm, org is not None, is_org_member)
-
- def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
- try:
- perm = model.permission.set_user_repo_permission(username, namespace_name, repository_name, role_name)
- org = None
- try:
- org = model.organization.get_organization(namespace_name)
- org_members = model.organization.get_organization_member_set(org, users_filter={perm.user})
- is_org_member = perm.user.robot or perm.user.username in org_members
- except model.InvalidOrganizationException:
- # This repository is not part of an organization
- is_org_member = False
- return self._user_permission(perm, org is not None, is_org_member)
- except model.DataModelException as ex:
- raise SaveException(ex)
-
- def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
- try:
- model.permission.delete_user_permission(username, namespace_name, repository_name)
- except model.DataModelException as ex:
- raise DeleteException(ex)
-
- def get_repo_permissions_by_team(self, namespace_name, repository_name):
- repo_perms = model.permission.get_all_repo_teams(namespace_name, repository_name)
- return [self._team_permission(perm, perm.team.name) for perm in repo_perms]
-
- def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
- return self._role(model.permission.get_team_reponame_permission(team_name, namespace_name, repository_name))
-
- def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, role_name):
- try:
- return self._team_permission(model.permission.set_team_repo_permission(team_name, namespace_name, repository_name, role_name), team_name)
- except model.DataModelException as ex:
- raise SaveException(ex)
-
- def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
- try:
- model.permission.delete_team_permission(team_name, namespace_name, repository_name)
- except model.DataModelException as ex:
- raise DeleteException(ex)
-
- def _role(self, permission_obj):
- return Role(role_name=permission_obj.role.name)
-
- def _user_permission(self, permission_obj, has_org, is_org_member):
- return UserPermission(role_name=permission_obj.role.name,
- username=permission_obj.user.username,
- is_robot=permission_obj.user.robot,
- avatar=avatar.get_data_for_user(permission_obj.user),
- is_org_member=is_org_member,
- has_org=has_org)
-
- def _team_permission(self, permission_obj, team_name):
- return TeamPermission(role_name=permission_obj.role.name,
- team_name=permission_obj.team.name,
- avatar=avatar.get_data_for_team(permission_obj.team))
-
-pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/prototype.py b/endpoints/api/prototype.py
index 2944aab60..f14458594 100644
--- a/endpoints/api/prototype.py
+++ b/endpoints/api/prototype.py
@@ -133,10 +133,7 @@ class PermissionPrototypeList(ApiResource):
raise NotFound()
permissions = model.permission.get_prototype_permissions(org)
-
- users_filter = ({p.activating_user for p in permissions} |
- {p.delegate_user for p in permissions})
- org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+ org_members = model.organization.get_organization_member_set(orgname)
return {'prototypes': [prototype_view(p, org_members) for p in permissions]}
raise Unauthorized()
@@ -183,9 +180,7 @@ class PermissionPrototypeList(ApiResource):
prototype = model.permission.add_prototype_permission(org, role_name, activating_user,
delegate_user, delegate_team)
log_prototype_action('create_prototype_permission', orgname, prototype)
-
- users_filter = {prototype.activating_user, prototype.delegate_user}
- org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+ org_members = model.organization.get_organization_member_set(orgname)
return prototype_view(prototype, org_members)
raise Unauthorized()
@@ -262,9 +257,7 @@ class PermissionPrototype(ApiResource):
log_prototype_action('modify_prototype_permission', orgname, prototype,
original_role=existing.role.name)
-
- users_filter = {prototype.activating_user, prototype.delegate_user}
- org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
+ org_members = model.organization.get_organization_member_set(orgname)
return prototype_view(prototype, org_members)
raise Unauthorized()
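
The users_filter lines removed from prototype.py were a narrowing optimization: fetch membership only for the users actually referenced by the prototype permissions rather than the whole organization. A schematic sketch of the filtering idea, with plain sets standing in for the model helpers:

def member_subset(all_members, permissions):
    # Restrict the membership lookup to users named in the permissions,
    # mirroring the users_filter optimization removed above.
    users_filter = ({p['activating_user'] for p in permissions} |
                    {p['delegate_user'] for p in permissions})
    return all_members & users_filter

assert member_subset({'alice', 'bob', 'carol'},
                     [{'activating_user': 'alice', 'delegate_user': 'dave'}]) == {'alice'}
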
diff --git a/endpoints/api/repoemail.py b/endpoints/api/repoemail.py
index 3edccb4cc..b3c98bc36 100644
--- a/endpoints/api/repoemail.py
+++ b/endpoints/api/repoemail.py
@@ -5,35 +5,46 @@ import logging
from flask import request, abort
from endpoints.api import (resource, nickname, require_repo_admin, RepositoryParamResource,
- log_action, validate_json_request, internal_only, path_param, show_if)
-from endpoints.api.repoemail_models_pre_oci import pre_oci_model as model
+ log_action, validate_json_request, internal_only,
+ path_param, show_if)
from endpoints.exception import NotFound
from app import tf
+from data import model
from data.database import db
from util.useremails import send_repo_authorization_email
import features
+
logger = logging.getLogger(__name__)
+def record_view(record):
+ return {
+ 'email': record.email,
+ 'repository': record.repository.name,
+ 'namespace': record.repository.namespace_user.username,
+ 'confirmed': record.confirmed
+ }
+
+
@internal_only
-@resource('/v1/repository//authorizedemail/')
@show_if(features.MAILING)
+@resource('/v1/repository//authorizedemail/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@path_param('email', 'The e-mail address')
class RepositoryAuthorizedEmail(RepositoryParamResource):
""" Resource for checking and authorizing e-mail addresses to receive repo notifications. """
-
@require_repo_admin
@nickname('checkRepoEmailAuthorized')
def get(self, namespace, repository, email):
""" Checks to see if the given e-mail address is authorized on this repository. """
- record = model.get_email_authorized_for_repo(namespace, repository, email)
+ record = model.repository.get_email_authorized_for_repo(namespace, repository, email)
if not record:
abort(404)
- return record.to_dict()
+ return record_view(record)
+
@require_repo_admin
@nickname('sendAuthorizeRepoEmail')
@@ -41,12 +52,12 @@ class RepositoryAuthorizedEmail(RepositoryParamResource):
""" Starts the authorization process for an e-mail address on a repository. """
with tf(db):
- record = model.get_email_authorized_for_repo(namespace, repository, email)
+ record = model.repository.get_email_authorized_for_repo(namespace, repository, email)
if record and record.confirmed:
- return record.to_dict()
+ return record_view(record)
if not record:
- record = model.create_email_authorization_for_repo(namespace, repository, email)
+ record = model.repository.create_email_authorization_for_repo(namespace, repository, email)
send_repo_authorization_email(namespace, repository, email, record.code)
- return record.to_dict()
+ return record_view(record)
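
The record_view helper added above replaces the to_dict method of the RepositoryAuthorizedEmail namedtuple deleted below; note that unlike to_dict it omits the confirmation code field. A minimal sketch of the payload shape, using stand-in objects with the attribute layout the function expects (values are illustrative):

    from collections import namedtuple

    # Stand-ins mirroring the attributes record_view reads.
    Namespace = namedtuple('Namespace', ['username'])
    Repository = namedtuple('Repository', ['name', 'namespace_user'])
    Record = namedtuple('Record', ['email', 'repository', 'confirmed'])

    def record_view(record):
        return {
            'email': record.email,
            'repository': record.repository.name,
            'namespace': record.repository.namespace_user.username,
            'confirmed': record.confirmed,
        }

    record = Record('dev@example.com', Repository('web', Namespace('acme')), False)
    assert record_view(record) == {'email': 'dev@example.com', 'repository': 'web',
                                   'namespace': 'acme', 'confirmed': False}
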
diff --git a/endpoints/api/repoemail_models_interface.py b/endpoints/api/repoemail_models_interface.py
deleted file mode 100644
index 2aae7ab9c..000000000
--- a/endpoints/api/repoemail_models_interface.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-
-class RepositoryAuthorizedEmail(
- namedtuple('RepositoryAuthorizedEmail', [
- 'email',
- 'repository_name',
- 'namespace_name',
- 'confirmed',
- 'code',
- ])):
- """
- Tag represents a name to an image.
- :type email: string
- :type repository_name: string
- :type namespace_name: string
- :type confirmed: boolean
- :type code: string
- """
-
- def to_dict(self):
- return {
- 'email': self.email,
- 'repository': self.repository_name,
- 'namespace': self.namespace_name,
- 'confirmed': self.confirmed,
- 'code': self.code
- }
-
-
-@add_metaclass(ABCMeta)
-class RepoEmailDataInterface(object):
- """
- Interface that represents all data store interactions required by a Repo Email.
- """
-
- @abstractmethod
- def get_email_authorized_for_repo(self, namespace_name, repository_name, email):
- """
- Returns a RepositoryAuthorizedEmail if available else None
- """
-
- @abstractmethod
- def create_email_authorization_for_repo(self, namespace_name, repository_name, email):
- """
- Returns the newly created repository authorized email.
- """
diff --git a/endpoints/api/repoemail_models_pre_oci.py b/endpoints/api/repoemail_models_pre_oci.py
deleted file mode 100644
index 80a65c995..000000000
--- a/endpoints/api/repoemail_models_pre_oci.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from data import model
-from endpoints.api.repoemail_models_interface import RepoEmailDataInterface, RepositoryAuthorizedEmail
-
-
-def _return_none_or_data(func, namespace_name, repository_name, email):
- data = func(namespace_name, repository_name, email)
- if data is None:
- return data
- return RepositoryAuthorizedEmail(email, repository_name, namespace_name, data.confirmed,
- data.code)
-
-
-class PreOCIModel(RepoEmailDataInterface):
- """
- PreOCIModel implements the data model for the Repo Email using a database schema
- before it was changed to support the OCI specification.
- """
-
- def get_email_authorized_for_repo(self, namespace_name, repository_name, email):
- return _return_none_or_data(model.repository.get_email_authorized_for_repo, namespace_name,
- repository_name, email)
-
- def create_email_authorization_for_repo(self, namespace_name, repository_name, email):
- return _return_none_or_data(model.repository.create_email_authorization_for_repo,
- namespace_name, repository_name, email)
-
-
-pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/repository.py b/endpoints/api/repository.py
index d117f238d..1ae77b6a2 100644
--- a/endpoints/api/repository.py
+++ b/endpoints/api/repository.py
@@ -4,20 +4,18 @@ import logging
import datetime
import features
-from collections import defaultdict
from datetime import timedelta, datetime
from flask import request, abort
-from app import dockerfile_build_queue, tuf_metadata_api
-from data.database import RepositoryState
-from endpoints.api import (
- format_date, nickname, log_action, validate_json_request, require_repo_read, require_repo_write,
- require_repo_admin, RepositoryParamResource, resource, parse_args, ApiResource, request_error,
- require_scope, path_param, page_support, query_param, truthy_bool, show_if)
-from endpoints.api.repository_models_pre_oci import pre_oci_model as model
-from endpoints.exception import (
- Unauthorized, NotFound, InvalidRequest, ExceedsLicenseException, DownstreamIssue)
+from app import dockerfile_build_queue
+from data import model
+from endpoints.api import (truthy_bool, format_date, nickname, log_action, validate_json_request,
+ require_repo_read, require_repo_write, require_repo_admin,
+ RepositoryParamResource, resource, query_param, parse_args, ApiResource,
+ request_error, require_scope, path_param, page_support, parse_args,
+ query_param, truthy_bool)
+from endpoints.exception import Unauthorized, NotFound, InvalidRequest, ExceedsLicenseException
from endpoints.api.billing import lookup_allowed_private_repos, get_namespace_plan
from endpoints.api.subscribe import check_repository_usage
@@ -27,12 +25,12 @@ from auth.auth_context import get_authenticated_user
from auth import scopes
from util.names import REPOSITORY_NAME_REGEX
+
logger = logging.getLogger(__name__)
REPOS_PER_PAGE = 100
MAX_DAYS_IN_3_MONTHS = 92
-
def check_allowed_private_repos(namespace):
""" Checks to see if the given namespace has reached its private repository limit. If so,
raises an ExceedsLicenseException.
@@ -71,8 +69,7 @@ class RepositoryList(ApiResource):
],
},
'namespace': {
- 'type':
- 'string',
+ 'type': 'string',
'description': ('Namespace in which the repository should be created. If omitted, the '
'username of the caller is used'),
},
@@ -80,11 +77,6 @@ class RepositoryList(ApiResource):
'type': 'string',
'description': 'Markdown encoded description for the repository',
},
- 'repo_kind': {
- 'type': ['string', 'null'],
- 'description': 'The kind of repository',
- 'enum': ['image', 'application', None],
- }
},
},
}
@@ -107,7 +99,8 @@ class RepositoryList(ApiResource):
repository_name = req['repository']
visibility = req['visibility']
- if model.repo_exists(namespace_name, repository_name):
+ existing = model.repository.get_repository(namespace_name, repository_name)
+ if existing:
raise request_error(message='Repository already exists')
visibility = req['visibility']
@@ -118,21 +111,20 @@ class RepositoryList(ApiResource):
if not REPOSITORY_NAME_REGEX.match(repository_name):
raise InvalidRequest('Invalid repository name')
- kind = req.get('repo_kind', 'image') or 'image'
- model.create_repo(namespace_name, repository_name, owner, req['description'],
- visibility=visibility, repo_kind=kind)
+ repo = model.repository.create_repository(namespace_name, repository_name, owner, visibility)
+ repo.description = req['description']
+ repo.save()
- log_action('create_repo', namespace_name,
- {'repo': repository_name,
- 'namespace': namespace_name}, repo_name=repository_name)
+ log_action('create_repo', namespace_name, {'repo': repository_name,
+ 'namespace': namespace_name}, repo=repo)
return {
'namespace': namespace_name,
- 'name': repository_name,
- 'kind': kind,
+ 'name': repository_name
}, 201
raise Unauthorized()
+
@require_scope(scopes.READ_REPO)
@nickname('listRepos')
@parse_args()
@@ -145,7 +137,6 @@ class RepositoryList(ApiResource):
type=truthy_bool, default=False)
@query_param('popularity', 'Whether to include the repository\'s popularity metric.',
type=truthy_bool, default=False)
- @query_param('repo_kind', 'The kind of repositories to return', type=str, default='image')
@page_support()
def get(self, page_token, parsed_args):
""" Fetch the list of repositories visible to the current user under a variety of situations.
@@ -158,18 +149,85 @@ class RepositoryList(ApiResource):
user = get_authenticated_user()
username = user.username if user else None
- last_modified = parsed_args['last_modified']
- popularity = parsed_args['popularity']
+ next_page_token = None
+ repos = None
- if parsed_args['starred'] and not username:
- # No repositories should be returned, as there is no user.
- abort(400)
+ # Lookup the requested repositories (either starred or non-starred.)
+ if parsed_args['starred']:
+ if not username:
+ # No repositories should be returned, as there is no user.
+ abort(400)
- repos, next_page_token = model.get_repo_list(
- parsed_args['starred'], user, parsed_args['repo_kind'], parsed_args['namespace'], username,
- parsed_args['public'], page_token, last_modified, popularity)
+ # Return the full list of repos starred by the current user that are still visible to them.
+ def can_view_repo(repo):
+ return ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()
- return {'repositories': [repo.to_dict() for repo in repos]}, next_page_token
+ unfiltered_repos = model.repository.get_user_starred_repositories(user)
+ repos = [repo for repo in unfiltered_repos if can_view_repo(repo)]
+ elif parsed_args['namespace']:
+ # Repositories filtered by namespace do not need pagination (their results are fairly small),
+ # so we just do the lookup directly.
+ repos = list(model.repository.get_visible_repositories(username=username,
+ include_public=parsed_args['public'],
+ namespace=parsed_args['namespace']))
+ else:
+ # Determine the starting offset for pagination. Note that we don't use the normal
+ # model.modelutil.paginate method here, as that does not operate over UNION queries, which
+ # get_visible_repositories will return if there is a logged-in user (for performance reasons).
+ #
+ # Also note the +1 on the limit, as paginate_query uses the extra result to determine whether
+ # there is a next page.
+ start_id = model.modelutil.pagination_start(page_token)
+ repo_query = model.repository.get_visible_repositories(username=username,
+ include_public=parsed_args['public'],
+ start_id=start_id,
+ limit=REPOS_PER_PAGE+1)
+
+ repos, next_page_token = model.modelutil.paginate_query(repo_query, limit=REPOS_PER_PAGE,
+ id_alias='rid')
+
+ # Collect the IDs of the repositories found for subsequent lookup of popularity
+ # and/or last modified.
+ if parsed_args['last_modified'] or parsed_args['popularity']:
+ repository_ids = [repo.rid for repo in repos]
+
+ if parsed_args['last_modified']:
+ last_modified_map = model.repository.get_when_last_modified(repository_ids)
+
+ if parsed_args['popularity']:
+ action_sum_map = model.log.get_repositories_action_sums(repository_ids)
+
+ # Collect the IDs of the repositories that are starred for the user, so we can mark them
+ # in the returned results.
+ star_set = set()
+ if username:
+ starred_repos = model.repository.get_user_starred_repositories(user)
+ star_set = {starred.id for starred in starred_repos}
+
+ def repo_view(repo_obj):
+ repo = {
+ 'namespace': repo_obj.namespace_user.username,
+ 'name': repo_obj.name,
+ 'description': repo_obj.description,
+ 'is_public': repo_obj.visibility_id == model.repository.get_public_repo_visibility().id,
+ }
+
+ repo_id = repo_obj.rid
+
+ if parsed_args['last_modified']:
+ repo['last_modified'] = last_modified_map.get(repo_id)
+
+ if parsed_args['popularity']:
+ repo['popularity'] = float(action_sum_map.get(repo_id, 0))
+
+ if username:
+ repo['is_starred'] = repo_id in star_set
+
+ return repo
+
+ return {
+ 'repositories': [repo_view(repo) for repo in repos]
+ }, next_page_token
@resource('/v1/repository/')
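
The comments in the hunk above explain the limit-plus-one trick: the repository query is issued with REPOS_PER_PAGE + 1 as its limit, and the extra row only serves to prove a next page exists. A self-contained sketch of that idea (paginate here is illustrative, not Quay's modelutil API):

    from collections import namedtuple

    REPOS_PER_PAGE = 100
    Row = namedtuple('Row', ['rid'])

    def paginate(rows, limit):
        # rows came from a query issued with LIMIT limit + 1; if the extra row
        # is present there is a next page, and its start id is that row's rid.
        page = rows[:limit]
        next_token = {'start_id': rows[limit].rid} if len(rows) > limit else None
        return page, next_token

    rows = [Row(rid=i) for i in range(REPOS_PER_PAGE + 1)]
    page, token = paginate(rows, limit=REPOS_PER_PAGE)
    assert len(page) == REPOS_PER_PAGE and token == {'start_id': 100}
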
@@ -180,7 +238,9 @@ class Repository(RepositoryParamResource):
'RepoUpdate': {
'type': 'object',
'description': 'Fields which can be updated in a repository.',
- 'required': ['description',],
+ 'required': [
+ 'description',
+ ],
'properties': {
'description': {
'type': 'string',
@@ -193,78 +253,116 @@ class Repository(RepositoryParamResource):
@parse_args()
@query_param('includeStats', 'Whether to include action statistics', type=truthy_bool,
default=False)
- @query_param('includeTags', 'Whether to include repository tags', type=truthy_bool,
- default=True)
@require_repo_read
@nickname('getRepo')
def get(self, namespace, repository, parsed_args):
"""Fetch the specified repository."""
logger.debug('Get repo: %s/%s' % (namespace, repository))
- include_tags = parsed_args['includeTags']
- max_tags = 500
- repo = model.get_repo(namespace, repository, get_authenticated_user(), include_tags, max_tags)
- if repo is None:
- raise NotFound()
- has_write_permission = ModifyRepositoryPermission(namespace, repository).can()
- has_write_permission = has_write_permission and repo.state == RepositoryState.NORMAL
+ def tag_view(tag):
+ tag_info = {
+ 'name': tag.name,
+ 'image_id': tag.image.docker_image_id,
+ 'size': tag.image.aggregate_size
+ }
- repo_data = repo.to_dict()
- repo_data['can_write'] = has_write_permission
- repo_data['can_admin'] = AdministerRepositoryPermission(namespace, repository).can()
+ if tag.lifetime_start_ts > 0:
+ last_modified = format_date(datetime.fromtimestamp(tag.lifetime_start_ts))
+ tag_info['last_modified'] = last_modified
- if parsed_args['includeStats'] and repo.repository_base_elements.kind_name != 'application':
- stats = []
- found_dates = {}
+ return tag_info
- for count in repo.counts:
- stats.append(count.to_dict())
- found_dates['%s/%s' % (count.date.month, count.date.day)] = True
+ repo = model.repository.get_repository(namespace, repository)
+ stats = None
+ if repo:
+ tags = model.tag.list_repository_tags(namespace, repository, include_storage=True)
+ tag_dict = {tag.name: tag_view(tag) for tag in tags}
+ can_write = ModifyRepositoryPermission(namespace, repository).can()
+ can_admin = AdministerRepositoryPermission(namespace, repository).can()
- # Fill in any missing stats with zeros.
- for day in range(1, MAX_DAYS_IN_3_MONTHS):
- day_date = datetime.now() - timedelta(days=day)
- key = '%s/%s' % (day_date.month, day_date.day)
- if key not in found_dates:
+ is_starred = (model.repository.repository_is_starred(get_authenticated_user(), repo)
+ if get_authenticated_user() else False)
+ is_public = model.repository.is_repository_public(repo)
+
+ if parsed_args['includeStats']:
+ stats = []
+ found_dates = {}
+
+ start_date = datetime.now() - timedelta(days=MAX_DAYS_IN_3_MONTHS)
+ counts = model.log.get_repository_action_counts(repo, start_date)
+ for count in counts:
stats.append({
- 'date': day_date.date().isoformat(),
- 'count': 0,
+ 'date': count.date.isoformat(),
+ 'count': count.count,
})
- repo_data['stats'] = stats
- return repo_data
+ found_dates['%s/%s' % (count.date.month, count.date.day)] = True
+
+ # Fill in any missing stats with zeros.
+ for day in range(1, MAX_DAYS_IN_3_MONTHS):
+ day_date = datetime.now() - timedelta(days=day)
+ key = '%s/%s' % (day_date.month, day_date.day)
+ if key not in found_dates:
+ stats.append({
+ 'date': day_date.date().isoformat(),
+ 'count': 0,
+ })
+
+ repo_data = {
+ 'namespace': namespace,
+ 'name': repository,
+ 'description': repo.description,
+ 'tags': tag_dict,
+ 'can_write': can_write,
+ 'can_admin': can_admin,
+ 'is_public': is_public,
+ 'is_organization': repo.namespace_user.organization,
+ 'is_starred': is_starred,
+ 'status_token': repo.badge_token if not is_public else '',
+ }
+
+ if stats is not None:
+ repo_data['stats'] = stats
+
+ return repo_data
+
+ raise NotFound()
@require_repo_write
@nickname('updateRepo')
@validate_json_request('RepoUpdate')
def put(self, namespace, repository):
""" Update the description in the specified repository. """
- if not model.repo_exists(namespace, repository):
- raise NotFound()
+ repo = model.repository.get_repository(namespace, repository)
+ if repo:
+ values = request.get_json()
+ repo.description = values['description']
+ repo.save()
- values = request.get_json()
- model.set_description(namespace, repository, values['description'])
-
- log_action('set_repo_description', namespace,
- {'repo': repository,
- 'namespace': namespace,
- 'description': values['description']}, repo_name=repository)
- return {'success': True}
+ log_action('set_repo_description', namespace,
+ {'repo': repository, 'description': values['description']},
+ repo=repo)
+ return {
+ 'success': True
+ }
+ raise NotFound()
@require_repo_admin
@nickname('deleteRepository')
def delete(self, namespace, repository):
""" Delete a repository. """
- username = model.purge_repository(namespace, repository)
+ model.repository.purge_repository(namespace, repository)
+ user = model.user.get_namespace_user(namespace)
if features.BILLING:
plan = get_namespace_plan(namespace)
- model.check_repository_usage(username, plan)
+ check_repository_usage(user, plan)
# Remove any builds from the queue.
dockerfile_build_queue.delete_namespaced_items(namespace, repository)
- log_action('delete_repo', namespace, {'repo': repository, 'namespace': namespace})
+ log_action('delete_repo', namespace,
+ {'repo': repository, 'namespace': namespace})
return '', 204
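
The includeStats branch above returns a dense day-by-day series: days in the last three months with no logged actions are backfilled with zero counts, with month/day keys marking which days the database already covered. A runnable sketch of the same backfill, minus the database (the counts mapping stands in for get_repository_action_counts):

    from datetime import datetime, timedelta

    MAX_DAYS_IN_3_MONTHS = 92

    def zero_fill(counts):
        # counts maps date -> action count for the days that had activity.
        stats = [{'date': d.isoformat(), 'count': c} for d, c in counts.items()]
        found_dates = {'%s/%s' % (d.month, d.day) for d in counts}
        for day in range(1, MAX_DAYS_IN_3_MONTHS):
            day_date = datetime.now() - timedelta(days=day)
            if '%s/%s' % (day_date.month, day_date.day) not in found_dates:
                stats.append({'date': day_date.date().isoformat(), 'count': 0})
        return stats

    today = datetime.now().date()
    series = zero_fill({today - timedelta(days=3): 7})
    assert len(series) == MAX_DAYS_IN_3_MONTHS - 1   # 91 entries: 1 real + 90 zeros
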
@@ -276,7 +374,9 @@ class RepositoryVisibility(RepositoryParamResource):
'ChangeVisibility': {
'type': 'object',
'description': 'Change the visibility for the repository.',
- 'required': ['visibility',],
+ 'required': [
+ 'visibility',
+ ],
'properties': {
'visibility': {
'type': 'string',
@@ -295,110 +395,15 @@ class RepositoryVisibility(RepositoryParamResource):
@validate_json_request('ChangeVisibility')
def post(self, namespace, repository):
""" Change the visibility of a repository. """
- if model.repo_exists(namespace, repository):
+ repo = model.repository.get_repository(namespace, repository)
+ if repo:
values = request.get_json()
visibility = values['visibility']
if visibility == 'private':
check_allowed_private_repos(namespace)
- model.set_repository_visibility(namespace, repository, visibility)
+ model.repository.set_repository_visibility(repo, visibility)
log_action('change_repo_visibility', namespace,
- {'repo': repository,
- 'namespace': namespace,
- 'visibility': values['visibility']}, repo_name=repository)
+ {'repo': repository, 'visibility': values['visibility']},
+ repo=repo)
return {'success': True}
-
-
-@resource('/v1/repository//changetrust')
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-class RepositoryTrust(RepositoryParamResource):
- """ Custom verb for changing the trust settings of the repository. """
- schemas = {
- 'ChangeRepoTrust': {
- 'type': 'object',
- 'description': 'Change the trust settings for the repository.',
- 'required': ['trust_enabled',],
- 'properties': {
- 'trust_enabled': {
- 'type': 'boolean',
- 'description': 'Whether or not signing is enabled for the repository.'
- },
- }
- }
- }
-
- @show_if(features.SIGNING)
- @require_repo_admin
- @nickname('changeRepoTrust')
- @validate_json_request('ChangeRepoTrust')
- def post(self, namespace, repository):
- """ Change the visibility of a repository. """
- if not model.repo_exists(namespace, repository):
- raise NotFound()
-
- tags, _ = tuf_metadata_api.get_default_tags_with_expiration(namespace, repository)
- if tags and not tuf_metadata_api.delete_metadata(namespace, repository):
- raise DownstreamIssue('Unable to delete downstream trust metadata')
-
- values = request.get_json()
- model.set_trust(namespace, repository, values['trust_enabled'])
-
- log_action(
- 'change_repo_trust', namespace,
- {'repo': repository,
- 'namespace': namespace,
- 'trust_enabled': values['trust_enabled']}, repo_name=repository)
-
- return {'success': True}
-
-
-@resource('/v1/repository//changestate')
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@show_if(features.REPO_MIRROR)
-class RepositoryStateResource(RepositoryParamResource):
- """ Custom verb for changing the state of the repository. """
- schemas = {
- 'ChangeRepoState': {
- 'type': 'object',
- 'description': 'Change the state of the repository.',
- 'required': ['state'],
- 'properties': {
- 'state': {
- 'type': 'string',
- 'description': 'Determines whether pushes are allowed.',
- 'enum': ['NORMAL', 'READ_ONLY', 'MIRROR'],
- },
- }
- }
- }
-
- @require_repo_admin
- @nickname('changeRepoState')
- @validate_json_request('ChangeRepoState')
- def put(self, namespace, repository):
- """ Change the state of a repository. """
- if not model.repo_exists(namespace, repository):
- raise NotFound()
-
- values = request.get_json()
- state_name = values['state']
-
- try:
- state = RepositoryState[state_name]
- except KeyError:
- state = None
-
- if state == RepositoryState.MIRROR and not features.REPO_MIRROR:
- return {'detail': 'Unknown Repository State: %s' % state_name}, 400
-
- if state is None:
- return {'detail': '%s is not a valid Repository state.' % state_name}, 400
-
- model.set_repository_state(namespace, repository, state)
-
- log_action('change_repo_state', namespace,
- {'repo': repository,
- 'namespace': namespace,
- 'state_changed': state_name}, repo_name=repository)
-
- return {'success': True}
diff --git a/endpoints/api/repository_models_interface.py b/endpoints/api/repository_models_interface.py
deleted file mode 100644
index 3b5e06a2f..000000000
--- a/endpoints/api/repository_models_interface.py
+++ /dev/null
@@ -1,279 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple, defaultdict
-
-from datetime import datetime
-from six import add_metaclass
-
-import features
-from data.database import RepositoryState
-from endpoints.api import format_date
-
-
-class RepositoryBaseElement(
- namedtuple('RepositoryBaseElement', [
- 'namespace_name', 'repository_name', 'is_starred', 'is_public', 'kind_name', 'description',
- 'namespace_user_organization', 'namespace_user_removed_tag_expiration_s', 'last_modified',
- 'action_count', 'should_last_modified', 'should_popularity', 'should_is_starred',
- 'is_free_account', 'state'
- ])):
- """
- Repository a single quay repository
- :type namespace_name: string
- :type repository_name: string
- :type is_starred: boolean
- :type is_public: boolean
- :type kind_name: string
- :type description: string
- :type namespace_user_organization: boolean
- :type should_last_modified: boolean
- :type should_popularity: boolean
- :type should_is_starred: boolean
- """
-
- def to_dict(self):
- repo = {
- 'namespace': self.namespace_name,
- 'name': self.repository_name,
- 'description': self.description,
- 'is_public': self.is_public,
- 'kind': self.kind_name,
- 'state': self.state.name if self.state is not None else None,
- }
-
- if self.should_last_modified:
- repo['last_modified'] = self.last_modified
-
- if self.should_popularity:
- repo['popularity'] = float(self.action_count if self.action_count else 0)
-
- if self.should_is_starred:
- repo['is_starred'] = self.is_starred
-
- return repo
-
-
-class ApplicationRepository(
- namedtuple('ApplicationRepository', ['repository_base_elements', 'channels', 'releases', 'state'])):
- """
- Repository a single quay repository
- :type repository_base_elements: RepositoryBaseElement
- :type channels: [Channel]
- :type releases: [Release]
- """
-
- def to_dict(self):
- repo_data = {
- 'namespace': self.repository_base_elements.namespace_name,
- 'name': self.repository_base_elements.repository_name,
- 'kind': self.repository_base_elements.kind_name,
- 'description': self.repository_base_elements.description,
- 'is_public': self.repository_base_elements.is_public,
- 'is_organization': self.repository_base_elements.namespace_user_organization,
- 'is_starred': self.repository_base_elements.is_starred,
- 'channels': [chan.to_dict() for chan in self.channels],
- 'releases': [release.to_dict() for release in self.releases],
- 'state': self.state.name if self.state is not None else None,
- 'is_free_account': self.repository_base_elements.is_free_account,
- }
-
- return repo_data
-
-
-class ImageRepositoryRepository(
- namedtuple('NonApplicationRepository',
- ['repository_base_elements', 'tags', 'counts', 'badge_token', 'trust_enabled',
- 'state'])):
- """
- Repository a single quay repository
- :type repository_base_elements: RepositoryBaseElement
- :type tags: [Tag]
- :type counts: [count]
- :type badge_token: string
- :type trust_enabled: boolean
- """
-
- def to_dict(self):
- img_repo = {
- 'namespace': self.repository_base_elements.namespace_name,
- 'name': self.repository_base_elements.repository_name,
- 'kind': self.repository_base_elements.kind_name,
- 'description': self.repository_base_elements.description,
- 'is_public': self.repository_base_elements.is_public,
- 'is_organization': self.repository_base_elements.namespace_user_organization,
- 'is_starred': self.repository_base_elements.is_starred,
- 'status_token': self.badge_token if not self.repository_base_elements.is_public else '',
- 'trust_enabled': bool(features.SIGNING) and self.trust_enabled,
- 'tag_expiration_s': self.repository_base_elements.namespace_user_removed_tag_expiration_s,
- 'is_free_account': self.repository_base_elements.is_free_account,
- 'state': self.state.name if self.state is not None else None
- }
-
- if self.tags is not None:
- img_repo['tags'] = {tag.name: tag.to_dict() for tag in self.tags}
-
- if self.repository_base_elements.state:
- img_repo['state'] = self.repository_base_elements.state.name
-
- return img_repo
-
-
-class Repository(namedtuple('Repository', [
- 'namespace_name',
- 'repository_name',
-])):
- """
- Repository a single quay repository
- :type namespace_name: string
- :type repository_name: string
- """
-
-
-class Channel(namedtuple('Channel', ['name', 'linked_tag_name', 'linked_tag_lifetime_start'])):
- """
- Repository a single quay repository
- :type name: string
- :type linked_tag_name: string
- :type linked_tag_lifetime_start: string
- """
-
- def to_dict(self):
- return {
- 'name': self.name,
- 'release': self.linked_tag_name,
- 'last_modified': format_date(datetime.fromtimestamp(self.linked_tag_lifetime_start / 1000)),
- }
-
-
-class Release(
- namedtuple('Channel', ['name', 'lifetime_start', 'releases_channels_map'])):
- """
- Repository a single quay repository
- :type name: string
- :type last_modified: string
- :type releases_channels_map: {string -> string}
- """
-
- def to_dict(self):
- return {
- 'name': self.name,
- 'last_modified': format_date(datetime.fromtimestamp(self.lifetime_start / 1000)),
- 'channels': self.releases_channels_map[self.name],
- }
-
-
-class Tag(
- namedtuple('Tag', [
- 'name', 'image_docker_image_id', 'image_aggregate_size', 'lifetime_start_ts',
- 'tag_manifest_digest', 'lifetime_end_ts',
- ])):
- """
- :type name: string
- :type image_docker_image_id: string
- :type image_aggregate_size: int
- :type lifetime_start_ts: int
- :type lifetime_end_ts: int|None
- :type tag_manifest_digest: string
-
- """
-
- def to_dict(self):
- tag_info = {
- 'name': self.name,
- 'image_id': self.image_docker_image_id,
- 'size': self.image_aggregate_size
- }
-
- if self.lifetime_start_ts > 0:
- last_modified = format_date(datetime.fromtimestamp(self.lifetime_start_ts))
- tag_info['last_modified'] = last_modified
-
- if self.lifetime_end_ts:
- expiration = format_date(datetime.fromtimestamp(self.lifetime_end_ts))
- tag_info['expiration'] = expiration
-
- if self.tag_manifest_digest is not None:
- tag_info['manifest_digest'] = self.tag_manifest_digest
-
- return tag_info
-
-
-class Count(namedtuple('Count', ['date', 'count'])):
- """
- date: DateTime
- count: int
- """
-
- def to_dict(self):
- return {
- 'date': self.date.isoformat(),
- 'count': self.count,
- }
-
-
-@add_metaclass(ABCMeta)
-class RepositoryDataInterface(object):
- """
- Interface that represents all data store interactions required by a Repository.
- """
-
- @abstractmethod
- def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500):
- """
- Returns a repository
- """
-
- @abstractmethod
- def repo_exists(self, namespace_name, repository_name):
- """
- Returns true if a repo exists and false if not
- """
-
- @abstractmethod
- def create_repo(self, namespace, name, creating_user, description, visibility='private',
- repo_kind='image'):
- """
- Returns creates a new repo
- """
-
- @abstractmethod
- def get_repo_list(self, starred, user, repo_kind, namespace, username, public, page_token,
- last_modified, popularity):
- """
- Returns a RepositoryBaseElement
- """
-
- @abstractmethod
- def set_repository_visibility(self, namespace_name, repository_name, visibility):
- """
- Sets a repository's visibility if it is found
- """
-
- @abstractmethod
- def set_trust(self, namespace_name, repository_name, trust):
- """
- Sets a repository's trust_enabled field if it is found
- """
-
- @abstractmethod
- def set_description(self, namespace_name, repository_name, description):
- """
- Sets a repository's description if it is found.
- """
-
- @abstractmethod
- def purge_repository(self, namespace_name, repository_name):
- """
- Removes a repository
- """
-
- @abstractmethod
- def check_repository_usage(self, user_name, plan_found):
- """
- Creates a notification for a user if they are over or under on their repository usage
- """
-
- @abstractmethod
- def set_repository_state(self, namespace_name, repository_name, state):
- """
- Set the State of the Repository.
- """
diff --git a/endpoints/api/repository_models_pre_oci.py b/endpoints/api/repository_models_pre_oci.py
deleted file mode 100644
index 328c5443e..000000000
--- a/endpoints/api/repository_models_pre_oci.py
+++ /dev/null
@@ -1,190 +0,0 @@
-from collections import defaultdict
-
-from datetime import datetime, timedelta
-
-from auth.permissions import ReadRepositoryPermission
-from data.database import Repository as RepositoryTable, RepositoryState
-from data import model
-from data.appr_model import channel as channel_model, release as release_model
-from data.registry_model import registry_model
-from data.registry_model.datatypes import RepositoryReference
-from endpoints.appr.models_cnr import model as appr_model
-from endpoints.api.repository_models_interface import RepositoryDataInterface, RepositoryBaseElement, Repository, \
- ApplicationRepository, ImageRepositoryRepository, Tag, Channel, Release, Count
-
-MAX_DAYS_IN_3_MONTHS = 92
-REPOS_PER_PAGE = 100
-
-
-def _create_channel(channel, releases_channels_map):
- releases_channels_map[channel.linked_tag.name].append(channel.name)
- return Channel(channel.name, channel.linked_tag.name, channel.linked_tag.lifetime_start)
-
-
-class PreOCIModel(RepositoryDataInterface):
- """
- PreOCIModel implements the data model for the Repo Email using a database schema
- before it was changed to support the OCI specification.
- """
-
- def check_repository_usage(self, username, plan_found):
- private_repos = model.user.get_private_repo_count(username)
- if plan_found is None:
- repos_allowed = 0
- else:
- repos_allowed = plan_found['privateRepos']
-
- user_or_org = model.user.get_namespace_user(username)
- if private_repos > repos_allowed:
- model.notification.create_unique_notification('over_private_usage', user_or_org,
- {'namespace': username})
- else:
- model.notification.delete_notifications_by_kind(user_or_org, 'over_private_usage')
-
- def purge_repository(self, namespace_name, repository_name):
- model.gc.purge_repository(namespace_name, repository_name)
- user = model.user.get_namespace_user(namespace_name)
- return user.username
-
- def set_description(self, namespace_name, repository_name, description):
- repo = model.repository.get_repository(namespace_name, repository_name)
- model.repository.set_description(repo, description)
-
- def set_trust(self, namespace_name, repository_name, trust):
- repo = model.repository.get_repository(namespace_name, repository_name)
- model.repository.set_trust(repo, trust)
-
- def set_repository_visibility(self, namespace_name, repository_name, visibility):
- repo = model.repository.get_repository(namespace_name, repository_name)
- model.repository.set_repository_visibility(repo, visibility)
-
- def set_repository_state(self, namespace_name, repository_name, state):
- repo = model.repository.get_repository(namespace_name, repository_name)
- model.repository.set_repository_state(repo, state)
-
- def get_repo_list(self, starred, user, repo_kind, namespace, username, public, page_token,
- last_modified, popularity):
- next_page_token = None
- # Lookup the requested repositories (either starred or non-starred.)
- if starred:
- # Return the full list of repos starred by the current user that are still visible to them.
- def can_view_repo(repo):
- can_view = ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()
- return can_view or model.repository.is_repository_public(repo)
-
- unfiltered_repos = model.repository.get_user_starred_repositories(user,
- kind_filter=repo_kind)
- repos = [repo for repo in unfiltered_repos if can_view_repo(repo)]
- elif namespace:
- # Repositories filtered by namespace do not need pagination (their results are fairly small),
- # so we just do the lookup directly.
- repos = list(
- model.repository.get_visible_repositories(username=username, include_public=public,
- namespace=namespace, kind_filter=repo_kind))
- else:
- # Determine the starting offset for pagination. Note that we don't use the normal
- # model.modelutil.paginate method here, as that does not operate over UNION queries, which
- # get_visible_repositories will return if there is a logged-in user (for performance reasons).
- #
- # Also note the +1 on the limit, as paginate_query uses the extra result to determine whether
- # there is a next page.
- start_id = model.modelutil.pagination_start(page_token)
- repo_query = model.repository.get_visible_repositories(
- username=username, include_public=public, start_id=start_id, limit=REPOS_PER_PAGE + 1,
- kind_filter=repo_kind)
-
- repos, next_page_token = model.modelutil.paginate_query(repo_query, limit=REPOS_PER_PAGE,
- sort_field_name='rid')
-
- # Collect the IDs of the repositories found for subequent lookup of popularity
- # and/or last modified.
- last_modified_map = {}
- action_sum_map = {}
- if last_modified or popularity:
- repository_refs = [RepositoryReference.for_id(repo.rid) for repo in repos]
- repository_ids = [repo.rid for repo in repos]
-
- if last_modified:
- last_modified_map = registry_model.get_most_recent_tag_lifetime_start(repository_refs)
-
- if popularity:
- action_sum_map = model.log.get_repositories_action_sums(repository_ids)
-
- # Collect the IDs of the repositories that are starred for the user, so we can mark them
- # in the returned results.
- star_set = set()
- if username:
- starred_repos = model.repository.get_user_starred_repositories(user)
- star_set = {starred.id for starred in starred_repos}
-
- return [
- RepositoryBaseElement(repo.namespace_user.username, repo.name, repo.id in star_set,
- repo.visibility_id == model.repository.get_public_repo_visibility().id,
- repo_kind, repo.description, repo.namespace_user.organization,
- repo.namespace_user.removed_tag_expiration_s,
- last_modified_map.get(repo.rid),
- action_sum_map.get(repo.rid), last_modified, popularity, username,
- None, repo.state)
- for repo in repos
- ], next_page_token
-
- def repo_exists(self, namespace_name, repository_name):
- repo = model.repository.get_repository(namespace_name, repository_name)
- if repo is None:
- return False
-
- return True
-
- def create_repo(self, namespace_name, repository_name, owner, description, visibility='private',
- repo_kind='image'):
- repo = model.repository.create_repository(namespace_name, repository_name, owner, visibility,
- repo_kind=repo_kind, description=description)
- return Repository(namespace_name, repository_name)
-
- def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500):
- repo = model.repository.get_repository(namespace_name, repository_name)
- if repo is None:
- return None
-
- is_starred = model.repository.repository_is_starred(user, repo) if user else False
- is_public = model.repository.is_repository_public(repo)
- kind_name = RepositoryTable.kind.get_name(repo.kind_id)
- base = RepositoryBaseElement(
- namespace_name, repository_name, is_starred, is_public, kind_name, repo.description,
- repo.namespace_user.organization, repo.namespace_user.removed_tag_expiration_s, None, None,
- False, False, False, repo.namespace_user.stripe_id is None, repo.state)
-
- if base.kind_name == 'application':
- channels = channel_model.get_repo_channels(repo, appr_model.models_ref)
- releases = release_model.get_release_objs(repo, appr_model.models_ref)
- releases_channels_map = defaultdict(list)
- return ApplicationRepository(
- base, [_create_channel(channel, releases_channels_map) for channel in channels], [
- Release(release.name, release.lifetime_start, releases_channels_map)
- for release in releases
- ], repo.state)
-
- tags = None
- repo_ref = RepositoryReference.for_repo_obj(repo)
- if include_tags:
- tags, _ = registry_model.list_repository_tag_history(repo_ref, page=1, size=max_tags,
- active_tags_only=True)
- tags = [
- Tag(tag.name,
- tag.legacy_image.docker_image_id if tag.legacy_image_if_present else None,
- tag.legacy_image.aggregate_size if tag.legacy_image_if_present else None,
- tag.lifetime_start_ts,
- tag.manifest_digest,
- tag.lifetime_end_ts) for tag in tags
- ]
-
- start_date = datetime.now() - timedelta(days=MAX_DAYS_IN_3_MONTHS)
- counts = model.log.get_repository_action_counts(repo, start_date)
-
- assert repo.state is not None
- return ImageRepositoryRepository(base, tags,
- [Count(count.date, count.count) for count in counts],
- repo.badge_token, repo.trust_enabled, repo.state)
-
-
-pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/repositorynotification.py b/endpoints/api/repositorynotification.py
index c34cbc553..a9828d518 100644
--- a/endpoints/api/repositorynotification.py
+++ b/endpoints/api/repositorynotification.py
@@ -1,19 +1,42 @@
""" List, create and manage repository events/notifications. """
-import logging
+import json
+
from flask import request
-from endpoints.api import (
- RepositoryParamResource, nickname, resource, require_repo_admin, log_action,
- validate_json_request, request_error, path_param, disallow_for_app_repositories, InvalidRequest)
+from app import notification_queue
+from endpoints.api import (RepositoryParamResource, nickname, resource, require_repo_admin,
+ log_action, validate_json_request, request_error,
+ path_param)
from endpoints.exception import NotFound
-from notifications.models_interface import Repository
-from notifications.notificationevent import NotificationEvent
-from notifications.notificationmethod import (
- NotificationMethod, CannotValidateNotificationMethodException)
-from endpoints.api.repositorynotification_models_pre_oci import pre_oci_model as model
+from endpoints.notificationevent import NotificationEvent
+from endpoints.notificationmethod import (NotificationMethod,
+ CannotValidateNotificationMethodException)
+from endpoints.notificationhelper import build_notification_data
+from data import model
-logger = logging.getLogger(__name__)
+
+def notification_view(note):
+ config = {}
+ try:
+ config = json.loads(note.config_json)
+ except (TypeError, ValueError):
+ config = {}
+
+ event_config = {}
+ try:
+ event_config = json.loads(note.event_config_json)
+ except (TypeError, ValueError):
+ event_config = {}
+
+ return {
+ 'uuid': note.uuid,
+ 'event': note.event.name,
+ 'method': note.method.name,
+ 'config': config,
+ 'title': note.title,
+ 'event_config': event_config,
+ }
@resource('/v1/repository//notification/')
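
notification_view above tolerates malformed or missing JSON in the stored config columns by falling back to an empty dict (the deleted interface file further below did the same with except ValueError). A minimal standalone sketch of that defensive parse, also catching the TypeError a NULL column would raise:

    import json

    def parse_config(raw):
        # Stored config columns may be NULL or hold invalid JSON; in either
        # case the API should degrade to an empty config rather than 500.
        try:
            return json.loads(raw)
        except (TypeError, ValueError):
            return {}

    assert parse_config('{"url": "http://example.com"}') == {'url': 'http://example.com'}
    assert parse_config(None) == {}
    assert parse_config('not json') == {}
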
@@ -57,37 +80,41 @@ class RepositoryNotificationList(RepositoryParamResource):
@require_repo_admin
@nickname('createRepoNotification')
- @disallow_for_app_repositories
@validate_json_request('NotificationCreateRequest')
- def post(self, namespace_name, repository_name):
+ def post(self, namespace, repository):
+ """ Create a new notification for the specified repository. """
+ repo = model.repository.get_repository(namespace, repository)
parsed = request.get_json()
method_handler = NotificationMethod.get_method(parsed['method'])
+ if not method_handler:
+ raise request_error(message='Unknown method')
+
try:
- method_handler.validate(namespace_name, repository_name, parsed['config'])
+ method_handler.validate(repo, parsed['config'])
except CannotValidateNotificationMethodException as ex:
raise request_error(message=ex.message)
- new_notification = model.create_repo_notification(namespace_name, repository_name,
- parsed['event'], parsed['method'],
- parsed['config'], parsed['eventConfig'],
- parsed.get('title'))
+ new_notification = model.notification.create_repo_notification(repo, parsed['event'],
+ parsed['method'], parsed['config'],
+ parsed['eventConfig'],
+ parsed.get('title', None))
- log_action('add_repo_notification', namespace_name, {
- 'repo': repository_name,
- 'namespace': namespace_name,
- 'notification_id': new_notification.uuid,
- 'event': new_notification.event_name,
- 'method': new_notification.method_name}, repo_name=repository_name)
- return new_notification.to_dict(), 201
+ resp = notification_view(new_notification)
+ log_action('add_repo_notification', namespace,
+ {'repo': repository, 'notification_id': new_notification.uuid,
+ 'event': parsed['event'], 'method': parsed['method']},
+ repo=repo)
+ return resp, 201
@require_repo_admin
@nickname('listRepoNotifications')
- @disallow_for_app_repositories
- def get(self, namespace_name, repository_name):
+ def get(self, namespace, repository):
""" List the notifications for the specified repository. """
- notifications = model.list_repo_notifications(namespace_name, repository_name)
- return {'notifications': [n.to_dict() for n in notifications]}
+ notifications = model.notification.list_repo_notifications(namespace, repository)
+ return {
+ 'notifications': [notification_view(n) for n in notifications]
+ }
@resource('/v1/repository//notification/')
@@ -95,52 +122,30 @@ class RepositoryNotificationList(RepositoryParamResource):
@path_param('uuid', 'The UUID of the notification')
class RepositoryNotification(RepositoryParamResource):
""" Resource for dealing with specific notifications. """
-
@require_repo_admin
@nickname('getRepoNotification')
- @disallow_for_app_repositories
- def get(self, namespace_name, repository_name, uuid):
+ def get(self, namespace, repository, uuid):
""" Get information for the specified notification. """
- found = model.get_repo_notification(uuid)
- if not found:
+ try:
+ found = model.notification.get_repo_notification(uuid)
+ except model.InvalidNotificationException:
raise NotFound()
- return found.to_dict()
+
+ if (found.repository.namespace_user.username != namespace or
+ found.repository.name != repository):
+ raise NotFound()
+
+ return notification_view(found)
@require_repo_admin
@nickname('deleteRepoNotification')
- @disallow_for_app_repositories
- def delete(self, namespace_name, repository_name, uuid):
+ def delete(self, namespace, repository, uuid):
""" Deletes the specified notification. """
- deleted = model.delete_repo_notification(namespace_name, repository_name, uuid)
- if not deleted:
- raise InvalidRequest("No repository notification found for: %s, %s, %s" %
- (namespace_name, repository_name, uuid))
-
- log_action('delete_repo_notification', namespace_name, {
- 'repo': repository_name,
- 'namespace': namespace_name,
- 'notification_id': uuid,
- 'event': deleted.event_name,
- 'method': deleted.method_name}, repo_name=repository_name)
-
- return 'No Content', 204
-
- @require_repo_admin
- @nickname('resetRepositoryNotificationFailures')
- @disallow_for_app_repositories
- def post(self, namespace_name, repository_name, uuid):
- """ Resets repository notification to 0 failures. """
- reset = model.reset_notification_number_of_failures(namespace_name, repository_name, uuid)
- if not reset:
- raise InvalidRequest("No repository notification found for: %s, %s, %s" %
- (namespace_name, repository_name, uuid))
-
- log_action('reset_repo_notification', namespace_name, {
- 'repo': repository_name,
- 'namespace': namespace_name,
- 'notification_id': uuid,
- 'event': reset.event_name,
- 'method': reset.method_name}, repo_name=repository_name)
+ deleted = model.notification.delete_repo_notification(namespace, repository, uuid)
+ log_action('delete_repo_notification', namespace,
+ {'repo': repository, 'notification_id': uuid,
+ 'event': deleted.event.name, 'method': deleted.method.name},
+ repo=model.repository.get_repository(namespace, repository))
return 'No Content', 204
@@ -150,15 +155,23 @@ class RepositoryNotification(RepositoryParamResource):
@path_param('uuid', 'The UUID of the notification')
class TestRepositoryNotification(RepositoryParamResource):
""" Resource for queuing a test of a notification. """
-
@require_repo_admin
@nickname('testRepoNotification')
- @disallow_for_app_repositories
- def post(self, namespace_name, repository_name, uuid):
+ def post(self, namespace, repository, uuid):
""" Queues a test notification for this repository. """
- test_note = model.queue_test_notification(uuid)
- if not test_note:
- raise InvalidRequest("No repository notification found for: %s, %s, %s" %
- (namespace_name, repository_name, uuid))
+ try:
+ test_note = model.notification.get_repo_notification(uuid)
+ except model.InvalidNotificationException:
+ raise NotFound()
- return {}, 200
+ if (test_note.repository.namespace_user.username != namespace or
+ test_note.repository.name != repository):
+ raise NotFound()
+
+ event_info = NotificationEvent.get_event(test_note.event.name)
+ sample_data = event_info.get_sample_data(test_note)
+ notification_data = build_notification_data(test_note, sample_data)
+ notification_queue.put([test_note.repository.namespace_user.username, repository,
+ test_note.event.name], json.dumps(notification_data))
+
+ return {}
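
The reinstated test handler above builds sample event data and enqueues it under a key derived from the namespace, repository, and event name. A rough sketch of that enqueue shape with a stand-in queue (the real notification_queue is a Quay work queue; this only illustrates the key/body layout):

    import json

    class FakeQueue(object):
        # Stand-in for notification_queue: put() takes key parts and a JSON body.
        def __init__(self):
            self.items = []

        def put(self, key_path, body):
            self.items.append(('/'.join(key_path), body))

    queue = FakeQueue()
    sample_data = {'event': 'repo_push', 'repository': 'acme/web'}  # illustrative
    queue.put(['acme', 'web', 'repo_push'], json.dumps(sample_data))
    assert queue.items[0][0] == 'acme/web/repo_push'
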
diff --git a/endpoints/api/repositorynotification_models_interface.py b/endpoints/api/repositorynotification_models_interface.py
deleted file mode 100644
index ed0ebd2f7..000000000
--- a/endpoints/api/repositorynotification_models_interface.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import json
-
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-
-class RepositoryNotification(
- namedtuple('RepositoryNotification', [
- 'uuid',
- 'title',
- 'event_name',
- 'method_name',
- 'config_json',
- 'event_config_json',
- 'number_of_failures',
- ])):
- """
- RepositoryNotification represents a notification for a repository.
- :type uuid: string
- :type event: string
- :type method: string
- :type config: string
- :type title: string
- :type event_config: string
- :type number_of_failures: int
- """
-
- def to_dict(self):
- try:
- config = json.loads(self.config_json)
- except ValueError:
- config = {}
-
- try:
- event_config = json.loads(self.event_config_json)
- except ValueError:
- event_config = {}
-
- return {
- 'uuid': self.uuid,
- 'title': self.title,
- 'event': self.event_name,
- 'method': self.method_name,
- 'config': config,
- 'event_config': event_config,
- 'number_of_failures': self.number_of_failures,
- }
-
-
-@add_metaclass(ABCMeta)
-class RepoNotificationInterface(object):
- """
- Interface that represents all data store interactions required by the RepositoryNotification API
- """
-
- @abstractmethod
- def create_repo_notification(self, namespace_name, repository_name, event_name, method_name,
- method_config, event_config, title=None):
- """
-
- Args:
- namespace_name: namespace of repository
- repository_name: name of repository
- event_name: name of event
- method_name: name of method
- method_config: method config, json string
- event_config: event config, json string
- title: title of the notification
-
- Returns:
- RepositoryNotification object
-
- """
- pass
-
- @abstractmethod
- def list_repo_notifications(self, namespace_name, repository_name, event_name=None):
- """
-
- Args:
- namespace_name: namespace of repository
- repository_name: name of repository
- event_name: name of event
-
- Returns:
- list(RepositoryNotification)
- """
- pass
-
- @abstractmethod
- def get_repo_notification(self, uuid):
- """
-
- Args:
- uuid: uuid of notification
-
- Returns:
- RepositoryNotification or None
-
- """
- pass
-
- @abstractmethod
- def delete_repo_notification(self, namespace_name, repository_name, uuid):
- """
-
- Args:
- namespace_name: namespace of repository
- repository_name: name of repository
- uuid: uuid of notification
-
- Returns:
- RepositoryNotification or None
-
- """
- pass
-
- @abstractmethod
- def reset_notification_number_of_failures(self, namespace_name, repository_name, uuid):
- """
-
- Args:
- namespace_name: namespace of repository
- repository_name: name of repository
- uuid: uuid of notification
-
- Returns:
- RepositoryNotification
-
- """
- pass
-
- @abstractmethod
- def queue_test_notification(self, uuid):
- """
-
- Args:
- uuid: uuid of notification
-
- Returns:
- RepositoryNotification or None
-
- """
- pass
diff --git a/endpoints/api/repositorynotification_models_pre_oci.py b/endpoints/api/repositorynotification_models_pre_oci.py
deleted file mode 100644
index b3edf43ae..000000000
--- a/endpoints/api/repositorynotification_models_pre_oci.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import json
-
-from app import notification_queue
-from data import model
-from data.model import InvalidNotificationException
-from endpoints.api.repositorynotification_models_interface import (RepoNotificationInterface,
- RepositoryNotification)
-from notifications import build_notification_data
-from notifications.notificationevent import NotificationEvent
-
-
-class RepoNotificationPreOCIModel(RepoNotificationInterface):
- def create_repo_notification(self, namespace_name, repository_name, event_name, method_name,
- method_config, event_config, title=None):
- repository = model.repository.get_repository(namespace_name, repository_name)
- return self._notification(
- model.notification.create_repo_notification(repository, event_name, method_name,
- method_config, event_config, title))
-
- def list_repo_notifications(self, namespace_name, repository_name, event_name=None):
- return [
- self._notification(n)
- for n in model.notification.list_repo_notifications(namespace_name, repository_name,
- event_name)]
-
- def get_repo_notification(self, uuid):
- try:
- found = model.notification.get_repo_notification(uuid)
- except InvalidNotificationException:
- return None
- return self._notification(found)
-
- def delete_repo_notification(self, namespace_name, repository_name, uuid):
- try:
- found = model.notification.delete_repo_notification(namespace_name, repository_name, uuid)
- except InvalidNotificationException:
- return None
- return self._notification(found)
-
- def reset_notification_number_of_failures(self, namespace_name, repository_name, uuid):
- return self._notification(
- model.notification.reset_notification_number_of_failures(namespace_name, repository_name,
- uuid))
-
- def queue_test_notification(self, uuid):
- try:
- notification = model.notification.get_repo_notification(uuid)
- except InvalidNotificationException:
- return None
-
- event_config = json.loads(notification.event_config_json or '{}')
- event_info = NotificationEvent.get_event(notification.event.name)
- sample_data = event_info.get_sample_data(notification.repository.namespace_user.username,
- notification.repository.name, event_config)
- notification_data = build_notification_data(notification, sample_data)
- notification_queue.put([
- notification.repository.namespace_user.username, notification.uuid, notification.event.name],
- json.dumps(notification_data))
- return self._notification(notification)
-
- def _notification(self, notification):
- if not notification:
- return None
-
- return RepositoryNotification(
- uuid=notification.uuid, title=notification.title, event_name=notification.event.name,
- method_name=notification.method.name, config_json=notification.config_json,
- event_config_json=notification.event_config_json,
- number_of_failures=notification.number_of_failures)
-
-
-pre_oci_model = RepoNotificationPreOCIModel()
diff --git a/endpoints/api/repotoken.py b/endpoints/api/repotoken.py
index efa25a2fb..5ef427bf9 100644
--- a/endpoints/api/repotoken.py
+++ b/endpoints/api/repotoken.py
@@ -2,11 +2,25 @@
import logging
+from flask import request
+
from endpoints.api import (resource, nickname, require_repo_admin, RepositoryParamResource,
- validate_json_request, path_param)
+ log_action, validate_json_request, path_param)
+from endpoints.exception import NotFound
+from data import model
+
logger = logging.getLogger(__name__)
+
+def token_view(token_obj):
+ return {
+ 'friendlyName': token_obj.friendly_name,
+ 'code': token_obj.code,
+ 'role': token_obj.role.name,
+ }
+
+
@resource('/v1/repository//tokens/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class RepositoryTokenList(RepositoryParamResource):
@@ -29,21 +43,28 @@ class RepositoryTokenList(RepositoryParamResource):
@require_repo_admin
@nickname('listRepoTokens')
- def get(self, namespace_name, repo_name):
+ def get(self, namespace, repository):
""" List the tokens for the specified repository. """
- return {
- 'message': 'Handling of access tokens is no longer supported',
- }, 410
+ tokens = model.token.get_repository_delegate_tokens(namespace, repository)
+ return {
+ 'tokens': {token.code: token_view(token) for token in tokens}
+ }
@require_repo_admin
@nickname('createToken')
@validate_json_request('NewToken')
- def post(self, namespace_name, repo_name):
+ def post(self, namespace, repository):
""" Create a new repository token. """
- return {
- 'message': 'Creation of access tokens is no longer supported',
- }, 410
+ token_params = request.get_json()
+
+ token = model.token.create_delegate_token(namespace, repository, token_params['friendlyName'])
+
+ log_action('add_repo_accesstoken', namespace,
+ {'repo': repository, 'token': token_params['friendlyName']},
+ repo=model.repository.get_repository(namespace, repository))
+
+ return token_view(token), 201
@resource('/v1/repository//tokens/')
@@ -71,30 +92,46 @@ class RepositoryToken(RepositoryParamResource):
},
},
}
-
@require_repo_admin
@nickname('getTokens')
- def get(self, namespace_name, repo_name, code):
+ def get(self, namespace, repository, code):
""" Fetch the specified repository token information. """
- return {
- 'message': 'Handling of access tokens is no longer supported',
- }, 410
+ try:
+ perm = model.token.get_repo_delegate_token(namespace, repository, code)
+ except model.InvalidTokenException:
+ raise NotFound()
+ return token_view(perm)
@require_repo_admin
@nickname('changeToken')
@validate_json_request('TokenPermission')
- def put(self, namespace_name, repo_name, code):
+ def put(self, namespace, repository, code):
""" Update the permissions for the specified repository token. """
- return {
- 'message': 'Handling of access tokens is no longer supported',
- }, 410
+ new_permission = request.get_json()
+ logger.debug('Setting permission to: %s for code %s' %
+ (new_permission['role'], code))
+
+ token = model.token.set_repo_delegate_token_role(namespace, repository, code,
+ new_permission['role'])
+
+ log_action('change_repo_permission', namespace,
+ {'repo': repository, 'token': token.friendly_name, 'code': code,
+ 'role': new_permission['role']},
+ repo=model.repository.get_repository(namespace, repository))
+
+ return token_view(token)
@require_repo_admin
@nickname('deleteToken')
- def delete(self, namespace_name, repo_name, code):
+ def delete(self, namespace, repository, code):
""" Delete the repository token. """
- return {
- 'message': 'Handling of access tokens is no longer supported',
- }, 410
+ token = model.token.delete_delegate_token(namespace, repository, code)
+
+ log_action('delete_repo_accesstoken', namespace,
+ {'repo': repository, 'token': token.friendly_name,
+ 'code': code},
+ repo=model.repository.get_repository(namespace, repository))
+
+ return '', 204
diff --git a/endpoints/api/robot.py b/endpoints/api/robot.py
index 867329323..8f1cbde73 100644
--- a/endpoints/api/robot.py
+++ b/endpoints/api/robot.py
@@ -2,64 +2,88 @@
from endpoints.api import (resource, nickname, ApiResource, log_action, related_user_resource,
require_user_admin, require_scope, path_param, parse_args,
- truthy_bool, query_param, validate_json_request, max_json_size)
-from endpoints.api.robot_models_pre_oci import pre_oci_model as model
+ truthy_bool, query_param)
from endpoints.exception import Unauthorized
from auth.permissions import AdministerOrganizationPermission, OrganizationMemberPermission
from auth.auth_context import get_authenticated_user
from auth import scopes
+from data import model
+from data.database import User, Team, Repository, FederatedLogin
from util.names import format_robot_username
-from flask import abort, request
+from flask import abort
+from app import avatar
+
+def robot_view(name, token):
+ return {
+ 'name': name,
+ 'token': token
+ }
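+
+# Illustrative only (hypothetical values): robot_view('acme+deploy', 'XYZ123')
+# returns {'name': 'acme+deploy', 'token': 'XYZ123'}.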
-CREATE_ROBOT_SCHEMA = {
- 'type': 'object',
- 'description': 'Optional data for creating a robot',
- 'properties': {
- 'description': {
- 'type': 'string',
- 'description': 'Optional text description for the robot',
- 'maxLength': 255,
+def permission_view(permission):
+ return {
+ 'repository': {
+ 'name': permission.repository.name,
+ 'is_public': permission.repository.visibility.name == 'public'
},
- 'unstructured_metadata': {
- 'type': 'object',
- 'description': 'Optional unstructured metadata for the robot',
- },
- },
-}
-
-ROBOT_MAX_SIZE = 1024 * 1024 # 1 KB.
+ 'role': permission.role.name
+ }
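+
+# Sketch: a permission row granting the 'write' role on a public repository
+# named 'web' renders as {'repository': {'name': 'web', 'is_public': True}, 'role': 'write'}.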
-def robots_list(prefix, include_permissions=False, include_token=False, limit=None):
- robots = model.list_entity_robot_permission_teams(prefix, limit=limit,
- include_token=include_token,
- include_permissions=include_permissions)
- return {'robots': [robot.to_dict(include_token=include_token) for robot in robots]}
+def robots_list(prefix, include_permissions=False):
+ tuples = model.user.list_entity_robot_permission_teams(prefix,
+ include_permissions=include_permissions)
+ robots = {}
+ robot_teams = set()
+
+ for robot_tuple in tuples:
+ robot_name = robot_tuple.get(User.username)
+ if robot_name not in robots:
+ robots[robot_name] = {
+ 'name': robot_name,
+ 'token': robot_tuple.get(FederatedLogin.service_ident)
+ }
+
+ if include_permissions:
+ robots[robot_name].update({
+ 'teams': [],
+ 'repositories': []
+ })
+
+ if include_permissions:
+ team_name = robot_tuple.get(Team.name)
+ repository_name = robot_tuple.get(Repository.name)
+
+ if team_name is not None:
+ check_key = robot_name + ':' + team_name
+ if check_key not in robot_teams:
+ robot_teams.add(check_key)
+
+ robots[robot_name]['teams'].append({
+ 'name': team_name,
+ 'avatar': avatar.get_data(team_name, team_name, 'team')
+ })
+
+ if repository_name is not None:
+ if repository_name not in robots[robot_name]['repositories']:
+ robots[robot_name]['repositories'].append(repository_name)
+
+ return {'robots': robots.values()}
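+
+# Illustrative result shape (hypothetical values), with include_permissions=True:
+# {'robots': [{'name': 'acme+ci', 'token': '...', 'teams': [{'name': 'devs', 'avatar': {...}}],
+#             'repositories': ['web']}]}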
@resource('/v1/user/robots')
class UserRobotList(ApiResource):
""" Resource for listing user robots. """
-
@require_user_admin
@nickname('getUserRobots')
@parse_args()
@query_param('permissions',
- 'Whether to include repositories and teams in which the robots have permission.',
+ 'Whether to include repositories and teams in which the robots have permission.',
type=truthy_bool, default=False)
- @query_param('token',
- 'If false, the robot\'s token is not returned.',
- type=truthy_bool, default=True)
- @query_param('limit',
- 'If specified, the number of robots to return.',
- type=int, default=None)
def get(self, parsed_args):
""" List the available robots for the user. """
user = get_authenticated_user()
- return robots_list(user.username, include_token=parsed_args.get('token', True),
- include_permissions=parsed_args.get('permissions', False),
- limit=parsed_args.get('limit'))
+ return robots_list(user.username, include_permissions=parsed_args.get('permissions', False))
@resource('/v1/user/robots/<robot_shortname>')
@@ -67,41 +91,29 @@ class UserRobotList(ApiResource):
'The short name for the robot, without any user or organization prefix')
class UserRobot(ApiResource):
""" Resource for managing a user's robots. """
- schemas = {
- 'CreateRobot': CREATE_ROBOT_SCHEMA,
- }
-
@require_user_admin
@nickname('getUserRobot')
def get(self, robot_shortname):
""" Returns the user's robot with the specified name. """
parent = get_authenticated_user()
- robot = model.get_user_robot(robot_shortname, parent)
- return robot.to_dict(include_metadata=True, include_token=True)
+ robot, password = model.user.get_robot(robot_shortname, parent)
+ return robot_view(robot.username, password)
@require_user_admin
@nickname('createUserRobot')
- @max_json_size(ROBOT_MAX_SIZE)
- @validate_json_request('CreateRobot', optional=True)
def put(self, robot_shortname):
""" Create a new user robot with the specified name. """
parent = get_authenticated_user()
- create_data = request.get_json() or {}
- robot = model.create_user_robot(robot_shortname, parent, create_data.get('description'),
- create_data.get('unstructured_metadata'))
- log_action('create_robot', parent.username, {
- 'robot': robot_shortname,
- 'description': create_data.get('description'),
- 'unstructured_metadata': create_data.get('unstructured_metadata'),
- })
- return robot.to_dict(include_metadata=True, include_token=True), 201
+ robot, password = model.user.create_robot(robot_shortname, parent)
+ log_action('create_robot', parent.username, {'robot': robot_shortname})
+ return robot_view(robot.username, password), 201
@require_user_admin
@nickname('deleteUserRobot')
def delete(self, robot_shortname):
""" Delete an existing robot. """
parent = get_authenticated_user()
- model.delete_robot(format_robot_username(parent.username, robot_shortname))
+ model.user.delete_robot(format_robot_username(parent.username, robot_shortname))
log_action('delete_robot', parent.username, {'robot': robot_shortname})
return '', 204
@@ -111,30 +123,17 @@ class UserRobot(ApiResource):
@related_user_resource(UserRobotList)
class OrgRobotList(ApiResource):
""" Resource for listing an organization's robots. """
-
@require_scope(scopes.ORG_ADMIN)
@nickname('getOrgRobots')
@parse_args()
@query_param('permissions',
'Whether to include repositories and teams in which the robots have permission.',
type=truthy_bool, default=False)
- @query_param('token',
- 'If false, the robot\'s token is not returned.',
- type=truthy_bool, default=True)
- @query_param('limit',
- 'If specified, the number of robots to return.',
- type=int, default=None)
def get(self, orgname, parsed_args):
""" List the organization's robots. """
permission = OrganizationMemberPermission(orgname)
if permission.can():
- include_token = (AdministerOrganizationPermission(orgname).can() and
- parsed_args.get('token', True))
- include_permissions = (AdministerOrganizationPermission(orgname).can() and
- parsed_args.get('permissions', False))
- return robots_list(orgname, include_permissions=include_permissions,
- include_token=include_token,
- limit=parsed_args.get('limit'))
+ return robots_list(orgname, include_permissions=parsed_args.get('permissions', False))
raise Unauthorized()
@@ -146,38 +145,28 @@ class OrgRobotList(ApiResource):
@related_user_resource(UserRobot)
class OrgRobot(ApiResource):
""" Resource for managing an organization's robots. """
- schemas = {
- 'CreateRobot': CREATE_ROBOT_SCHEMA,
- }
-
@require_scope(scopes.ORG_ADMIN)
@nickname('getOrgRobot')
def get(self, orgname, robot_shortname):
""" Returns the organization's robot with the specified name. """
permission = AdministerOrganizationPermission(orgname)
if permission.can():
- robot = model.get_org_robot(robot_shortname, orgname)
- return robot.to_dict(include_metadata=True, include_token=True)
+ parent = model.organization.get_organization(orgname)
+ robot, password = model.user.get_robot(robot_shortname, parent)
+ return robot_view(robot.username, password)
raise Unauthorized()
@require_scope(scopes.ORG_ADMIN)
@nickname('createOrgRobot')
- @max_json_size(ROBOT_MAX_SIZE)
- @validate_json_request('CreateRobot', optional=True)
def put(self, orgname, robot_shortname):
""" Create a new robot in the organization. """
permission = AdministerOrganizationPermission(orgname)
if permission.can():
- create_data = request.get_json() or {}
- robot = model.create_org_robot(robot_shortname, orgname, create_data.get('description'),
- create_data.get('unstructured_metadata'))
- log_action('create_robot', orgname, {
- 'robot': robot_shortname,
- 'description': create_data.get('description'),
- 'unstructured_metadata': create_data.get('unstructured_metadata'),
- })
- return robot.to_dict(include_metadata=True, include_token=True), 201
+ parent = model.organization.get_organization(orgname)
+ robot, password = model.user.create_robot(robot_shortname, parent)
+ log_action('create_robot', orgname, {'robot': robot_shortname})
+ return robot_view(robot.username, password), 201
raise Unauthorized()
@@ -187,7 +176,7 @@ class OrgRobot(ApiResource):
""" Delete an existing organization robot. """
permission = AdministerOrganizationPermission(orgname)
if permission.can():
- model.delete_robot(format_robot_username(orgname, robot_shortname))
+ model.user.delete_robot(format_robot_username(orgname, robot_shortname))
log_action('delete_robot', orgname, {'robot': robot_shortname})
return '', 204
@@ -199,17 +188,16 @@ class OrgRobot(ApiResource):
'The short name for the robot, without any user or organization prefix')
class UserRobotPermissions(ApiResource):
""" Resource for listing the permissions a user's robot has in the system. """
-
@require_user_admin
@nickname('getUserRobotPermissions')
def get(self, robot_shortname):
""" Returns the list of repository permissions for the user's robot. """
parent = get_authenticated_user()
- robot = model.get_user_robot(robot_shortname, parent)
- permissions = model.list_robot_permissions(robot.name)
+ robot, _ = model.user.get_robot(robot_shortname, parent)
+ permissions = model.permission.list_robot_permissions(robot.username)
return {
- 'permissions': [permission.to_dict() for permission in permissions]
+ 'permissions': [permission_view(permission) for permission in permissions]
}
@@ -220,18 +208,18 @@ class UserRobotPermissions(ApiResource):
@related_user_resource(UserRobotPermissions)
class OrgRobotPermissions(ApiResource):
""" Resource for listing the permissions an org's robot has in the system. """
-
@require_user_admin
@nickname('getOrgRobotPermissions')
def get(self, orgname, robot_shortname):
""" Returns the list of repository permissions for the org's robot. """
permission = AdministerOrganizationPermission(orgname)
if permission.can():
- robot = model.get_org_robot(robot_shortname, orgname)
- permissions = model.list_robot_permissions(robot.name)
+ parent = model.organization.get_organization(orgname)
+ robot, _ = model.user.get_robot(robot_shortname, parent)
+ permissions = model.permission.list_robot_permissions(robot.username)
return {
- 'permissions': [permission.to_dict() for permission in permissions]
+ 'permissions': [permission_view(permission) for permission in permissions]
}
abort(403)
@@ -242,15 +230,14 @@ class OrgRobotPermissions(ApiResource):
'The short name for the robot, without any user or organization prefix')
class RegenerateUserRobot(ApiResource):
""" Resource for regenerate an organization's robot's token. """
-
@require_user_admin
@nickname('regenerateUserRobotToken')
def post(self, robot_shortname):
""" Regenerates the token for a user's robot. """
parent = get_authenticated_user()
- robot = model.regenerate_user_robot_token(robot_shortname, parent)
+ robot, password = model.user.regenerate_robot_token(robot_shortname, parent)
log_action('regenerate_robot_token', parent.username, {'robot': robot_shortname})
- return robot.to_dict(include_token=True)
+ return robot_view(robot.username, password)
@resource('/v1/organization/<orgname>/robots/<robot_shortname>/regenerate')
@@ -260,15 +247,15 @@ class RegenerateUserRobot(ApiResource):
@related_user_resource(RegenerateUserRobot)
class RegenerateOrgRobot(ApiResource):
""" Resource for regenerate an organization's robot's token. """
-
@require_scope(scopes.ORG_ADMIN)
@nickname('regenerateOrgRobotToken')
def post(self, orgname, robot_shortname):
""" Regenerates the token for an organization robot. """
permission = AdministerOrganizationPermission(orgname)
if permission.can():
- robot = model.regenerate_org_robot_token(robot_shortname, orgname)
+ parent = model.organization.get_organization(orgname)
+ robot, password = model.user.regenerate_robot_token(robot_shortname, parent)
log_action('regenerate_robot_token', orgname, {'robot': robot_shortname})
- return robot.to_dict(include_token=True)
+ return robot_view(robot.username, password)
raise Unauthorized()
diff --git a/endpoints/api/robot_models_interface.py b/endpoints/api/robot_models_interface.py
deleted file mode 100644
index c4a07d304..000000000
--- a/endpoints/api/robot_models_interface.py
+++ /dev/null
@@ -1,196 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-from endpoints.api import format_date
-
-
-class Permission(namedtuple('Permission', ['repository_name', 'repository_visibility_name', 'role_name'])):
- """
- Permission the relationship between a robot and a repository and whether that robot can see the repo.
- """
-
- def to_dict(self):
- return {
- 'repository': {
- 'name': self.repository_name,
- 'is_public': self.repository_visibility_name == 'public'
- },
- 'role': self.role_name
- }
-
-
-class Team(namedtuple('Team', ['name', 'avatar'])):
- """
- Team represents a team entry for a robot list entry.
- :type name: string
- :type avatar: {string -> string}
- """
- def to_dict(self):
- return {
- 'name': self.name,
- 'avatar': self.avatar,
- }
-
-
-class RobotWithPermissions(
- namedtuple('RobotWithPermissions', [
- 'name',
- 'password',
- 'created',
- 'last_accessed',
- 'teams',
- 'repository_names',
- 'description',
- ])):
- """
- RobotWithPermissions is a list of robot entries.
- :type name: string
- :type password: string
- :type created: datetime|None
- :type last_accessed: datetime|None
- :type teams: [Team]
- :type repository_names: [string]
- :type description: string
- """
-
- def to_dict(self, include_token=False):
- data = {
- 'name': self.name,
- 'created': format_date(self.created) if self.created is not None else None,
- 'last_accessed': format_date(self.last_accessed) if self.last_accessed is not None else None,
- 'teams': [team.to_dict() for team in self.teams],
- 'repositories': self.repository_names,
- 'description': self.description,
- }
-
- if include_token:
- data['token'] = self.password
-
- return data
-
-
-class Robot(
- namedtuple('Robot', [
- 'name',
- 'password',
- 'created',
- 'last_accessed',
- 'description',
- 'unstructured_metadata',
- ])):
- """
- Robot represents a robot entity.
- :type name: string
- :type password: string
- :type created: datetime|None
- :type last_accessed: datetime|None
- :type description: string
- :type unstructured_metadata: dict
- """
-
- def to_dict(self, include_metadata=False, include_token=False):
- data = {
- 'name': self.name,
- 'created': format_date(self.created) if self.created is not None else None,
- 'last_accessed': format_date(self.last_accessed) if self.last_accessed is not None else None,
- 'description': self.description,
- }
-
- if include_token:
- data['token'] = self.password
-
- if include_metadata:
- data['unstructured_metadata'] = self.unstructured_metadata
-
- return data
-
-
-@add_metaclass(ABCMeta)
-class RobotInterface(object):
- """
- Interface that represents all data store interactions required by the Robot API
- """
-
- @abstractmethod
- def get_org_robot(self, robot_shortname, orgname):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def get_user_robot(self, robot_shortname, owning_user):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def create_user_robot(self, robot_shortname, owning_user):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def create_org_robot(self, robot_shortname, orgname):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def delete_robot(self, robot_username):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def regenerate_user_robot_token(self, robot_shortname, owning_user):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def regenerate_org_robot_token(self, robot_shortname, orgname):
- """
-
- Returns:
- Robot object
-
- """
-
- @abstractmethod
- def list_entity_robot_permission_teams(self, prefix, include_permissions=False,
- include_token=False, limit=None):
- """
-
- Returns:
- list of RobotWithPermissions objects
-
- """
-
- @abstractmethod
- def list_robot_permissions(self, username):
- """
-
- Returns:
- list of Robot objects
-
- """
diff --git a/endpoints/api/robot_models_pre_oci.py b/endpoints/api/robot_models_pre_oci.py
deleted file mode 100644
index ad83decdf..000000000
--- a/endpoints/api/robot_models_pre_oci.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import features
-
-from app import avatar
-from data import model
-from active_migration import ActiveDataMigration, ERTMigrationFlags
-from data.database import (User, FederatedLogin, RobotAccountToken, Team as TeamTable, Repository,
- RobotAccountMetadata)
-from endpoints.api.robot_models_interface import (RobotInterface, Robot, RobotWithPermissions, Team,
- Permission)
-
-
-class RobotPreOCIModel(RobotInterface):
- def list_robot_permissions(self, username):
- permissions = model.permission.list_robot_permissions(username)
- return [Permission(permission.repository.name, permission.repository.visibility.name, permission.role.name) for
- permission in permissions]
-
- def list_entity_robot_permission_teams(self, prefix, include_token=False,
- include_permissions=False, limit=None):
- tuples = model.user.list_entity_robot_permission_teams(prefix, limit=limit,
- include_permissions=include_permissions)
- robots = {}
- robot_teams = set()
-
- for robot_tuple in tuples:
- robot_name = robot_tuple.get(User.username)
- if robot_name not in robots:
- token = None
- if include_token:
- # TODO(remove-unenc): Remove branches once migrated.
- if robot_tuple.get(RobotAccountToken.token):
- token = robot_tuple.get(RobotAccountToken.token).decrypt()
-
- if token is None and ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
- token = robot_tuple.get(FederatedLogin.service_ident)
- assert not token.startswith('robot:')
-
- robot_dict = {
- 'name': robot_name,
- 'token': token,
- 'created': robot_tuple.get(User.creation_date),
- 'last_accessed': (robot_tuple.get(User.last_accessed)
- if features.USER_LAST_ACCESSED else None),
- 'description': robot_tuple.get(RobotAccountMetadata.description),
- 'unstructured_metadata': robot_tuple.get(RobotAccountMetadata.unstructured_json),
- }
-
- if include_permissions:
- robot_dict.update({
- 'teams': [],
- 'repositories': [],
- })
-
- robots[robot_name] = Robot(robot_dict['name'], robot_dict['token'], robot_dict['created'],
- robot_dict['last_accessed'], robot_dict['description'],
- robot_dict['unstructured_metadata'])
- if include_permissions:
- team_name = robot_tuple.get(TeamTable.name)
- repository_name = robot_tuple.get(Repository.name)
-
- if team_name is not None:
- check_key = robot_name + ':' + team_name
- if check_key not in robot_teams:
- robot_teams.add(check_key)
-
- robot_dict['teams'].append(Team(
- team_name,
- avatar.get_data(team_name, team_name, 'team')
- ))
-
- if repository_name is not None:
- if repository_name not in robot_dict['repositories']:
- robot_dict['repositories'].append(repository_name)
- robots[robot_name] = RobotWithPermissions(robot_dict['name'], robot_dict['token'],
- robot_dict['created'],
- (robot_dict['last_accessed']
- if features.USER_LAST_ACCESSED else None),
- robot_dict['teams'],
- robot_dict['repositories'],
- robot_dict['description'])
-
- return robots.values()
-
- def regenerate_user_robot_token(self, robot_shortname, owning_user):
- robot, password, metadata = model.user.regenerate_robot_token(robot_shortname, owning_user)
- return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
- metadata.description, metadata.unstructured_json)
-
- def regenerate_org_robot_token(self, robot_shortname, orgname):
- parent = model.organization.get_organization(orgname)
- robot, password, metadata = model.user.regenerate_robot_token(robot_shortname, parent)
- return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
- metadata.description, metadata.unstructured_json)
-
- def delete_robot(self, robot_username):
- model.user.delete_robot(robot_username)
-
- def create_user_robot(self, robot_shortname, owning_user, description, unstructured_metadata):
- robot, password = model.user.create_robot(robot_shortname, owning_user, description or '',
- unstructured_metadata)
- return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
- description or '', unstructured_metadata)
-
- def create_org_robot(self, robot_shortname, orgname, description, unstructured_metadata):
- parent = model.organization.get_organization(orgname)
- robot, password = model.user.create_robot(robot_shortname, parent, description or '',
- unstructured_metadata)
- return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
- description or '', unstructured_metadata)
-
- def get_org_robot(self, robot_shortname, orgname):
- parent = model.organization.get_organization(orgname)
- robot, password, metadata = model.user.get_robot_and_metadata(robot_shortname, parent)
- return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
- metadata.description, metadata.unstructured_json)
-
- def get_user_robot(self, robot_shortname, owning_user):
- robot, password, metadata = model.user.get_robot_and_metadata(robot_shortname, owning_user)
- return Robot(robot.username, password, robot.creation_date, robot.last_accessed,
- metadata.description, metadata.unstructured_json)
-
-
-pre_oci_model = RobotPreOCIModel()
diff --git a/endpoints/api/search.py b/endpoints/api/search.py
index 0ddbbc3fa..018ab713c 100644
--- a/endpoints/api/search.py
+++ b/endpoints/api/search.py
@@ -1,19 +1,15 @@
""" Conduct searches against all registry context. """
-import features
-
from endpoints.api import (ApiResource, parse_args, query_param, truthy_bool, nickname, resource,
require_scope, path_param, internal_only, Unauthorized, InvalidRequest,
show_if)
-from data.database import Repository
from data import model
-from data.registry_model import registry_model
from auth.permissions import (OrganizationMemberPermission, ReadRepositoryPermission,
UserAdminPermission, AdministerOrganizationPermission,
ReadRepositoryPermission)
from auth.auth_context import get_authenticated_user
from auth import scopes
-from app import app, avatar, authentication
+from app import avatar, authentication
from flask import abort
from operator import itemgetter
from stringscore import liquidmetal
@@ -22,12 +18,6 @@ from util.names import parse_robot_username
import anunidecode # Don't listen to pylint's lies. This import is required.
import math
-
-ENTITY_SEARCH_SCORE = 1
-TEAM_SEARCH_SCORE = 2
-REPOSITORY_SEARCH_SCORE = 4
-
-
@resource('/v1/entities/link/<username>')
@internal_only
class LinkExternalEntity(ApiResource):
@@ -110,8 +100,7 @@ class EntitySearch(ApiResource):
robot_namespace = namespace_name
# Lookup users in the database for the prefix query.
- users = model.user.get_matching_users(prefix, robot_namespace, organization, limit=10,
- exact_matches_only=not features.PARTIAL_USER_AUTOCOMPLETE)
+ users = model.user.get_matching_users(prefix, robot_namespace, organization, limit=10)
# Lookup users via the user system for the prefix query. We'll filter out any users that
# already exist in the database.
@@ -169,13 +158,11 @@ class EntitySearch(ApiResource):
def search_entity_view(username, entity, get_short_name=None):
kind = 'user'
- title = 'user'
avatar_data = avatar.get_data_for_user(entity)
href = '/user/' + entity.username
if entity.organization:
kind = 'organization'
- title = 'org'
avatar_data = avatar.get_data_for_org(entity)
href = '/organization/' + entity.username
elif entity.robot:
@@ -186,15 +173,13 @@ def search_entity_view(username, entity, get_short_name=None):
href = '/organization/' + parts[0] + '?tab=robots&showRobot=' + entity.username
kind = 'robot'
- title = 'robot'
avatar_data = None
data = {
- 'title': title,
'kind': kind,
'avatar': avatar_data,
'name': entity.username,
- 'score': ENTITY_SEARCH_SCORE,
+ 'score': 1,
'href': href
}
@@ -218,7 +203,7 @@ def conduct_team_search(username, query, encountered_teams, results):
'name': team.name,
'organization': search_entity_view(username, team.organization),
'avatar': avatar.get_data_for_team(team),
- 'score': TEAM_SEARCH_SCORE,
+ 'score': 2,
'href': '/organization/' + team.organization.username + '/teams/' + team.name
})
@@ -237,20 +222,40 @@ def conduct_admined_team_search(username, query, encountered_teams, results):
'name': team.name,
'organization': search_entity_view(username, team.organization),
'avatar': avatar.get_data_for_team(team),
- 'score': TEAM_SEARCH_SCORE,
+ 'score': 2,
'href': '/organization/' + team.organization.username + '/teams/' + team.name
})
-def conduct_repo_search(username, query, results, offset=0, limit=5):
+def conduct_repo_search(username, query, results):
""" Finds matching repositories. """
- matching_repos = model.repository.get_filtered_matching_repositories(query, username, limit=limit,
- repo_kind=None,
- offset=offset)
+ def can_read(repo):
+ if repo.is_public:
+ return True
+
+ return ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()
+
+ only_public = username is None
+ matching_repos = model.repository.get_sorted_matching_repositories(query, only_public, can_read,
+ limit=5)
for repo in matching_repos:
- # TODO: make sure the repo.kind.name doesn't cause extra queries
- results.append(repo_result_view(repo, username))
+ repo_score = math.log(repo.count or 1, 10) or 1
+
+ # If the repository is under the user's namespace, give it 20% more weight.
+ namespace = repo.namespace_user.username
+ if OrganizationMemberPermission(namespace).can() or namespace == username:
+ repo_score = repo_score * 1.2
+
+ results.append({
+ 'kind': 'repository',
+ 'namespace': search_entity_view(username, repo.namespace_user),
+ 'name': repo.name,
+ 'description': repo.description,
+ 'is_public': repo.is_public,
+ 'score': repo_score,
+ 'href': '/repository/' + repo.namespace_user.username + '/' + repo.name
+ })
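+
+# Scoring sketch (hypothetical numbers): a repository with repo.count == 100
+# scores log(100, 10) = 2; if it also sits under the searching user's namespace,
+# the 20% boost above raises that to 2.4.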
def conduct_namespace_search(username, query, results):
@@ -270,30 +275,6 @@ def conduct_robot_search(username, query, results):
results.append(search_entity_view(username, robot, get_short_name))
-def repo_result_view(repo, username, last_modified=None, stars=None, popularity=None):
- kind = 'application' if Repository.kind.get_name(repo.kind_id) == 'application' else 'repository'
- view = {
- 'kind': kind,
- 'title': 'app' if kind == 'application' else 'repo',
- 'namespace': search_entity_view(username, repo.namespace_user),
- 'name': repo.name,
- 'description': repo.description,
- 'is_public': model.repository.is_repository_public(repo),
- 'score': REPOSITORY_SEARCH_SCORE,
- 'href': '/' + kind + '/' + repo.namespace_user.username + '/' + repo.name
- }
-
- if last_modified is not None:
- view['last_modified'] = last_modified
-
- if stars is not None:
- view['stars'] = stars
-
- if popularity is not None:
- view['popularity'] = popularity
-
- return view
-
@resource('/v1/find/all')
class ConductSearch(ApiResource):
""" Resource for finding users, repositories, teams, etc. """
@@ -334,49 +315,3 @@ class ConductSearch(ApiResource):
result['score'] = result['score'] * lm_score
return {'results': sorted(results, key=itemgetter('score'), reverse=True)}
-
-
-MAX_PER_PAGE = app.config.get('SEARCH_RESULTS_PER_PAGE', 10)
-MAX_RESULT_PAGE_COUNT = app.config.get('SEARCH_MAX_RESULT_PAGE_COUNT', 10)
-
-@resource('/v1/find/repositories')
-class ConductRepositorySearch(ApiResource):
- """ Resource for finding repositories. """
- @parse_args()
- @query_param('query', 'The search query.', type=str, default='')
- @query_param('page', 'The page.', type=int, default=1)
- @nickname('conductRepoSearch')
- def get(self, parsed_args):
- """ Get a list of apps and repositories that match the specified query. """
- query = parsed_args['query']
- page = min(max(1, parsed_args['page']), MAX_RESULT_PAGE_COUNT)
- offset = (page - 1) * MAX_PER_PAGE
- limit = offset + MAX_PER_PAGE + 1
-
- username = get_authenticated_user().username if get_authenticated_user() else None
-
- # Lookup matching repositories.
- matching_repos = list(model.repository.get_filtered_matching_repositories(query, username,
- repo_kind=None,
- limit=limit,
- offset=offset))
-
- # Load secondary information such as last modified time, star count and action count.
- repository_ids = [repo.id for repo in matching_repos]
- last_modified_map = registry_model.get_most_recent_tag_lifetime_start(matching_repos)
- star_map = model.repository.get_stars(repository_ids)
- action_sum_map = model.log.get_repositories_action_sums(repository_ids)
-
- # Build the results list.
- results = [repo_result_view(repo, username, last_modified_map.get(repo.id),
- star_map.get(repo.id, 0),
- float(action_sum_map.get(repo.id, 0)))
- for repo in matching_repos]
-
- return {
- 'results': results[0:MAX_PER_PAGE],
- 'has_additional': len(results) > MAX_PER_PAGE,
- 'page': page,
- 'page_size': MAX_PER_PAGE,
- 'start_index': offset,
- }
diff --git a/endpoints/api/secscan.py b/endpoints/api/secscan.py
index 71422184f..f8727140c 100644
--- a/endpoints/api/secscan.py
+++ b/endpoints/api/secscan.py
@@ -3,106 +3,69 @@
import logging
import features
-from app import app, secscan_api
-from auth.decorators import process_basic_auth_no_pass
-from data.registry_model import registry_model
-from data.registry_model.datatypes import SecurityScanStatus
+from app import secscan_api
+from data import model
from endpoints.api import (require_repo_read, path_param,
RepositoryParamResource, resource, nickname, show_if, parse_args,
- query_param, truthy_bool, disallow_for_app_repositories)
+ query_param, truthy_bool)
from endpoints.exception import NotFound, DownstreamIssue
-from endpoints.api.manifest import MANIFEST_DIGEST_ROUTE
from util.secscan.api import APIRequestFailure
logger = logging.getLogger(__name__)
-def _security_info(manifest_or_legacy_image, include_vulnerabilities=True):
- """ Returns a dict representing the result of a call to the security status API for the given
- manifest or image.
- """
- status = registry_model.get_security_status(manifest_or_legacy_image)
- if status is None:
- raise NotFound()
- if status != SecurityScanStatus.SCANNED:
- return {
- 'status': status.value,
- }
-
- try:
- if include_vulnerabilities:
- data = secscan_api.get_layer_data(manifest_or_legacy_image, include_vulnerabilities=True)
- else:
- data = secscan_api.get_layer_data(manifest_or_legacy_image, include_features=True)
- except APIRequestFailure as arf:
- raise DownstreamIssue(arf.message)
-
- if data is None:
- # If no data was found but we reached this point, then it indicates we have incorrect security
- # status for the manifest or legacy image. Mark the manifest or legacy image as unindexed
- # so it automatically gets re-indexed.
- if app.config.get('REGISTRY_STATE', 'normal') == 'normal':
- registry_model.reset_security_status(manifest_or_legacy_image)
-
- return {
- 'status': SecurityScanStatus.QUEUED.value,
- }
-
- return {
- 'status': status.value,
- 'data': data,
- }
+class SCAN_STATUS(object):
+ """ Security scan status enum """
+ SCANNED = 'scanned'
+ FAILED = 'failed'
+ QUEUED = 'queued'
+
+
+def _get_status(repo_image):
+ if repo_image.security_indexed_engine is not None and repo_image.security_indexed_engine >= 0:
+ return SCAN_STATUS.SCANNED if repo_image.security_indexed else SCAN_STATUS.FAILED
+
+ return SCAN_STATUS.QUEUED
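+
+# Mapping sketch: security_indexed_engine >= 0 means an indexing attempt happened,
+# so security_indexed == True yields 'scanned' and False yields 'failed';
+# otherwise the image is still 'queued'.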
-@resource('/v1/repository/<apirepopath:repository>/image/<imageid>/security')
@show_if(features.SECURITY_SCANNER)
+@resource('/v1/repository/<apirepopath:repository>/image/<imageid>/security')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@path_param('imageid', 'The image ID')
class RepositoryImageSecurity(RepositoryParamResource):
""" Operations for managing the vulnerabilities in a repository image. """
- @process_basic_auth_no_pass
@require_repo_read
@nickname('getRepoImageSecurity')
- @disallow_for_app_repositories
@parse_args()
@query_param('vulnerabilities', 'Include vulnerability information', type=truthy_bool,
default=False)
def get(self, namespace, repository, imageid, parsed_args):
""" Fetches the features and vulnerabilities (if any) for a repository image. """
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ repo_image = model.image.get_repo_image(namespace, repository, imageid)
+ if repo_image is None:
raise NotFound()
- legacy_image = registry_model.get_legacy_image(repo_ref, imageid)
- if legacy_image is None:
+ if not repo_image.security_indexed:
+ logger.debug('Image %s under repository %s/%s not security indexed',
+ repo_image.docker_image_id, namespace, repository)
+ return {
+ 'status': _get_status(repo_image),
+ }
+
+ try:
+ if parsed_args.vulnerabilities:
+ data = secscan_api.get_layer_data(repo_image, include_vulnerabilities=True)
+ else:
+ data = secscan_api.get_layer_data(repo_image, include_features=True)
+ except APIRequestFailure as arf:
+ raise DownstreamIssue({'message': arf.message})
+
+ if data is None:
raise NotFound()
- return _security_info(legacy_image, parsed_args.vulnerabilities)
-
-
-@resource(MANIFEST_DIGEST_ROUTE + '/security')
-@show_if(features.SECURITY_SCANNER)
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@path_param('manifestref', 'The digest of the manifest')
-class RepositoryManifestSecurity(RepositoryParamResource):
- """ Operations for managing the vulnerabilities in a repository manifest. """
-
- @process_basic_auth_no_pass
- @require_repo_read
- @nickname('getRepoManifestSecurity')
- @disallow_for_app_repositories
- @parse_args()
- @query_param('vulnerabilities', 'Include vulnerabilities informations', type=truthy_bool,
- default=False)
- def get(self, namespace, repository, manifestref, parsed_args):
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
- raise NotFound()
-
- manifest = registry_model.lookup_manifest_by_digest(repo_ref, manifestref, allow_dead=True)
- if manifest is None:
- raise NotFound()
-
- return _security_info(manifest, parsed_args.vulnerabilities)
+ return {
+ 'status': _get_status(repo_image),
+ 'data': data,
+ }
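+
+# Illustrative response for an indexed image (hypothetical values):
+# {'status': 'scanned', 'data': {...layer features/vulnerabilities from secscan_api...}}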
diff --git a/endpoints/api/signing.py b/endpoints/api/signing.py
deleted file mode 100644
index eb2e942ec..000000000
--- a/endpoints/api/signing.py
+++ /dev/null
@@ -1,29 +0,0 @@
-""" List and manage repository signing information """
-
-import logging
-import features
-
-from app import tuf_metadata_api
-from endpoints.api import (require_repo_read, path_param,
- RepositoryParamResource, resource, nickname, show_if,
- disallow_for_app_repositories, NotFound)
-from endpoints.api.signing_models_pre_oci import pre_oci_model as model
-
-logger = logging.getLogger(__name__)
-
-
-@resource('/v1/repository/<apirepopath:repository>/signatures')
-@show_if(features.SIGNING)
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-class RepositorySignatures(RepositoryParamResource):
- """ Operations for managing the signatures in a repository image. """
-
- @require_repo_read
- @nickname('getRepoSignatures')
- @disallow_for_app_repositories
- def get(self, namespace, repository):
- """ Fetches the list of signed tags for the repository. """
- if not model.is_trust_enabled(namespace, repository):
- raise NotFound()
-
- return {'delegations': tuf_metadata_api.get_all_tags_with_expiration(namespace, repository)}
diff --git a/endpoints/api/signing_models_interface.py b/endpoints/api/signing_models_interface.py
deleted file mode 100644
index 6e5ce4ca4..000000000
--- a/endpoints/api/signing_models_interface.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-@add_metaclass(ABCMeta)
-class SigningInterface(object):
- """
- Interface that represents all data store interactions required by the signing API endpoint.
- """
- @abstractmethod
- def is_trust_enabled(self, namespace_name, repo_name):
- """
- Returns whether the repository with the given namespace name and repository name exists and
- has trust enabled.
- """
diff --git a/endpoints/api/signing_models_pre_oci.py b/endpoints/api/signing_models_pre_oci.py
deleted file mode 100644
index 03afb1104..000000000
--- a/endpoints/api/signing_models_pre_oci.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from data import model
-from endpoints.api.signing_models_interface import SigningInterface
-
-
-class PreOCIModel(SigningInterface):
- """
- PreOCIModel implements the data model for signing using a database schema
- before it was changed to support the OCI specification.
- """
- def is_trust_enabled(self, namespace_name, repo_name):
- repo = model.repository.get_repository(namespace_name, repo_name)
- if repo is None:
- return False
-
- return repo.trust_enabled
-
-
-pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/subscribe.py b/endpoints/api/subscribe.py
index b526e25d2..a0be987eb 100644
--- a/endpoints/api/subscribe.py
+++ b/endpoints/api/subscribe.py
@@ -1,28 +1,32 @@
""" Subscribe to plans. """
+
import logging
import stripe
-import features
+
from app import billing
from endpoints.api import request_error, log_action
-from data.billing import PLANS
-from endpoints.api.subscribe_models_pre_oci import data_model as model
from endpoints.exception import NotFound
+from data import model
+from data.billing import PLANS
+
+import features
logger = logging.getLogger(__name__)
def check_repository_usage(user_or_org, plan_found):
- private_repos = model.get_private_repo_count(user_or_org.username)
+ private_repos = model.user.get_private_repo_count(user_or_org.username)
if plan_found is None:
repos_allowed = 0
else:
repos_allowed = plan_found['privateRepos']
if private_repos > repos_allowed:
- model.create_unique_notification('over_private_usage', user_or_org.username, {'namespace': user_or_org.username})
+ model.notification.create_unique_notification('over_private_usage', user_or_org,
+ {'namespace': user_or_org.username})
else:
- model.delete_notifications_by_kind(user_or_org.username, 'over_private_usage')
+ model.notification.delete_notifications_by_kind(user_or_org, 'over_private_usage')
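+
+
+# Sketch with hypothetical numbers: on a plan allowing 10 private repositories,
+# holding 12 creates a unique 'over_private_usage' notification; dropping back
+# to 10 or fewer deletes it again.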
def carderror_response(exc):
@@ -66,7 +70,7 @@ def subscribe(user, plan, token, require_business_plan):
user.username)
raise request_error(message='No matching plan found')
- private_repos = model.get_private_repo_count(user.username)
+ private_repos = model.user.get_private_repo_count(user.username)
# This is the default response
response_json = {
@@ -88,9 +92,9 @@ def subscribe(user, plan, token, require_business_plan):
user.save()
check_repository_usage(user, plan_found)
log_action('account_change_plan', user.username, {'plan': plan})
- except stripe.error.CardError as e:
+ except stripe.CardError as e:
return carderror_response(e)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
return connection_response(e)
response_json = subscription_view(cus.subscription, private_repos)
@@ -100,17 +104,19 @@ def subscribe(user, plan, token, require_business_plan):
# Change the plan
try:
cus = billing.Customer.retrieve(user.stripe_id)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
return connection_response(e)
if plan_found['price'] == 0:
if cus.subscription is not None:
# We only have to cancel the subscription if they actually have one
try:
- cus.subscription.delete()
- except stripe.error.APIConnectionError as e:
+ cus.cancel_subscription()
+ cus.save()
+ except stripe.APIConnectionError as e:
return connection_response(e)
+
check_repository_usage(user, plan_found)
log_action('account_change_plan', user.username, {'plan': plan})
@@ -123,9 +129,9 @@ def subscribe(user, plan, token, require_business_plan):
try:
cus.save()
- except stripe.error.CardError as e:
+ except stripe.CardError as e:
return carderror_response(e)
- except stripe.error.APIConnectionError as e:
+ except stripe.APIConnectionError as e:
return connection_response(e)
response_json = subscription_view(cus.subscription, private_repos)
diff --git a/endpoints/api/subscribe_models_interface.py b/endpoints/api/subscribe_models_interface.py
deleted file mode 100644
index fbc7a8a70..000000000
--- a/endpoints/api/subscribe_models_interface.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-
-@add_metaclass(ABCMeta)
-class SubscribeInterface(object):
- """
- Interface that represents all data store interactions required by the subscribe API endpoint.
- """
- @abstractmethod
- def get_private_repo_count(self, username):
- """
- Returns the number of private repositories for a given username or namespace.
- """
-
- @abstractmethod
- def create_unique_notification(self, kind_name, target_username, metadata={}):
- """
- Creates a notification using the given parameters.
- """
-
- @abstractmethod
- def delete_notifications_by_kind(self, target_username, kind_name):
- """
- Remove notifications for a target based on given kind.
- """
diff --git a/endpoints/api/subscribe_models_pre_oci.py b/endpoints/api/subscribe_models_pre_oci.py
deleted file mode 100644
index a5ca83149..000000000
--- a/endpoints/api/subscribe_models_pre_oci.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from data.model.notification import create_unique_notification, delete_notifications_by_kind
-from data.model.user import get_private_repo_count, get_user_or_org
-from endpoints.api.subscribe_models_interface import SubscribeInterface
-
-
-class PreOCIModel(SubscribeInterface):
- """
- PreOCIModel implements the data model for build triggers using a database schema
- before it was changed to support the OCI specification.
- """
- def get_private_repo_count(self, username):
- return get_private_repo_count(username)
-
- def create_unique_notification(self, kind_name, target_username, metadata={}):
- target = get_user_or_org(target_username)
- create_unique_notification(kind_name, target, metadata)
-
- def delete_notifications_by_kind(self, target_username, kind_name):
- target = get_user_or_org(target_username)
- delete_notifications_by_kind(target, kind_name)
-
-
-data_model = PreOCIModel()
diff --git a/endpoints/api/suconfig.py b/endpoints/api/suconfig.py
index a96a7356b..db5050489 100644
--- a/endpoints/api/suconfig.py
+++ b/endpoints/api/suconfig.py
@@ -3,14 +3,25 @@
import logging
import os
import signal
-import subprocess
from flask import abort
+from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if,
+ require_fresh_login, request, validate_json_request, verify_not_prod,
+ InvalidRequest)
-from app import app, config_provider
+from endpoints.common import common_login
+from app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY
+from data import model
+from data.database import configure
from auth.permissions import SuperUserPermission
-from endpoints.api.suconfig_models_pre_oci import pre_oci_model as model
-from endpoints.api import (ApiResource, nickname, resource, internal_only, show_if, verify_not_prod)
+from auth.auth_context import get_authenticated_user
+from data.database import User
+from util.config.configutil import add_enterprise_config_defaults
+from util.config.database import sync_database_with_config
+from util.config.validator import validate_service_for_config, CONFIG_FILENAMES
+from util.license import decode_license, LicenseDecodeError
+from data.runmigration import run_alembic_migration
+from data.users import get_federated_service_name, get_users_handler
import features
@@ -23,12 +34,16 @@ def database_is_valid():
if app.config['TESTING']:
return False
- return model.is_valid()
+ try:
+ list(User.select().limit(1))
+ return True
+ except Exception:
+ return False
def database_has_users():
""" Returns whether the database has any users defined. """
- return model.has_users()
+ return bool(list(User.select().limit(1)))
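+
+
+# Both probes issue a single "SELECT ... LIMIT 1": database_is_valid() treats any
+# error as an unprovisioned schema, while database_has_users() checks for at least one row.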
@resource('/v1/superuser/registrystatus')
@@ -42,15 +57,41 @@ class SuperUserRegistryStatus(ApiResource):
@verify_not_prod
def get(self):
""" Returns the status of the registry. """
+
# If we have SETUP_COMPLETE, then we're ready to go!
if app.config.get('SETUP_COMPLETE', False):
return {
'provider_id': config_provider.provider_id,
+ 'requires_restart': config_provider.requires_restart(app.config),
'status': 'ready'
}
+ # If there is no conf/stack volume, then report that status.
+ if not config_provider.volume_exists():
+ return {
+ 'status': 'missing-config-dir'
+ }
+
+ # If there is no license file, we need to ask the user to upload it.
+ if not config_provider.has_license_file():
+ return {
+ 'status': 'upload-license'
+ }
+
+ # If there is no config file, we need to setup the database.
+ if not config_provider.config_exists():
+ return {
+ 'status': 'config-db'
+ }
+
+ # If the database isn't yet valid, then we need to set it up.
+ if not database_is_valid():
+ return {
+ 'status': 'setup-db'
+ }
+
return {
- 'status': 'setup-incomplete'
+ 'status': 'create-superuser' if not database_has_users() else 'config'
}
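+
+# Status ladder implemented above: missing-config-dir -> upload-license ->
+# config-db -> setup-db -> create-superuser -> config -> ready.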
@@ -65,20 +106,40 @@ class _AlembicLogHandler(logging.Handler):
'message': record.getMessage()
})
-# From: https://stackoverflow.com/a/44712205
-def get_process_id(name):
- """Return process ids found by (partial) name or regex.
+@resource('/v1/superuser/setupdb')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSetupDatabase(ApiResource):
+ """ Resource for invoking alembic to setup the database. """
+ @verify_not_prod
+ @nickname('scSetupDatabase')
+ def get(self):
+ """ Invokes the alembic upgrade process. """
+ # Note: This method is called after the database configuration is saved, but before the
+ # database has any tables. Therefore, we only allow it to be run in that unique case.
+ if config_provider.config_exists() and not database_is_valid():
+ # Note: We need to reconfigure the database here as the config has changed.
+ combined = dict(**app.config)
+ combined.update(config_provider.get_config())
+
+ configure(combined)
+ app.config['DB_URI'] = combined['DB_URI']
+
+ log_handler = _AlembicLogHandler()
+
+ try:
+ run_alembic_migration(log_handler)
+ except Exception as ex:
+ return {
+ 'error': str(ex)
+ }
+
+ return {
+ 'logs': log_handler.records
+ }
+
+ abort(403)
- >>> get_process_id('kthreadd')
- [2]
- >>> get_process_id('watchdog')
- [10, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61] # ymmv
- >>> get_process_id('non-existent process')
- []
- """
- child = subprocess.Popen(['pgrep', name], stdout=subprocess.PIPE, shell=False)
- response = child.communicate()[0]
- return [int(pid) for pid in response.split()]
@resource('/v1/superuser/shutdown')
@@ -98,7 +159,292 @@ class SuperUserShutdown(ApiResource):
if app.config.get('DEBUGGING') == True:
return {}
- os.kill(get_process_id('my_init')[0], signal.SIGINT)
+ os.kill(1, signal.SIGINT)
return {}
abort(403)
+
+
+@resource('/v1/superuser/config')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfig(ApiResource):
+ """ Resource for fetching and updating the current configuration, if any. """
+ schemas = {
+ 'UpdateConfig': {
+ 'type': 'object',
+ 'description': 'Updates the YAML config file',
+ 'required': [
+ 'config',
+ 'hostname'
+ ],
+ 'properties': {
+ 'config': {
+ 'type': 'object'
+ },
+ 'hostname': {
+ 'type': 'string'
+ },
+ 'password': {
+ 'type': 'string'
+ },
+ },
+ },
+ }
+
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('scGetConfig')
+ def get(self):
+ """ Returns the currently defined configuration, if any. """
+ if SuperUserPermission().can():
+ config_object = config_provider.get_config()
+ return {
+ 'config': config_object
+ }
+
+ abort(403)
+
+ @nickname('scUpdateConfig')
+ @verify_not_prod
+ @validate_json_request('UpdateConfig')
+ def put(self):
+ """ Updates the config override file. """
+ # Note: This method is called to set the database configuration before any super users exist,
+ # so we also allow it to be called if there is no valid registry configuration set up.
+ if not config_provider.config_exists() or SuperUserPermission().can():
+ config_object = request.get_json()['config']
+ hostname = request.get_json()['hostname']
+
+ # Add any enterprise defaults missing from the config.
+ add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname)
+
+ # Write the configuration changes to the config override file.
+ config_provider.save_config(config_object)
+
+ # If the authentication system is not the database, link the superuser account to the
+ # the authentication system chosen.
+ if config_object.get('AUTHENTICATION_TYPE', 'Database') != 'Database':
+ current_user = get_authenticated_user()
+ if current_user is None:
+ abort(401)
+
+ service_name = get_federated_service_name(config_object['AUTHENTICATION_TYPE'])
+ if not model.user.lookup_federated_login(current_user, service_name):
+ # Verify the user's credentials and retrieve the user's external username+email.
+ handler = get_users_handler(config_object, config_provider, OVERRIDE_CONFIG_DIRECTORY)
+ (result, err_msg) = handler.verify_credentials(current_user.username,
+ request.get_json().get('password', ''))
+ if not result:
+ logger.error('Could not save configuration due to external auth failure: %s', err_msg)
+ abort(400)
+
+ # Link the existing user to the external user.
+ model.user.attach_federated_login(current_user, service_name, result.username)
+
+ # Ensure database is up-to-date with config
+ sync_database_with_config(config_object)
+
+ return {
+ 'exists': True,
+ 'config': config_object
+ }
+
+ abort(403)
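+
+# Illustrative request against the resource above (hypothetical values):
+#   PUT /v1/superuser/config
+#   {"config": {"AUTHENTICATION_TYPE": "Database"}, "hostname": "quay.example.com"}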
+
+
+@resource('/v1/superuser/config/license')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSetAndValidateLicense(ApiResource):
+ """ Resource for setting and validating a license. """
+ schemas = {
+ 'ValidateLicense': {
+ 'type': 'object',
+ 'description': 'Validates and sets a license',
+ 'required': [
+ 'license',
+ ],
+ 'properties': {
+ 'license': {
+ 'type': 'string'
+ },
+ },
+ },
+ }
+
+ @nickname('suSetAndValidateLicense')
+ @verify_not_prod
+ @validate_json_request('ValidateLicense')
+ def post(self):
+ """ Validates the given license contents and then saves it to the config volume. """
+ if config_provider.has_license_file():
+ abort(403)
+
+ license_contents = request.get_json()['license']
+ try:
+ decoded_license = decode_license(license_contents)
+ except LicenseDecodeError as le:
+ raise InvalidRequest(le.message)
+
+ statuses = decoded_license.validate({})
+ all_met = all(status.is_met() for status in statuses)
+ if all_met:
+ config_provider.save_license(license_contents)
+
+ return {
+ 'status': [status.as_dict(for_private=True) for status in statuses],
+ 'success': all_met,
+ }
+
+
+@resource('/v1/superuser/config/file/<filename>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfigFile(ApiResource):
+ """ Resource for fetching the status of config files and overriding them. """
+ @nickname('scConfigFileExists')
+ @verify_not_prod
+ def get(self, filename):
+ """ Returns whether the configuration file with the given name exists. """
+ if filename not in CONFIG_FILENAMES:
+ abort(404)
+
+ if SuperUserPermission().can():
+ return {
+ 'exists': config_provider.volume_file_exists(filename)
+ }
+
+ abort(403)
+
+ @nickname('scUpdateConfigFile')
+ @verify_not_prod
+ def post(self, filename):
+ """ Updates the configuration file with the given name. """
+ if filename not in CONFIG_FILENAMES:
+ abort(404)
+
+ # Note: This method can be called before the configuration exists
+ # to upload the database SSL cert.
+ if not config_provider.config_exists() or SuperUserPermission().can():
+ uploaded_file = request.files['file']
+ if not uploaded_file:
+ abort(400)
+
+ config_provider.save_volume_file(filename, uploaded_file)
+ return {
+ 'status': True
+ }
+
+ abort(403)
+
+
+@resource('/v1/superuser/config/createsuperuser')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserCreateInitialSuperUser(ApiResource):
+ """ Resource for creating the initial super user. """
+ schemas = {
+ 'CreateSuperUser': {
+ 'type': 'object',
+ 'description': 'Information for creating the initial super user',
+ 'required': [
+ 'username',
+ 'password',
+ 'email'
+ ],
+ 'properties': {
+ 'username': {
+ 'type': 'string',
+ 'description': 'The username for the superuser'
+ },
+ 'password': {
+ 'type': 'string',
+ 'description': 'The password for the superuser'
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'The e-mail address for the superuser'
+ },
+ },
+ },
+ }
+
+ @nickname('scCreateInitialSuperuser')
+ @verify_not_prod
+ @validate_json_request('CreateSuperUser')
+ def post(self):
+ """ Creates the initial super user, updates the underlying configuration and
+ sets the current session to have that super user. """
+
+ # Special security check: This method is only accessible when:
+ # - There is a valid config YAML file.
+ # - There are currently no users in the database (clean install)
+ #
+ # We do this special security check because at the point this method is called, the database
+ # is clean but does not (yet) have any super users for our permissions code to check against.
+ if config_provider.config_exists() and not database_has_users():
+ data = request.get_json()
+ username = data['username']
+ password = data['password']
+ email = data['email']
+
+ # Create the user in the database.
+ superuser = model.user.create_user(username, password, email, auto_verify=True)
+
+ # Add the user to the config.
+ config_object = config_provider.get_config()
+ config_object['SUPER_USERS'] = [username]
+ config_provider.save_config(config_object)
+
+ # Update the in-memory config for the new superuser.
+ superusers.register_superuser(username)
+
+ # Conduct login with that user.
+ common_login(superuser)
+
+ return {
+ 'status': True
+ }
+
+ abort(403)
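+
+# Illustrative request against the resource above (hypothetical values):
+#   POST /v1/superuser/config/createsuperuser
+#   {"username": "admin", "password": "s3cret", "email": "admin@example.com"}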
+
+
+@resource('/v1/superuser/config/validate/<service>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserConfigValidate(ApiResource):
+ """ Resource for validating a block of configuration against an external service. """
+ schemas = {
+ 'ValidateConfig': {
+ 'type': 'object',
+ 'description': 'Validates configuration',
+ 'required': [
+ 'config'
+ ],
+ 'properties': {
+ 'config': {
+ 'type': 'object'
+ },
+ 'password': {
+ 'type': 'string',
+ 'description': 'The users password, used for auth validation'
+ }
+ },
+ },
+ }
+
+ @nickname('scValidateConfig')
+ @verify_not_prod
+ @validate_json_request('ValidateConfig')
+ def post(self, service):
+ """ Validates the given config for the given service. """
+ # Note: This method is called to validate the database configuration before any super users
+ # exist, so we also allow it to be called if there is no valid registry configuration set up. Note that
+ # this is also safe since this method does not access any information not given in the request.
+ if not config_provider.config_exists() or SuperUserPermission().can():
+ config = request.get_json()['config']
+ return validate_service_for_config(service, config, request.get_json().get('password', ''))
+
+ abort(403)
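+
+# Illustrative request against the resource above (hypothetical values; valid
+# service names come from validate_service_for_config):
+#   POST /v1/superuser/config/validate/database
+#   {"config": {"DB_URI": "mysql+pymysql://user:pass@host/quay"}, "password": "s3cret"}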
diff --git a/endpoints/api/suconfig_models_interface.py b/endpoints/api/suconfig_models_interface.py
deleted file mode 100644
index 9f8cbd0cb..000000000
--- a/endpoints/api/suconfig_models_interface.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-
-@add_metaclass(ABCMeta)
-class SuperuserConfigDataInterface(object):
- """
- Interface that represents all data store interactions required by the superuser config API.
- """
-
- @abstractmethod
- def is_valid(self):
- """
- Returns true if the configured database is valid.
- """
-
- @abstractmethod
- def has_users(self):
- """
- Returns true if there are any users defined.
- """
-
- @abstractmethod
- def create_superuser(self, username, password, email):
- """
- Creates a new superuser with the given username, password and email. Returns the user's UUID.
- """
-
- @abstractmethod
- def has_federated_login(self, username, service_name):
- """
- Returns true if the matching user has a federated login under the matching service.
- """
-
- @abstractmethod
- def attach_federated_login(self, username, service_name, federated_username):
- """
- Attaches a federatated login to the matching user, under the given service.
- """
diff --git a/endpoints/api/suconfig_models_pre_oci.py b/endpoints/api/suconfig_models_pre_oci.py
deleted file mode 100644
index 9bcb40acd..000000000
--- a/endpoints/api/suconfig_models_pre_oci.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from data import model
-from data.database import User
-from endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface
-
-class PreOCIModel(SuperuserConfigDataInterface):
- def is_valid(self):
- try:
- list(User.select().limit(1))
- return True
- except:
- return False
-
- def has_users(self):
- return bool(list(User.select().limit(1)))
-
- def create_superuser(self, username, password, email):
- return model.user.create_user(username, password, email, auto_verify=True).uuid
-
- def has_federated_login(self, username, service_name):
- user = model.user.get_user(username)
- if user is None:
- return False
-
- return bool(model.user.lookup_federated_login(user, service_name))
-
- def attach_federated_login(self, username, service_name, federated_username):
- user = model.user.get_user(username)
- if user is None:
- return False
-
- model.user.attach_federated_login(user, service_name, federated_username)
-
-pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/superuser.py b/endpoints/api/superuser.py
index ec1a4992f..9a1da3dea 100644
--- a/endpoints/api/superuser.py
+++ b/endpoints/api/superuser.py
@@ -1,8 +1,10 @@
""" Superuser API. """
+
import logging
import os
import string
-import socket
+
+import pathvalidate
from datetime import datetime
from random import SystemRandom
@@ -11,26 +13,26 @@ from flask import request, make_response, jsonify
import features
-from app import app, avatar, superusers, authentication, config_provider
+from app import (app, avatar, superusers, authentication, config_provider, license_validator,
+ all_queues, log_archive, build_logs)
from auth import scopes
from auth.auth_context import get_authenticated_user
from auth.permissions import SuperUserPermission
-from data.database import ServiceKeyApprovalType
-from data.logs_model import logs_model
+from data.buildlogs import BuildStatusRetrievalError
from endpoints.api import (ApiResource, nickname, resource, validate_json_request,
internal_only, require_scope, show_if, parse_args,
- query_param, require_fresh_login, path_param, verify_not_prod,
- page_support, log_action, format_date, truthy_bool,
- InvalidRequest, NotFound, Unauthorized, InvalidResponse)
-from endpoints.api.build import get_logs_or_log_url
-from endpoints.api.superuser_models_pre_oci import (pre_oci_model, ServiceKeyDoesNotExist,
- ServiceKeyAlreadyApproved,
- InvalidRepositoryBuildException)
-from endpoints.api.logs import _validate_logs_arguments
-from util.request import get_request_ip
+ query_param, abort, require_fresh_login, path_param, verify_not_prod,
+ page_support, log_action, InvalidRequest)
+from endpoints.api.build import build_status_view, get_logs_or_log_url
+from endpoints.api.logs import get_logs, get_aggregate_logs
+from data import model
+from data.database import ServiceKeyApprovalType
+from endpoints.exception import NotFound
from util.useremails import send_confirmation_email, send_recovery_email
-from util.validation import validate_service_key_name
-from _init import ROOT_DIR
+from util.license import decode_license, LicenseDecodeError
+from util.security.ssl import load_certificate, CertInvalidException
+from util.config.validator import EXTRA_CA_DIRECTORY
+
logger = logging.getLogger(__name__)
@@ -45,11 +47,60 @@ def get_services():
return services
+@resource('/v1/superuser/systemlogs/<service>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserGetLogsForService(ApiResource):
+ """ Resource for fetching the kinds of system logs in the system. """
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('getSystemLogs')
+ @require_scope(scopes.SUPERUSER)
+ def get(self, service):
+ """ Returns the logs for the specific service. """
+ if SuperUserPermission().can():
+      if service not in get_services():
+ abort(404)
+
+ logs = []
+ try:
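+        # Syslog-style lines are tagged 'service[pid]: ...', so matching on
+        # service + '[' keeps only the lines emitted by the requested service.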
+ with open(app.config['SYSTEM_LOGS_FILE'], 'r') as f:
+ logs = [line for line in f if line.find(service + '[') >= 0]
+
+ except Exception:
+ logger.exception('Cannot read logs')
+ abort(400)
+
+ return {
+ 'logs': '\n'.join(logs)
+ }
+
+ abort(403)
+
+
+@resource('/v1/superuser/systemlogs/')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserSystemLogServices(ApiResource):
+ """ Resource for fetching the kinds of system logs in the system. """
+ @require_fresh_login
+ @verify_not_prod
+ @nickname('listSystemLogServices')
+ @require_scope(scopes.SUPERUSER)
+ def get(self):
+ """ List the system logs for the current system. """
+ if SuperUserPermission().can():
+ return {
+ 'services': list(get_services())
+ }
+
+ abort(403)
+
+
@resource('/v1/superuser/aggregatelogs')
@internal_only
class SuperUserAggregateLogs(ApiResource):
""" Resource for fetching aggregated logs for the current user. """
-
@require_fresh_login
@verify_not_prod
@nickname('listAllAggregateLogs')
@@ -59,23 +110,19 @@ class SuperUserAggregateLogs(ApiResource):
def get(self, parsed_args):
""" Returns the aggregated logs for the current system. """
if SuperUserPermission().can():
- (start_time, end_time) = _validate_logs_arguments(parsed_args['starttime'],
- parsed_args['endtime'])
- aggregated_logs = logs_model.get_aggregated_log_counts(start_time, end_time)
- return {
- 'aggregated': [log.to_dict() for log in aggregated_logs]
- }
+ start_time = parsed_args['starttime']
+ end_time = parsed_args['endtime']
- raise Unauthorized()
+ return get_aggregate_logs(start_time, end_time)
+
+ abort(403)
-LOGS_PER_PAGE = 20
@resource('/v1/superuser/logs')
@internal_only
@show_if(features.SUPER_USERS)
class SuperUserLogs(ApiResource):
""" Resource for fetching all logs in the system. """
-
@require_fresh_login
@verify_not_prod
@nickname('listAllLogs')
@@ -91,15 +138,9 @@ class SuperUserLogs(ApiResource):
start_time = parsed_args['starttime']
end_time = parsed_args['endtime']
- (start_time, end_time) = _validate_logs_arguments(start_time, end_time)
- log_entry_page = logs_model.lookup_logs(start_time, end_time, page_token=page_token)
- return {
- 'start_time': format_date(start_time),
- 'end_time': format_date(end_time),
- 'logs': [log.to_dict(avatar, include_namespace=True) for log in log_entry_page.logs],
- }, log_entry_page.next_page_token
+ return get_logs(start_time, end_time, page_token=page_token)
- raise Unauthorized()
+ abort(403)
def org_view(org):
@@ -109,7 +150,6 @@ def org_view(org):
'avatar': avatar.get_data_for_org(org),
}
-
def user_view(user, password=None):
user_data = {
'kind': 'user',
@@ -127,13 +167,11 @@ def user_view(user, password=None):
return user_data
-
@resource('/v1/superuser/changelog/')
@internal_only
@show_if(features.SUPER_USERS)
class ChangeLog(ApiResource):
""" Resource for returning the change log for enterprise customers. """
-
@require_fresh_login
@verify_not_prod
@nickname('getChangeLog')
@@ -141,12 +179,13 @@ class ChangeLog(ApiResource):
def get(self):
""" Returns the change log for this installation. """
if SuperUserPermission().can():
- with open(os.path.join(ROOT_DIR, 'CHANGELOG.md'), 'r') as f:
+ with open('CHANGELOG.md', 'r') as f:
return {
'log': f.read()
}
- raise Unauthorized()
+ abort(403)
+
@resource('/v1/superuser/organizations/')
@@ -154,7 +193,6 @@ class ChangeLog(ApiResource):
@show_if(features.SUPER_USERS)
class SuperUserOrganizationList(ApiResource):
""" Resource for listing organizations in the system. """
-
@require_fresh_login
@verify_not_prod
@nickname('listAllOrganizations')
@@ -162,14 +200,16 @@ class SuperUserOrganizationList(ApiResource):
def get(self):
""" Returns a list of all organizations in the system. """
if SuperUserPermission().can():
+ orgs = model.organization.get_organizations()
return {
- 'organizations': [org.to_dict() for org in pre_oci_model.get_organizations()]
+ 'organizations': [org_view(org) for org in orgs]
}
- raise Unauthorized()
+ abort(403)
@resource('/v1/superuser/users/')
+@internal_only
@show_if(features.SUPER_USERS)
class SuperUserList(ApiResource):
""" Resource for listing users in the system. """
@@ -195,19 +235,17 @@ class SuperUserList(ApiResource):
@require_fresh_login
@verify_not_prod
@nickname('listAllUsers')
- @parse_args()
- @query_param('disabled', 'If false, only enabled users will be returned.', type=truthy_bool,
- default=True)
@require_scope(scopes.SUPERUSER)
- def get(self, parsed_args):
+ def get(self):
""" Returns a list of all users in the system. """
if SuperUserPermission().can():
- users = pre_oci_model.get_active_users(disabled=parsed_args['disabled'])
+ users = model.user.get_active_users()
return {
- 'users': [user.to_dict() for user in users]
+ 'users': [user_view(user) for user in users]
}
- raise Unauthorized()
+ abort(403)
+
@require_fresh_login
@verify_not_prod
@@ -218,7 +256,7 @@ class SuperUserList(ApiResource):
""" Creates a new user. """
# Ensure that we are using database auth.
if app.config['AUTHENTICATION_TYPE'] != 'Database':
- raise InvalidRequest('Cannot create a user in a non-database auth system')
+ abort(400)
user_information = request.get_json()
if SuperUserPermission().can():
@@ -229,9 +267,14 @@ class SuperUserList(ApiResource):
# Create the user.
username = user_information['username']
email = user_information.get('email')
- install_user, confirmation_code = pre_oci_model.create_install_user(username, password, email)
+ prompts = model.user.get_default_user_prompts(features)
+ user = model.user.create_user(username, password, email, auto_verify=not features.MAILING,
+ email_required=features.MAILING, prompts=prompts)
+
+ # If mailing is turned on, send the user a verification email.
if features.MAILING:
- send_confirmation_email(install_user.username, install_user.email, confirmation_code)
+ confirmation = model.user.create_confirm_email_code(user)
+ send_confirmation_email(user.username, user.email, confirmation.code)
return {
'username': username,
@@ -240,7 +283,7 @@ class SuperUserList(ApiResource):
'encrypted_password': authentication.encrypt_user_password(password),
}
- raise Unauthorized()
+ abort(403)
@resource('/v1/superusers/users/<username>/sendrecovery')
@@ -249,7 +292,6 @@ class SuperUserList(ApiResource):
@show_if(features.MAILING)
class SuperUserSendRecoveryEmail(ApiResource):
""" Resource for sending a recovery user on behalf of a user. """
-
@require_fresh_login
@verify_not_prod
@nickname('sendInstallUserRecoveryEmail')
@@ -257,23 +299,23 @@ class SuperUserSendRecoveryEmail(ApiResource):
def post(self, username):
# Ensure that we are using database auth.
if app.config['AUTHENTICATION_TYPE'] != 'Database':
- raise InvalidRequest('Cannot send a recovery e-mail for non-database auth')
+ abort(400)
if SuperUserPermission().can():
- user = pre_oci_model.get_nonrobot_user(username)
- if user is None:
- raise NotFound()
+ user = model.user.get_nonrobot_user(username)
+ if not user:
+ abort(404)
if superusers.is_superuser(username):
- raise InvalidRequest('Cannot send a recovery email for a superuser')
+ abort(403)
- code = pre_oci_model.create_reset_password_email_code(user.email)
- send_recovery_email(user.email, code)
+ code = model.user.create_reset_password_email_code(user.email)
+ send_recovery_email(user.email, code.code)
return {
'email': user.email
}
- raise Unauthorized()
+ abort(403)
@resource('/v1/superuser/users/<username>')
@@ -311,13 +353,13 @@ class SuperUserManagement(ApiResource):
def get(self, username):
""" Returns information about the specified user. """
if SuperUserPermission().can():
- user = pre_oci_model.get_nonrobot_user(username)
- if user is None:
- raise NotFound()
+ user = model.user.get_nonrobot_user(username)
+ if not user:
+ abort(404)
- return user.to_dict()
+ return user_view(user)
- raise Unauthorized()
+ abort(403)
@require_fresh_login
@verify_not_prod
@@ -326,17 +368,17 @@ class SuperUserManagement(ApiResource):
def delete(self, username):
""" Deletes the specified user. """
if SuperUserPermission().can():
- user = pre_oci_model.get_nonrobot_user(username)
- if user is None:
- raise NotFound()
+ user = model.user.get_nonrobot_user(username)
+ if not user:
+ abort(404)
if superusers.is_superuser(username):
- raise InvalidRequest('Cannot delete a superuser')
+ abort(403)
- pre_oci_model.mark_user_for_deletion(username)
+ model.user.delete_user(user, all_queues, force=True)
return '', 204
- raise Unauthorized()
+ abort(403)
@require_fresh_login
@verify_not_prod
@@ -346,31 +388,32 @@ class SuperUserManagement(ApiResource):
def put(self, username):
""" Updates information about the specified user. """
if SuperUserPermission().can():
- user = pre_oci_model.get_nonrobot_user(username)
- if user is None:
- raise NotFound()
+ user = model.user.get_nonrobot_user(username)
+ if not user:
+ abort(404)
if superusers.is_superuser(username):
- raise InvalidRequest('Cannot update a superuser')
+ abort(403)
user_data = request.get_json()
if 'password' in user_data:
# Ensure that we are using database auth.
if app.config['AUTHENTICATION_TYPE'] != 'Database':
- raise InvalidRequest('Cannot change password in non-database auth')
+ abort(400)
- pre_oci_model.change_password(username, user_data['password'])
+ model.user.change_password(user, user_data['password'])
if 'email' in user_data:
# Ensure that we are using database auth.
- if app.config['AUTHENTICATION_TYPE'] not in ['Database', 'AppToken']:
- raise InvalidRequest('Cannot change e-mail in non-database auth')
+ if app.config['AUTHENTICATION_TYPE'] != 'Database':
+ abort(400)
- pre_oci_model.update_email(username, user_data['email'], auto_verify=True)
+ model.user.update_email(user, user_data['email'], auto_verify=True)
if 'enabled' in user_data:
# Disable/enable the user.
- pre_oci_model.update_enabled(username, bool(user_data['enabled']))
+ user.enabled = bool(user_data['enabled'])
+ user.save()
if 'superuser' in user_data:
config_object = config_provider.get_config()
@@ -384,16 +427,9 @@ class SuperUserManagement(ApiResource):
config_object['SUPER_USERS'] = list(superusers_set)
config_provider.save_config(config_object)
- return_value = user.to_dict()
- if user_data.get('password') is not None:
- password = user_data.get('password')
- return_value['encrypted_password'] = authentication.encrypt_user_password(password)
- if user_data.get('email') is not None:
- return_value['email'] = user_data.get('email')
+ return user_view(user, password=user_data.get('password'))
- return return_value
-
- raise Unauthorized()
+ abort(403)
@resource('/v1/superuser/takeownership/<namespace>')
@@ -402,7 +438,6 @@ class SuperUserManagement(ApiResource):
@show_if(features.SUPER_USERS)
class SuperUserTakeOwnership(ApiResource):
""" Resource for a superuser to take ownership of a namespace. """
-
@require_fresh_login
@verify_not_prod
@nickname('takeOwnership')
@@ -412,16 +447,25 @@ class SuperUserTakeOwnership(ApiResource):
if SuperUserPermission().can():
# Disallow for superusers.
if superusers.is_superuser(namespace):
- raise InvalidRequest('Cannot take ownership of a superuser')
+ abort(400)
+
+ entity = model.user.get_user_or_org(namespace)
+ if entity is None:
+ abort(404)
authed_user = get_authenticated_user()
- entity_id, was_user = pre_oci_model.take_ownership(namespace, authed_user)
- if entity_id is None:
- raise NotFound()
+ was_user = not entity.organization
+ if entity.organization:
+ # Add the superuser as an admin to the owners team of the org.
+ model.organization.add_user_as_admin(authed_user, entity)
+ else:
+ # If the entity is a user, convert it to an organization and add the current superuser
+ # as the admin.
+        model.organization.convert_user_to_organization(entity, authed_user)
# Log the change.
log_metadata = {
- 'entity_id': entity_id,
+ 'entity_id': entity.id,
'namespace': namespace,
'was_user': was_user,
'superuser': authed_user.username,
@@ -433,11 +477,12 @@ class SuperUserTakeOwnership(ApiResource):
'namespace': namespace
})
- raise Unauthorized()
+ abort(403)
@resource('/v1/superuser/organizations/<name>')
@path_param('name', 'The name of the organization being managed')
+@internal_only
@show_if(features.SUPER_USERS)
class SuperUserOrganizationManagement(ApiResource):
""" Resource for managing organizations in the system. """
@@ -462,10 +507,12 @@ class SuperUserOrganizationManagement(ApiResource):
def delete(self, name):
""" Deletes the specified organization. """
if SuperUserPermission().can():
- pre_oci_model.mark_organization_for_deletion(name)
+ org = model.organization.get_organization(name)
+
+ model.user.delete_user(org, all_queues)
return '', 204
- raise Unauthorized()
+ abort(403)
@require_fresh_login
@verify_not_prod
@@ -475,12 +522,15 @@ class SuperUserOrganizationManagement(ApiResource):
def put(self, name):
""" Updates information about the specified user. """
if SuperUserPermission().can():
+ org = model.organization.get_organization(name)
org_data = request.get_json()
- old_name = org_data['name'] if 'name' in org_data else None
- org = pre_oci_model.change_organization_name(name, old_name)
- return org.to_dict()
- raise Unauthorized()
+ if 'name' in org_data:
+ org = model.user.change_username(org.id, org_data['name'])
+
+ return org_view(org)
+
+ abort(403)
def key_view(key):
@@ -546,13 +596,13 @@ class SuperUserServiceKeyManagement(ApiResource):
@require_scope(scopes.SUPERUSER)
def get(self):
if SuperUserPermission().can():
- keys = pre_oci_model.list_all_service_keys()
+ keys = model.service_keys.list_all_keys()
return jsonify({
- 'keys': [key.to_dict() for key in keys],
+ 'keys': [key_view(key) for key in keys],
})
- raise Unauthorized()
+ abort(403)
@require_fresh_login
@verify_not_prod
@@ -562,20 +612,17 @@ class SuperUserServiceKeyManagement(ApiResource):
def post(self):
if SuperUserPermission().can():
body = request.get_json()
- key_name = body.get('name', '')
- if not validate_service_key_name(key_name):
- raise InvalidRequest('Invalid service key friendly name: %s' % key_name)
# Ensure we have a valid expiration date if specified.
expiration_date = body.get('expiration', None)
if expiration_date is not None:
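+      # The expiration is given as a Unix timestamp (seconds since the epoch);
+      # anything that does not parse as a float is rejected below.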
try:
expiration_date = datetime.utcfromtimestamp(float(expiration_date))
- except ValueError as ve:
- raise InvalidRequest('Invalid expiration date: %s' % ve)
+ except ValueError:
+ abort(400)
      if expiration_date <= datetime.utcnow():
- raise InvalidRequest('Expiration date cannot be in the past')
+ abort(400)
# Create the metadata for the key.
user = get_authenticated_user()
@@ -583,23 +630,23 @@ class SuperUserServiceKeyManagement(ApiResource):
metadata.update({
'created_by': 'Quay Superuser Panel',
'creator': user.username,
- 'ip': get_request_ip(),
+ 'ip': request.remote_addr,
})
# Generate a key with a private key that we *never save*.
- (private_key, key_id) = pre_oci_model.generate_service_key(body['service'], expiration_date,
- metadata=metadata,
- name=key_name)
+ (private_key, key) = model.service_keys.generate_service_key(body['service'], expiration_date,
+ metadata=metadata,
+ name=body.get('name', ''))
# Auto-approve the service key.
- pre_oci_model.approve_service_key(key_id, user, ServiceKeyApprovalType.SUPERUSER,
- notes=body.get('notes', ''))
+ model.service_keys.approve_service_key(key.kid, user, ServiceKeyApprovalType.SUPERUSER,
+ notes=body.get('notes', ''))
# Log the creation and auto-approval of the service key.
key_log_metadata = {
- 'kid': key_id,
+ 'kid': key.kid,
'preshared': True,
'service': body['service'],
- 'name': key_name,
+ 'name': body.get('name', ''),
'expiration_date': expiration_date,
'auto_approved': True,
}
@@ -608,14 +655,14 @@ class SuperUserServiceKeyManagement(ApiResource):
log_action('service_key_approve', None, key_log_metadata)
return jsonify({
- 'kid': key_id,
- 'name': key_name,
+ 'kid': key.kid,
+ 'name': body.get('name', ''),
'service': body['service'],
'public_key': private_key.publickey().exportKey('PEM'),
'private_key': private_key.exportKey('PEM'),
})
- raise Unauthorized()
+ abort(403)
@resource('/v1/superuser/keys/<kid>')
@@ -651,12 +698,12 @@ class SuperUserServiceKey(ApiResource):
def get(self, kid):
if SuperUserPermission().can():
try:
- key = pre_oci_model.get_service_key(kid, approved_only=False, alive_only=False)
- return jsonify(key.to_dict())
- except ServiceKeyDoesNotExist:
- raise NotFound()
+ key = model.service_keys.get_service_key(kid, approved_only=False, alive_only=False)
+ return jsonify(key_view(key))
+ except model.service_keys.ServiceKeyDoesNotExist:
+ abort(404)
- raise Unauthorized()
+ abort(403)
@require_fresh_login
@verify_not_prod
@@ -667,9 +714,9 @@ class SuperUserServiceKey(ApiResource):
if SuperUserPermission().can():
body = request.get_json()
try:
- key = pre_oci_model.get_service_key(kid, approved_only=False, alive_only=False)
- except ServiceKeyDoesNotExist:
- raise NotFound()
+ key = model.service_keys.get_service_key(kid, approved_only=False, alive_only=False)
+ except model.service_keys.ServiceKeyDoesNotExist:
+ abort(404)
key_log_metadata = {
'kid': key.kid,
@@ -683,11 +730,11 @@ class SuperUserServiceKey(ApiResource):
if expiration_date is not None and expiration_date != '':
try:
expiration_date = datetime.utcfromtimestamp(float(expiration_date))
- except ValueError as ve:
- raise InvalidRequest('Invalid expiration date: %s' % ve)
+ except ValueError:
+ abort(400)
      if expiration_date <= datetime.utcnow():
- raise InvalidRequest('Cannot have an expiration date in the past')
+ abort(400)
key_log_metadata.update({
'old_expiration_date': key.expiration_date,
@@ -695,20 +742,17 @@ class SuperUserServiceKey(ApiResource):
})
log_action('service_key_extend', None, key_log_metadata)
- pre_oci_model.set_key_expiration(kid, expiration_date)
+ model.service_keys.set_key_expiration(kid, expiration_date)
+
if 'name' in body or 'metadata' in body:
- key_name = body.get('name')
- if not validate_service_key_name(key_name):
- raise InvalidRequest('Invalid service key friendly name: %s' % key_name)
-
- pre_oci_model.update_service_key(kid, key_name, body.get('metadata'))
+ model.service_keys.update_service_key(kid, body.get('name'), body.get('metadata'))
log_action('service_key_modify', None, key_log_metadata)
- updated_key = pre_oci_model.get_service_key(kid, approved_only=False, alive_only=False)
- return jsonify(updated_key.to_dict())
+ updated_key = model.service_keys.get_service_key(kid, approved_only=False, alive_only=False)
+ return jsonify(key_view(updated_key))
- raise Unauthorized()
+ abort(403)
@require_fresh_login
@verify_not_prod
@@ -717,9 +761,9 @@ class SuperUserServiceKey(ApiResource):
def delete(self, kid):
if SuperUserPermission().can():
try:
- key = pre_oci_model.delete_service_key(kid)
- except ServiceKeyDoesNotExist:
- raise NotFound()
+ key = model.service_keys.delete_service_key(kid)
+ except model.service_keys.ServiceKeyDoesNotExist:
+ abort(404)
key_log_metadata = {
'kid': kid,
@@ -732,7 +776,7 @@ class SuperUserServiceKey(ApiResource):
log_action('service_key_delete', None, key_log_metadata)
return make_response('', 204)
- raise Unauthorized()
+ abort(403)
@resource('/v1/superuser/approvedkeys/<kid>')
@@ -765,8 +809,8 @@ class SuperUserServiceKeyApproval(ApiResource):
notes = request.get_json().get('notes', '')
approver = get_authenticated_user()
try:
- key = pre_oci_model.approve_service_key(kid, approver, ServiceKeyApprovalType.SUPERUSER,
- notes=notes)
+ key = model.service_keys.approve_service_key(kid, approver, ServiceKeyApprovalType.SUPERUSER,
+ notes=notes)
# Log the approval of the service key.
key_log_metadata = {
@@ -777,14 +821,168 @@ class SuperUserServiceKeyApproval(ApiResource):
}
log_action('service_key_approve', None, key_log_metadata)
- except ServiceKeyDoesNotExist:
- raise NotFound()
- except ServiceKeyAlreadyApproved:
+ except model.ServiceKeyDoesNotExist:
+ abort(404)
+ except model.ServiceKeyAlreadyApproved:
pass
return make_response('', 201)
- raise Unauthorized()
+ abort(403)
+
+
+@resource('/v1/superuser/customcerts')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserCustomCertificates(ApiResource):
+ """ Resource for managing custom certificates. """
+ @nickname('getCustomCertificates')
+ @require_fresh_login
+ @require_scope(scopes.SUPERUSER)
+ @verify_not_prod
+ def get(self):
+ if SuperUserPermission().can():
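+      # list_volume_directory returns None when EXTRA_CA_DIRECTORY is absent or
+      # is a plain file rather than a directory; report which case applies.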
+ has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY)
+ extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY)
+ if extra_certs_found is None:
+ return {
+ 'status': 'file' if has_extra_certs_path else 'none',
+ }
+
+ cert_views = []
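+      # Build one view per certificate: parsed names/expiry on success, or the
+      # parse error inline so callers can surface each bad certificate.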
+ for extra_cert_path in extra_certs_found:
+ try:
+ cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, extra_cert_path)
+ with config_provider.get_volume_file(cert_full_path) as f:
+ certificate = load_certificate(f.read())
+ cert_views.append({
+ 'path': extra_cert_path,
+ 'names': list(certificate.names),
+ 'expired': certificate.expired,
+ })
+ except CertInvalidException as cie:
+ cert_views.append({
+ 'path': extra_cert_path,
+ 'error': cie.message,
+ })
+ except IOError as ioe:
+ cert_views.append({
+ 'path': extra_cert_path,
+ 'error': ioe.message,
+ })
+
+ return {
+ 'status': 'directory',
+ 'certs': cert_views,
+ }
+
+ abort(403)
+
+
+@resource('/v1/superuser/customcerts/<certpath>')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserCustomCertificate(ApiResource):
+ """ Resource for managing a custom certificate. """
+ @nickname('uploadCustomCertificate')
+ @require_fresh_login
+ @require_scope(scopes.SUPERUSER)
+ @verify_not_prod
+ def post(self, certpath):
+ if SuperUserPermission().can():
+ uploaded_file = request.files['file']
+ if not uploaded_file:
+ abort(400)
+
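+      # Sanitize the user-supplied filename so the saved path cannot escape
+      # EXTRA_CA_DIRECTORY via separators or parent-directory references.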
+ certpath = pathvalidate.sanitize_filename(certpath)
+ cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, certpath)
+ config_provider.save_volume_file(cert_full_path, uploaded_file)
+ return '', 204
+
+ abort(403)
+
+ @nickname('deleteCustomCertificate')
+ @require_fresh_login
+ @require_scope(scopes.SUPERUSER)
+ @verify_not_prod
+ def delete(self, certpath):
+ if SuperUserPermission().can():
+ cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, certpath)
+ config_provider.remove_volume_file(cert_full_path)
+ return '', 204
+
+ abort(403)
+
+
+@resource('/v1/superuser/license')
+@internal_only
+@show_if(features.SUPER_USERS)
+class SuperUserLicense(ApiResource):
+ """ Resource for getting and setting a license. """
+ schemas = {
+ 'UpdateLicense': {
+ 'type': 'object',
+ 'description': 'Updates a license',
+ 'required': [
+ 'license',
+ ],
+ 'properties': {
+ 'license': {
+ 'type': 'string'
+ },
+ },
+ },
+ }
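+  # A request body for this schema carries the raw encoded license string,
+  # e.g. {"license": "..."}, in whatever format decode_license accepts.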
+
+ @nickname('getLicense')
+ @require_fresh_login
+ @require_scope(scopes.SUPERUSER)
+ @verify_not_prod
+ def get(self):
+ """ Returns the current decoded license. """
+ if SuperUserPermission().can():
+ try:
+ decoded_license = config_provider.get_license()
+ except LicenseDecodeError as le:
+ raise InvalidRequest(le.message)
+
+ statuses = decoded_license.validate(app.config)
+ all_met = all(status.is_met() for status in statuses)
+
+ return {
+ 'status': [status.as_dict(for_private=True) for status in statuses],
+ 'success': all_met,
+ }
+
+ abort(403)
+
+ @nickname('updateLicense')
+ @require_fresh_login
+ @require_scope(scopes.SUPERUSER)
+ @verify_not_prod
+ @validate_json_request('UpdateLicense')
+ def put(self):
+ """ Validates the given license contents and then saves it to the config volume. """
+ if SuperUserPermission().can():
+ license_contents = request.get_json()['license']
+ try:
+ decoded_license = decode_license(license_contents)
+ except LicenseDecodeError as le:
+ raise InvalidRequest(le.message)
+
+ statuses = decoded_license.validate(app.config)
+ all_met = all(status.is_met() for status in statuses)
+ if all_met:
+ # Save the license and update the license check thread.
+ config_provider.save_license(license_contents)
+ license_validator.compute_license_sufficiency()
+
+ return {
+ 'status': [status.as_dict(for_private=True) for status in statuses],
+ 'success': all_met,
+ }
+
+ abort(403)
@resource('/v1/superuser/<build_uuid>/logs')
@@ -792,21 +990,16 @@ class SuperUserServiceKeyApproval(ApiResource):
@show_if(features.SUPER_USERS)
class SuperUserRepositoryBuildLogs(ApiResource):
""" Resource for loading repository build logs for the superuser. """
-
@require_fresh_login
@verify_not_prod
@nickname('getRepoBuildLogsSuperUser')
@require_scope(scopes.SUPERUSER)
def get(self, build_uuid):
""" Return the build logs for the build specified by the build uuid. """
- if SuperUserPermission().can():
- try:
- repo_build = pre_oci_model.get_repository_build(build_uuid)
- return get_logs_or_log_url(repo_build)
- except InvalidRepositoryBuildException as e:
- raise InvalidResponse(str(e))
+ if not SuperUserPermission().can():
+ abort(403)
- raise Unauthorized()
+ return get_logs_or_log_url(model.build.get_repository_build(build_uuid))
@resource('/v1/superuser/<build_uuid>/status')
@@ -815,21 +1008,16 @@ class SuperUserRepositoryBuildLogs(ApiResource):
@show_if(features.SUPER_USERS)
class SuperUserRepositoryBuildStatus(ApiResource):
""" Resource for dealing with repository build status. """
-
@require_fresh_login
@verify_not_prod
@nickname('getRepoBuildStatusSuperUser')
@require_scope(scopes.SUPERUSER)
def get(self, build_uuid):
""" Return the status for the builds specified by the build uuids. """
- if SuperUserPermission().can():
- try:
- build = pre_oci_model.get_repository_build(build_uuid)
- except InvalidRepositoryBuildException as e:
- raise InvalidResponse(str(e))
- return build.to_dict()
-
- raise Unauthorized()
+ if not SuperUserPermission().can():
+ abort(403)
+ build = model.build.get_repository_build(build_uuid)
+ return build_status_view(build)
@resource('/v1/superuser/<build_uuid>/build')
@@ -838,19 +1026,18 @@ class SuperUserRepositoryBuildStatus(ApiResource):
@show_if(features.SUPER_USERS)
class SuperUserRepositoryBuildResource(ApiResource):
""" Resource for dealing with repository builds as a super user. """
-
@require_fresh_login
@verify_not_prod
@nickname('getRepoBuildSuperUser')
@require_scope(scopes.SUPERUSER)
def get(self, build_uuid):
""" Returns information about a build. """
- if SuperUserPermission().can():
- try:
- build = pre_oci_model.get_repository_build(build_uuid)
- except InvalidRepositoryBuildException:
- raise NotFound()
+ if not SuperUserPermission().can():
+ abort(403)
- return build.to_dict()
+ try:
+ build = model.build.get_repository_build(build_uuid)
+ except model.build.InvalidRepositoryBuildException:
+ raise NotFound()
- raise Unauthorized()
+ return build_status_view(build)
diff --git a/endpoints/api/superuser_models_interface.py b/endpoints/api/superuser_models_interface.py
deleted file mode 100644
index e03d98e8c..000000000
--- a/endpoints/api/superuser_models_interface.py
+++ /dev/null
@@ -1,335 +0,0 @@
-import json
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-from datetime import datetime
-
-from dateutil.relativedelta import relativedelta
-from six import add_metaclass
-from tzlocal import get_localzone
-
-from app import avatar, superusers
-from buildtrigger.basehandler import BuildTriggerHandler
-from data import model
-from endpoints.api import format_date
-from util.morecollections import AttrDict
-
-
-def user_view(user):
- return {
- 'name': user.username,
- 'kind': 'user',
- 'is_robot': user.robot,
- }
-
-
-class BuildTrigger(
- namedtuple('BuildTrigger', ['uuid', 'service_name', 'pull_robot', 'can_read', 'can_admin', 'for_build'])):
- """
- BuildTrigger represent a trigger that is associated with a build
- :type uuid: string
- :type service_name: string
- :type pull_robot: User
- :type can_read: boolean
- :type can_admin: boolean
- :type for_build: boolean
- """
-
- def to_dict(self):
- if not self.uuid:
- return None
-
- build_trigger = BuildTriggerHandler.get_handler(self)
- build_source = build_trigger.config.get('build_source')
-
- repo_url = build_trigger.get_repository_url() if build_source else None
- can_read = self.can_read or self.can_admin
-
- trigger_data = {
- 'id': self.uuid,
- 'service': self.service_name,
- 'is_active': build_trigger.is_active(),
-
- 'build_source': build_source if can_read else None,
- 'repository_url': repo_url if can_read else None,
-
- 'config': build_trigger.config if self.can_admin else {},
- 'can_invoke': self.can_admin,
- }
-
- if not self.for_build and self.can_admin and self.pull_robot:
- trigger_data['pull_robot'] = user_view(self.pull_robot)
-
- return trigger_data
-
-
-class RepositoryBuild(namedtuple('RepositoryBuild',
- ['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name',
- 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name',
- 'started', 'job_config', 'phase', 'status', 'error', 'archive_url'])):
- """
-  RepositoryBuild represents a build associated with a repository
- :type uuid: string
- :type logs_archived: boolean
- :type repository_namespace_user_username: string
- :type repository_name: string
- :type can_write: boolean
-  :type can_read: boolean
- :type pull_robot: User
- :type resource_key: string
- :type trigger: Trigger
- :type display_name: string
- :type started: boolean
- :type job_config: {Any -> Any}
- :type phase: string
- :type status: string
- :type error: string
- :type archive_url: string
- """
-
- def to_dict(self):
-
- resp = {
- 'id': self.uuid,
- 'phase': self.phase,
- 'started': format_date(self.started),
- 'display_name': self.display_name,
- 'status': self.status or {},
- 'subdirectory': self.job_config.get('build_subdir', ''),
- 'dockerfile_path': self.job_config.get('build_subdir', ''),
- 'context': self.job_config.get('context', ''),
- 'tags': self.job_config.get('docker_tags', []),
- 'manual_user': self.job_config.get('manual_user', None),
- 'is_writer': self.can_write,
- 'trigger': self.trigger.to_dict(),
- 'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None,
- 'resource_key': self.resource_key,
- 'pull_robot': user_view(self.pull_robot) if self.pull_robot else None,
- 'repository': {
- 'namespace': self.repository_namespace_user_username,
- 'name': self.repository_name
- },
- 'error': self.error,
- }
-
- if self.can_write:
- if self.resource_key is not None:
- resp['archive_url'] = self.archive_url
- elif self.job_config.get('archive_url', None):
- resp['archive_url'] = self.job_config['archive_url']
-
- return resp
-
-
-class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])):
- """
- Approval represents whether a key has been approved or not
- :type approver: User
- :type approval_type: string
- :type approved_date: Date
- :type notes: string
- """
-
- def to_dict(self):
- return {
- 'approver': self.approver.to_dict() if self.approver else None,
- 'approval_type': self.approval_type,
- 'approved_date': self.approved_date,
- 'notes': self.notes,
- }
-
-
-class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date',
- 'expiration_date', 'rotation_duration', 'approval'])):
- """
- ServiceKey is an apostille signing key
- :type name: string
- :type kid: int
- :type service: string
- :type jwk: string
- :type metadata: string
- :type created_date: Date
- :type expiration_date: Date
- :type rotation_duration: Date
- :type approval: Approval
-
- """
-
- def to_dict(self):
- return {
- 'name': self.name,
- 'kid': self.kid,
- 'service': self.service,
- 'jwk': self.jwk,
- 'metadata': self.metadata,
- 'created_date': self.created_date,
- 'expiration_date': self.expiration_date,
- 'rotation_duration': self.rotation_duration,
- 'approval': self.approval.to_dict() if self.approval is not None else None,
- }
-
-
-class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])):
- """
- User represents a single user.
- :type username: string
- :type email: string
- :type verified: boolean
- :type enabled: boolean
- :type robot: User
- """
-
- def to_dict(self):
- user_data = {
- 'kind': 'user',
- 'name': self.username,
- 'username': self.username,
- 'email': self.email,
- 'verified': self.verified,
- 'avatar': avatar.get_data_for_user(self),
- 'super_user': superusers.is_superuser(self.username),
- 'enabled': self.enabled,
- }
-
- return user_data
-
-
-class Organization(namedtuple('Organization', ['username', 'email'])):
- """
- Organization represents a single org.
- :type username: string
- :type email: string
- """
-
- def to_dict(self):
- return {
- 'name': self.username,
- 'email': self.email,
- 'avatar': avatar.get_data_for_org(self),
- }
-
-
-@add_metaclass(ABCMeta)
-class SuperuserDataInterface(object):
- """
- Interface that represents all data store interactions required by a superuser api.
- """
-
- @abstractmethod
- def get_organizations(self):
- """
- Returns a list of Organization
- """
-
- @abstractmethod
- def get_active_users(self):
- """
- Returns a list of User
- """
-
- @abstractmethod
- def create_install_user(self, username, password, email):
- """
- Returns the created user and confirmation code for email confirmation
- """
-
- @abstractmethod
- def get_nonrobot_user(self, username):
- """
- Returns a User
- """
-
- @abstractmethod
- def create_reset_password_email_code(self, email):
- """
- Returns a recover password code
- """
-
- @abstractmethod
- def mark_user_for_deletion(self, username):
- """
- Returns None
- """
-
- @abstractmethod
- def change_password(self, username, password):
- """
- Returns None
- """
-
- @abstractmethod
- def update_email(self, username, email, auto_verify):
- """
- Returns None
- """
-
- @abstractmethod
- def update_enabled(self, username, enabled):
- """
- Returns None
- """
-
- @abstractmethod
- def take_ownership(self, namespace, authed_user):
- """
- Returns id of entity and whether the entity was a user
- """
-
- @abstractmethod
- def mark_organization_for_deletion(self, name):
- """
- Returns None
- """
-
- @abstractmethod
- def change_organization_name(self, old_org_name, new_org_name):
- """
- Returns updated Organization
- """
-
- @abstractmethod
- def list_all_service_keys(self):
- """
- Returns a list of service keys
- """
-
- @abstractmethod
- def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
- """
- Returns a tuple of private key and public key id
- """
-
- @abstractmethod
- def approve_service_key(self, kid, approver, approval_type, notes=''):
- """
- Returns the approved Key
- """
-
- @abstractmethod
- def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
- """
- Returns ServiceKey
- """
-
- @abstractmethod
- def set_key_expiration(self, kid, expiration_date):
- """
- Returns None
- """
-
- @abstractmethod
- def update_service_key(self, kid, name=None, metadata=None):
- """
- Returns None
- """
-
- @abstractmethod
- def delete_service_key(self, kid):
- """
- Returns deleted ServiceKey
- """
-
- @abstractmethod
- def get_repository_build(self, uuid):
- """
- Returns RepositoryBuild
- """
diff --git a/endpoints/api/superuser_models_pre_oci.py b/endpoints/api/superuser_models_pre_oci.py
deleted file mode 100644
index 0458f9226..000000000
--- a/endpoints/api/superuser_models_pre_oci.py
+++ /dev/null
@@ -1,182 +0,0 @@
-import features
-
-from flask import request
-
-from app import all_queues, userfiles, namespace_gc_queue
-from auth.permissions import ReadRepositoryPermission, ModifyRepositoryPermission, AdministerRepositoryPermission
-from data import model, database
-from endpoints.api.build import get_job_config, _get_build_status
-from endpoints.api.superuser_models_interface import BuildTrigger
-from endpoints.api.superuser_models_interface import SuperuserDataInterface, Organization, User, \
- ServiceKey, Approval, RepositoryBuild
-from util.request import get_request_ip
-
-
-def _create_user(user):
- if user is None:
- return None
- return User(user.username, user.email, user.verified, user.enabled, user.robot)
-
-
-def _create_key(key):
- approval = None
- if key.approval is not None:
- approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, key.approval.approved_date,
- key.approval.notes)
-
- return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date,
- key.rotation_duration, approval)
-
-
-class ServiceKeyDoesNotExist(Exception):
- pass
-
-
-class ServiceKeyAlreadyApproved(Exception):
- pass
-
-
-class InvalidRepositoryBuildException(Exception):
- pass
-
-
-class PreOCIModel(SuperuserDataInterface):
- """
- PreOCIModel implements the data model for the SuperUser using a database schema
- before it was changed to support the OCI specification.
- """
-
- def get_repository_build(self, uuid):
- try:
- build = model.build.get_repository_build(uuid)
- except model.InvalidRepositoryBuildException as e:
- raise InvalidRepositoryBuildException(str(e))
-
- repo_namespace = build.repository_namespace_user_username
- repo_name = build.repository_name
-
- can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
- can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
- can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
- job_config = get_job_config(build.job_config)
- phase, status, error = _get_build_status(build)
-    url = userfiles.get_file_url(build.resource_key, get_request_ip(), requires_cors=True)
-
-    return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace, repo_name, can_write, can_read,
-                           _create_user(build.pull_robot), build.resource_key,
-                           BuildTrigger(build.trigger.uuid, build.trigger.service.name,
-                                        _create_user(build.trigger.pull_robot), can_read, can_admin, True),
-                           build.display_name, build.started, job_config, phase, status, error, url)
-
- def delete_service_key(self, kid):
- try:
- key = model.service_keys.delete_service_key(kid)
- except model.ServiceKeyDoesNotExist:
- raise ServiceKeyDoesNotExist
- return _create_key(key)
-
- def update_service_key(self, kid, name=None, metadata=None):
- model.service_keys.update_service_key(kid, name, metadata)
-
- def set_key_expiration(self, kid, expiration_date):
- model.service_keys.set_key_expiration(kid, expiration_date)
-
- def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
- try:
- key = model.service_keys.get_service_key(kid, approved_only=approved_only, alive_only=alive_only)
- return _create_key(key)
- except model.ServiceKeyDoesNotExist:
- raise ServiceKeyDoesNotExist
-
- def approve_service_key(self, kid, approver, approval_type, notes=''):
- try:
- key = model.service_keys.approve_service_key(kid, approval_type, approver=approver, notes=notes)
- return _create_key(key)
- except model.ServiceKeyDoesNotExist:
- raise ServiceKeyDoesNotExist
- except model.ServiceKeyAlreadyApproved:
- raise ServiceKeyAlreadyApproved
-
- def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
- (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name)
-
- return private_key, key.kid
-
- def list_all_service_keys(self):
- keys = model.service_keys.list_all_keys()
- return [_create_key(key) for key in keys]
-
- def change_organization_name(self, old_org_name, new_org_name):
- org = model.organization.get_organization(old_org_name)
- if new_org_name is not None:
- org = model.user.change_username(org.id, new_org_name)
-
- return Organization(org.username, org.email)
-
- def mark_organization_for_deletion(self, name):
- org = model.organization.get_organization(name)
- model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue, force=True)
-
- def take_ownership(self, namespace, authed_user):
- entity = model.user.get_user_or_org(namespace)
- if entity is None:
- return None, False
-
- was_user = not entity.organization
- if entity.organization:
- # Add the superuser as an admin to the owners team of the org.
- model.organization.add_user_as_admin(authed_user, entity)
- else:
- # If the entity is a user, convert it to an organization and add the current superuser
- # as the admin.
- model.organization.convert_user_to_organization(entity, authed_user)
- return entity.id, was_user
-
- def update_enabled(self, username, enabled):
- user = model.user.get_nonrobot_user(username)
- model.user.update_enabled(user, bool(enabled))
-
- def update_email(self, username, email, auto_verify):
- user = model.user.get_nonrobot_user(username)
- model.user.update_email(user, email, auto_verify)
-
- def change_password(self, username, password):
- user = model.user.get_nonrobot_user(username)
- model.user.change_password(user, password)
-
- def mark_user_for_deletion(self, username):
- user = model.user.get_nonrobot_user(username)
- model.user.mark_namespace_for_deletion(user, all_queues, namespace_gc_queue, force=True)
-
- def create_reset_password_email_code(self, email):
- code = model.user.create_reset_password_email_code(email)
- return code
-
- def get_nonrobot_user(self, username):
- user = model.user.get_nonrobot_user(username)
- if user is None:
- return None
- return _create_user(user)
-
- def create_install_user(self, username, password, email):
- prompts = model.user.get_default_user_prompts(features)
- user = model.user.create_user(username, password, email, auto_verify=not features.MAILING,
- email_required=features.MAILING, prompts=prompts)
-
- return_user = _create_user(user)
- # If mailing is turned on, send the user a verification email.
- if features.MAILING:
- confirmation_code = model.user.create_confirm_email_code(user)
- return return_user, confirmation_code
-
- return return_user, ''
-
- def get_active_users(self, disabled=True):
- users = model.user.get_active_users(disabled=disabled)
- return [_create_user(user) for user in users]
-
- def get_organizations(self):
- return [Organization(org.username, org.email) for org in model.organization.get_organizations()]
-
-
-pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/tag.py b/endpoints/api/tag.py
index 573f0fc97..51ace9d9c 100644
--- a/endpoints/api/tag.py
+++ b/endpoints/api/tag.py
@@ -1,87 +1,59 @@
""" Manage the tags of a repository. """
-from datetime import datetime
+
from flask import request, abort
-from app import storage, docker_v2_signing_key
-from auth.auth_context import get_authenticated_user
-from data.registry_model import registry_model
from endpoints.api import (resource, nickname, require_repo_read, require_repo_write,
- RepositoryParamResource, log_action, validate_json_request, path_param,
- parse_args, query_param, truthy_bool, disallow_for_app_repositories,
- format_date, disallow_for_non_normal_repositories)
-from endpoints.api.image import image_dict
-from endpoints.exception import NotFound, InvalidRequest
+ RepositoryParamResource, log_action, validate_json_request,
+ path_param, parse_args, query_param, truthy_bool)
+from endpoints.exception import NotFound
+from endpoints.api.image import image_view
+from data import model
+from auth.auth_context import get_authenticated_user
from util.names import TAG_ERROR, TAG_REGEX
-def _tag_dict(tag):
- tag_info = {
- 'name': tag.name,
- 'reversion': tag.reversion,
- }
-
- if tag.lifetime_start_ts > 0:
- tag_info['start_ts'] = tag.lifetime_start_ts
-
- if tag.lifetime_end_ts > 0:
- tag_info['end_ts'] = tag.lifetime_end_ts
-
- # TODO: Remove this once fully on OCI data model.
- if tag.legacy_image_if_present:
- tag_info['docker_image_id'] = tag.legacy_image.docker_image_id
- tag_info['image_id'] = tag.legacy_image.docker_image_id
- tag_info['size'] = tag.legacy_image.aggregate_size
-
- # TODO: Remove this check once fully on OCI data model.
- if tag.manifest_digest:
- tag_info['manifest_digest'] = tag.manifest_digest
-
- if tag.manifest:
- tag_info['is_manifest_list'] = tag.manifest.is_manifest_list
-
- if tag.lifetime_start_ts > 0:
- last_modified = format_date(datetime.utcfromtimestamp(tag.lifetime_start_ts))
- tag_info['last_modified'] = last_modified
-
- if tag.lifetime_end_ts is not None:
- expiration = format_date(datetime.utcfromtimestamp(tag.lifetime_end_ts))
- tag_info['expiration'] = expiration
-
- return tag_info
-
-
@resource('/v1/repository/<repopath:repository>/tag/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class ListRepositoryTags(RepositoryParamResource):
""" Resource for listing full repository tag history, alive *and dead*. """
@require_repo_read
- @disallow_for_app_repositories
@parse_args()
@query_param('specificTag', 'Filters the tags to the specific tag.', type=str, default='')
- @query_param('limit', 'Limit to the number of results to return per page. Max 100.', type=int,
- default=50)
+ @query_param('limit', 'Limit to the number of results to return per page. Max 100.', type=int, default=50)
@query_param('page', 'Page index for the results. Default 1.', type=int, default=1)
- @query_param('onlyActiveTags', 'Filter to only active tags.', type=truthy_bool, default=False)
@nickname('listRepoTags')
def get(self, namespace, repository, parsed_args):
- specific_tag = parsed_args.get('specificTag') or None
- page = max(1, parsed_args.get('page', 1))
- limit = min(100, max(1, parsed_args.get('limit', 50)))
- active_tags_only = parsed_args.get('onlyActiveTags')
-
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ repo = model.repository.get_repository(namespace, repository)
+ if not repo:
raise NotFound()
- history, has_more = registry_model.list_repository_tag_history(repo_ref, page=page,
- size=limit,
- specific_tag_name=specific_tag,
- active_tags_only=active_tags_only)
+ def tag_view(tag):
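+      # lifetime_start_ts / lifetime_end_ts may be zero for tags that never
+      # recorded a creation or deletion time, so only include them when set.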
+ tag_info = {
+ 'name': tag.name,
+ 'docker_image_id': tag.image.docker_image_id,
+ 'reversion': tag.reversion,
+ }
+
+ if tag.lifetime_start_ts > 0:
+ tag_info['start_ts'] = tag.lifetime_start_ts
+
+ if tag.lifetime_end_ts > 0:
+ tag_info['end_ts'] = tag.lifetime_end_ts
+
+ return tag_info
+
+ specific_tag = parsed_args.get('specificTag') or None
+
+ page = max(1, parsed_args.get('page', 1))
+ limit = min(100, max(1, parsed_args.get('limit', 50)))
+ tags, has_additional = model.tag.list_repository_tag_history(repo, page=page, size=limit,
+ specific_tag=specific_tag)
+
return {
- 'tags': [_tag_dict(tag) for tag in history],
+ 'tags': [tag_view(tag) for tag in tags],
'page': page,
- 'has_additional': has_more,
+ 'has_additional': has_additional,
}
@@ -91,134 +63,64 @@ class ListRepositoryTags(RepositoryParamResource):
class RepositoryTag(RepositoryParamResource):
""" Resource for managing repository tags. """
schemas = {
- 'ChangeTag': {
+ 'MoveTag': {
'type': 'object',
- 'description': 'Makes changes to a specific tag',
+      'description': 'Describes the image to which a new or existing tag should point',
+ 'required': [
+ 'image',
+ ],
'properties': {
'image': {
- 'type': ['string', 'null'],
- 'description': '(Deprecated: Use `manifest_digest`) Image to which the tag should point.',
- },
- 'manifest_digest': {
- 'type': ['string', 'null'],
- 'description': '(If specified) The manifest digest to which the tag should point',
- },
- 'expiration': {
- 'type': ['number', 'null'],
- 'description': '(If specified) The expiration for the image',
+ 'type': 'string',
+ 'description': 'Image identifier to which the tag should point',
},
},
},
}
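+  # A minimal request body for this schema (the image ID is illustrative):
+  #   {"image": "8c2e9f3b4a61"}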
@require_repo_write
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
- @nickname('changeTag')
- @validate_json_request('ChangeTag')
+ @nickname('changeTagImage')
+ @validate_json_request('MoveTag')
def put(self, namespace, repository, tag):
""" Change which image a tag points to or create a new tag."""
+
if not TAG_REGEX.match(tag):
abort(400, TAG_ERROR)
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ image_id = request.get_json()['image']
+ image = model.image.get_repo_image(namespace, repository, image_id)
+ if not image:
raise NotFound()
- if 'expiration' in request.get_json():
- tag_ref = registry_model.get_repo_tag(repo_ref, tag)
- if tag_ref is None:
- raise NotFound()
+ original_image_id = None
+ try:
+ original_tag_image = model.tag.get_repo_tag_image(image.repository, tag)
+ if original_tag_image:
+ original_image_id = original_tag_image.docker_image_id
+ except model.DataModelException:
+ # This is a new tag.
+ pass
- expiration = request.get_json().get('expiration')
- expiration_date = None
- if expiration is not None:
- try:
- expiration_date = datetime.utcfromtimestamp(float(expiration))
- except ValueError:
- abort(400)
+ model.tag.create_or_update_tag(namespace, repository, tag, image_id)
- if expiration_date <= datetime.now():
- abort(400)
-
- existing_end_ts, ok = registry_model.change_repository_tag_expiration(tag_ref,
- expiration_date)
- if ok:
- if not (existing_end_ts is None and expiration_date is None):
- log_action('change_tag_expiration', namespace, {
- 'username': get_authenticated_user().username,
- 'repo': repository,
- 'tag': tag,
- 'namespace': namespace,
- 'expiration_date': expiration_date,
- 'old_expiration_date': existing_end_ts
- }, repo_name=repository)
- else:
- raise InvalidRequest('Could not update tag expiration; Tag has probably changed')
-
- if 'image' in request.get_json() or 'manifest_digest' in request.get_json():
- existing_tag = registry_model.get_repo_tag(repo_ref, tag, include_legacy_image=True)
-
- manifest_or_image = None
- image_id = None
- manifest_digest = None
-
- if 'image' in request.get_json():
- image_id = request.get_json()['image']
- manifest_or_image = registry_model.get_legacy_image(repo_ref, image_id)
- else:
- manifest_digest = request.get_json()['manifest_digest']
- manifest_or_image = registry_model.lookup_manifest_by_digest(repo_ref, manifest_digest,
- require_available=True)
-
- if manifest_or_image is None:
- raise NotFound()
-
- # TODO: Remove this check once fully on V22
- existing_manifest_digest = None
- if existing_tag:
- existing_manifest = registry_model.get_manifest_for_tag(existing_tag)
- existing_manifest_digest = existing_manifest.digest if existing_manifest else None
-
- if not registry_model.retarget_tag(repo_ref, tag, manifest_or_image, storage,
- docker_v2_signing_key):
- raise InvalidRequest('Could not move tag')
-
- username = get_authenticated_user().username
-
- log_action('move_tag' if existing_tag else 'create_tag', namespace, {
- 'username': username,
- 'repo': repository,
- 'tag': tag,
- 'namespace': namespace,
- 'image': image_id,
- 'manifest_digest': manifest_digest,
- 'original_image': (existing_tag.legacy_image.docker_image_id
- if existing_tag and existing_tag.legacy_image_if_present
- else None),
- 'original_manifest_digest': existing_manifest_digest,
- }, repo_name=repository)
+ username = get_authenticated_user().username
+ log_action('move_tag' if original_image_id else 'create_tag', namespace,
+ {'username': username, 'repo': repository, 'tag': tag,
+ 'image': image_id, 'original_image': original_image_id},
+ repo=model.repository.get_repository(namespace, repository))
return 'Updated', 201
@require_repo_write
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('deleteFullTag')
def delete(self, namespace, repository, tag):
""" Delete the specified repository tag. """
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
- raise NotFound()
-
- registry_model.delete_tag(repo_ref, tag)
+ model.tag.delete_tag(namespace, repository, tag)
username = get_authenticated_user().username
log_action('delete_tag', namespace,
- {'username': username,
- 'repo': repository,
- 'namespace': namespace,
- 'tag': tag}, repo_name=repository)
+ {'username': username, 'repo': repository, 'tag': tag},
+ repo=model.repository.get_repository(namespace, repository))
return '', 204
@@ -228,109 +130,95 @@ class RepositoryTag(RepositoryParamResource):
@path_param('tag', 'The name of the tag')
class RepositoryTagImages(RepositoryParamResource):
""" Resource for listing the images in a specific repository tag. """
-
@require_repo_read
@nickname('listTagImages')
- @disallow_for_app_repositories
@parse_args()
  @query_param('owned', 'If specified, only images wholly owned by this tag are returned.',
type=truthy_bool, default=False)
def get(self, namespace, repository, tag, parsed_args):
""" List the images for the specified repository tag. """
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ try:
+ tag_image = model.tag.get_tag_image(namespace, repository, tag)
+ except model.DataModelException:
raise NotFound()
- tag_ref = registry_model.get_repo_tag(repo_ref, tag, include_legacy_image=True)
- if tag_ref is None:
- raise NotFound()
+ parent_images = model.image.get_parent_images(namespace, repository, tag_image)
+ image_map = {}
- if tag_ref.legacy_image_if_present is None:
- return {'images': []}
+ image_map[str(tag_image.id)] = tag_image
- image_id = tag_ref.legacy_image.docker_image_id
+ for image in parent_images:
+ image_map[str(image.id)] = image
- all_images = None
+ image_map_all = dict(image_map)
+ all_images = [tag_image] + list(parent_images)
+
+ # Filter the images returned to those not found in the ancestry of any of the other tags in
+ # the repository.
if parsed_args['owned']:
- # TODO: Remove the `owned` image concept once we are fully on V2_2.
- all_images = registry_model.get_legacy_images_owned_by_tag(tag_ref)
- else:
- image_with_parents = registry_model.get_legacy_image(repo_ref, image_id, include_parents=True)
- if image_with_parents is None:
- raise NotFound()
+ all_tags = model.tag.list_repository_tags(namespace, repository)
+ for current_tag in all_tags:
+ if current_tag.name == tag:
+ continue
- all_images = [image_with_parents] + image_with_parents.parents
+ # Remove the tag's image ID.
+ tag_image_id = str(current_tag.image_id)
+ image_map.pop(tag_image_id, None)
+
+ # Remove any ancestors:
+ for ancestor_id in current_tag.image.ancestors.split('/'):
+ image_map.pop(ancestor_id, None)
return {
- 'images': [image_dict(image) for image in all_images],
+ 'images': [image_view(image, image_map_all) for image in all_images
+ if not parsed_args['owned'] or (str(image.id) in image_map)]
}
-@resource('/v1/repository/<apirepopath:repository>/tag/<tag>/restore')
+
+@resource('/v1/repository/<apirepopath:repository>/tag/<tag>/revert')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@path_param('tag', 'The name of the tag')
-class RestoreTag(RepositoryParamResource):
- """ Resource for restoring a repository tag back to a previous image. """
+class RevertTag(RepositoryParamResource):
+ """ Resource for reverting a repository tag back to a previous image. """
schemas = {
- 'RestoreTag': {
+ 'RevertTag': {
'type': 'object',
- 'description': 'Restores a tag to a specific image',
+ 'description': 'Reverts a tag to a specific image',
+ 'required': [
+ 'image',
+ ],
'properties': {
'image': {
'type': 'string',
- 'description': '(Deprecated: use `manifest_digest`) Image to which the tag should point',
- },
- 'manifest_digest': {
- 'type': 'string',
- 'description': 'If specified, the manifest digest that should be used',
+ 'description': 'Image identifier to which the tag should point',
},
},
},
}
@require_repo_write
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
- @nickname('restoreTag')
- @validate_json_request('RestoreTag')
+ @nickname('revertTag')
+ @validate_json_request('RevertTag')
def post(self, namespace, repository, tag):
- """ Restores a repository tag back to a previous image in the repository. """
- repo_ref = registry_model.lookup_repository(namespace, repository)
- if repo_ref is None:
+ """ Reverts a repository tag back to a previous image in the repository. """
+ try:
+ tag_image = model.tag.get_tag_image(namespace, repository, tag)
+ except model.DataModelException:
raise NotFound()
- # Restore the tag back to the previous image.
- image_id = request.get_json().get('image', None)
- manifest_digest = request.get_json().get('manifest_digest', None)
+ # Revert the tag back to the previous image.
+ image_id = request.get_json()['image']
+ model.tag.revert_tag(tag_image.repository, tag, image_id)
- if image_id is None and manifest_digest is None:
- raise InvalidRequest('Missing manifest_digest')
-
- # Data for logging the reversion/restoration.
+ # Log the reversion.
username = get_authenticated_user().username
- log_data = {
- 'username': username,
- 'repo': repository,
- 'tag': tag,
- 'image': image_id,
- 'manifest_digest': manifest_digest,
+ log_action('revert_tag', namespace,
+ {'username': username, 'repo': repository, 'tag': tag,
+ 'image': image_id, 'original_image': tag_image.docker_image_id},
+ repo=model.repository.get_repository(namespace, repository))
+
+ return {
+ 'image_id': image_id,
+ 'original_image_id': tag_image.docker_image_id
}
-
- manifest_or_legacy_image = None
- if manifest_digest is not None:
- manifest_or_legacy_image = registry_model.lookup_manifest_by_digest(repo_ref, manifest_digest,
- allow_dead=True,
- require_available=True)
- elif image_id is not None:
- manifest_or_legacy_image = registry_model.get_legacy_image(repo_ref, image_id)
-
- if manifest_or_legacy_image is None:
- raise NotFound()
-
- if not registry_model.retarget_tag(repo_ref, tag, manifest_or_legacy_image, storage,
- docker_v2_signing_key, is_reversion=True):
- raise InvalidRequest('Could not restore tag')
-
- log_action('revert_tag', namespace, log_data, repo_name=repository)
-
- return {}
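For context, a minimal sketch of driving the RevertTag endpoint above over HTTP; the payload and response shape follow the handler, while the base URL, the /api mount point, the bearer token, and the use of requests are illustrative assumptions:

import requests

def revert_tag(base_url, token, repository, tag, image_id):
    # The RevertTag schema above makes the 'image' field mandatory.
    resp = requests.post(
        '%s/api/v1/repository/%s/tag/%s/revert' % (base_url, repository, tag),
        json={'image': image_id},
        headers={'Authorization': 'Bearer %s' % token})
    resp.raise_for_status()
    # The handler returns both the new and the previous image IDs.
    return resp.json()  # {'image_id': ..., 'original_image_id': ...}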
diff --git a/endpoints/api/team.py b/endpoints/api/team.py
index b00a14393..a427c472a 100644
--- a/endpoints/api/team.py
+++ b/endpoints/api/team.py
@@ -1,28 +1,19 @@
""" Create, list and manage an organization's teams. """
-import json
-
-from functools import wraps
-
from flask import request
import features
-from app import avatar, authentication
-from auth.permissions import (AdministerOrganizationPermission, ViewTeamPermission,
- SuperUserPermission)
-
+from endpoints.api import (resource, nickname, ApiResource, validate_json_request, request_error,
+ log_action, internal_only, require_scope, path_param, query_param,
+ truthy_bool, parse_args, require_user_admin, show_if)
+from endpoints.exception import Unauthorized, NotFound
+from auth.permissions import AdministerOrganizationPermission, ViewTeamPermission
from auth.auth_context import get_authenticated_user
from auth import scopes
from data import model
-from data.database import Team
-from endpoints.api import (resource, nickname, ApiResource, validate_json_request, request_error,
- log_action, internal_only, require_scope, path_param, query_param,
- truthy_bool, parse_args, require_user_admin, show_if, format_date,
- verify_not_prod, require_fresh_login)
-from endpoints.exception import Unauthorized, NotFound, InvalidRequest
from util.useremails import send_org_invite_email
-from util.names import parse_robot_username
+from app import avatar
def permission_view(permission):
return {
@@ -33,6 +24,7 @@ def permission_view(permission):
'role': permission.role.name
}
+
def try_accept_invite(code, user):
(team, inviter) = model.team.confirm_team_invite(code, user)
@@ -48,6 +40,7 @@ def try_accept_invite(code, user):
return team
+
def handle_addinvite_team(inviter, team, user=None, email=None):
requires_invite = features.MAILING and features.REQUIRE_TEAM_INVITE
invite = model.team.add_or_invite_to_team(inviter, team, user, email,
@@ -69,15 +62,15 @@ def handle_addinvite_team(inviter, team, user=None, email=None):
orgname, team.name, inviter.username, invite.invite_token)
return invite
-def team_view(orgname, team, is_new_team=False):
+def team_view(orgname, team):
view_permission = ViewTeamPermission(orgname, team.name)
+ role = model.team.get_team_org_role(team).name
return {
'name': team.name,
'description': team.description,
'can_view': view_permission.can(),
- 'role': Team.role.get_name(team.role_id),
- 'avatar': avatar.get_data_for_team(team),
- 'new_team': is_new_team,
+ 'role': role,
+ 'avatar': avatar.get_data_for_team(team)
}
def member_view(member, invited=False):
@@ -89,6 +82,7 @@ def member_view(member, invited=False):
'invited': invited,
}
+
def invite_view(invite):
if invite.user:
return member_view(invite.user, invited=True)
@@ -100,30 +94,6 @@ def invite_view(invite):
'invited': True
}
-def disallow_for_synced_team(except_robots=False):
- """ Disallows the decorated operation for a team that is marked as being synced from an internal
- auth provider such as LDAP. If except_robots is True, then the operation is allowed if the
- member specified on the operation is a robot account.
- """
- def inner(func):
- @wraps(func)
- def wrapper(self, *args, **kwargs):
- # Team syncing can only be enabled if we have a federated service.
- if features.TEAM_SYNCING and authentication.federated_service:
- orgname = kwargs['orgname']
- teamname = kwargs['teamname']
- if model.team.get_team_sync_information(orgname, teamname):
- if not except_robots or not parse_robot_username(kwargs.get('membername', '')):
- raise InvalidRequest('Cannot call this method on an auth-synced team')
-
- return func(self, *args, **kwargs)
- return wrapper
- return inner
-
-
-disallow_nonrobots_for_synced_team = disallow_for_synced_team(except_robots=True)
-disallow_all_for_synced_team = disallow_for_synced_team(except_robots=False)
-
@resource('/v1/organization/<orgname>/team/<teamname>')
@path_param('orgname', 'The name of the organization')
@@ -187,13 +157,13 @@ class OrganizationTeam(ApiResource):
{'team': teamname, 'description': team.description})
if 'role' in details:
- role = Team.role.get_name(team.role_id)
+ role = model.team.get_team_org_role(team).name
if role != details['role']:
team = model.team.set_team_org_permission(team, details['role'],
get_authenticated_user().username)
log_action('org_set_team_role', orgname, {'team': teamname, 'role': details['role']})
- return team_view(orgname, team, is_new_team=not is_existing), 200
+ return team_view(orgname, team), 200
raise Unauthorized()
@@ -210,64 +180,6 @@ class OrganizationTeam(ApiResource):
raise Unauthorized()
-def _syncing_setup_allowed(orgname):
- """ Returns whether syncing setup is allowed for the current user over the matching org. """
- if not features.NONSUPERUSER_TEAM_SYNCING_SETUP and not SuperUserPermission().can():
- return False
-
- return AdministerOrganizationPermission(orgname).can()
-
-
-@resource('/v1/organization/<orgname>/team/<teamname>/syncing')
-@path_param('orgname', 'The name of the organization')
-@path_param('teamname', 'The name of the team')
-@show_if(features.TEAM_SYNCING)
-class OrganizationTeamSyncing(ApiResource):
- """ Resource for managing syncing of a team by a backing group. """
- @require_scope(scopes.ORG_ADMIN)
- @require_scope(scopes.SUPERUSER)
- @nickname('enableOrganizationTeamSync')
- @verify_not_prod
- @require_fresh_login
- def post(self, orgname, teamname):
- if _syncing_setup_allowed(orgname):
- try:
- team = model.team.get_organization_team(orgname, teamname)
- except model.InvalidTeamException:
- raise NotFound()
-
- config = request.get_json()
-
- # Ensure that the specified config points to a valid group.
- status, err = authentication.check_group_lookup_args(config)
- if not status:
- raise InvalidRequest('Could not sync to group: %s' % err)
-
- # Set the team's syncing config.
- model.team.set_team_syncing(team, authentication.federated_service, config)
-
- return team_view(orgname, team)
-
- raise Unauthorized()
-
- @require_scope(scopes.ORG_ADMIN)
- @require_scope(scopes.SUPERUSER)
- @nickname('disableOrganizationTeamSync')
- @verify_not_prod
- @require_fresh_login
- def delete(self, orgname, teamname):
- if _syncing_setup_allowed(orgname):
- try:
- team = model.team.get_organization_team(orgname, teamname)
- except model.InvalidTeamException:
- raise NotFound()
-
- model.team.remove_team_syncing(orgname, teamname)
- return team_view(orgname, team)
-
- raise Unauthorized()
-
-
@resource('/v1/organization/<orgname>/team/<teamname>/members')
@path_param('orgname', 'The name of the organization')
@path_param('teamname', 'The name of the team')
@@ -299,29 +211,9 @@ class TeamMemberList(ApiResource):
data = {
'name': teamname,
'members': [member_view(m) for m in members] + [invite_view(i) for i in invites],
- 'can_edit': edit_permission.can(),
+ 'can_edit': edit_permission.can()
}
- if features.TEAM_SYNCING and authentication.federated_service:
- if _syncing_setup_allowed(orgname):
- data['can_sync'] = {
- 'service': authentication.federated_service,
- }
-
- data['can_sync'].update(authentication.service_metadata())
-
- sync_info = model.team.get_team_sync_information(orgname, teamname)
- if sync_info is not None:
- data['synced'] = {
- 'service': sync_info.service.name,
- }
-
- if SuperUserPermission().can():
- data['synced'].update({
- 'last_updated': format_date(sync_info.last_updated),
- 'config': json.loads(sync_info.config),
- })
-
return data
raise Unauthorized()
@@ -336,7 +228,6 @@ class TeamMember(ApiResource):
@require_scope(scopes.ORG_ADMIN)
@nickname('updateOrganizationTeamMember')
- @disallow_nonrobots_for_synced_team
def put(self, orgname, teamname, membername):
""" Adds or invites a member to an existing team. """
permission = AdministerOrganizationPermission(orgname)
@@ -374,7 +265,6 @@ class TeamMember(ApiResource):
@require_scope(scopes.ORG_ADMIN)
@nickname('deleteOrganizationTeamMember')
- @disallow_nonrobots_for_synced_team
def delete(self, orgname, teamname, membername):
""" Delete a member of a team. If the user is merely invited to join
the team, then the invite is removed instead.
@@ -418,7 +308,6 @@ class InviteTeamMember(ApiResource):
""" Resource for inviting a team member via email address. """
@require_scope(scopes.ORG_ADMIN)
@nickname('inviteTeamMemberEmail')
- @disallow_all_for_synced_team
def put(self, orgname, teamname, email):
""" Invites an email address to an existing team. """
permission = AdministerOrganizationPermission(orgname)
@@ -458,9 +347,7 @@ class InviteTeamMember(ApiResource):
raise NotFound()
# Delete the invite.
- if not model.team.delete_team_email_invite(team, email):
- raise NotFound()
-
+ model.team.delete_team_email_invite(team, email)
log_action('org_delete_team_member_invite', orgname, {
'email': email,
'team': teamname,
@@ -518,7 +405,7 @@ class TeamMemberInvite(ApiResource):
@nickname('declineOrganizationTeamInvite')
@require_user_admin
def delete(self, code):
- """ Delete an existing invitation to join a team. """
+ """ Delete an existing member of a team. """
(team, inviter) = model.team.delete_team_invite(code, user_obj=get_authenticated_user())
model.notification.delete_matching_notifications(get_authenticated_user(), 'org_team_invite',
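For reference, the dict shape produced by the simplified team_view() above, with illustrative values; note that the 'new_team' flag and the sync-related fields no longer appear after this change:

example_team_view = {
    'name': 'owners',
    'description': '',
    'can_view': True,   # ViewTeamPermission(orgname, team.name).can()
    'role': 'admin',    # model.team.get_team_org_role(team).name
    'avatar': {'name': 'owners', 'kind': 'team'},  # avatar.get_data_for_team(team); shape illustrative
}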
diff --git a/endpoints/api/test/__init__.py b/endpoints/api/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/endpoints/api/test/shared.py b/endpoints/api/test/shared.py
deleted file mode 100644
index c5a553f09..000000000
--- a/endpoints/api/test/shared.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from endpoints.test.shared import conduct_call
-from endpoints.api import api
-
-def conduct_api_call(client, resource, method, params, body=None, expected_code=200, headers=None):
- """ Conducts an API call to the given resource via the given client, and ensures its returned
- status matches the code given.
-
- Returns the response.
- """
- return conduct_call(client, resource, api.url_for, method, params, body, expected_code,
- headers=headers)
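For context, the deleted helper above was invoked throughout the test modules removed below in this pattern (the resource and params here are examples in the style of those tests):

from endpoints.api.tag import ListRepositoryTags
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity

def example_usage(client):
    with client_with_identity('devtable', client) as cl:
        resp = conduct_api_call(cl, ListRepositoryTags, 'GET',
                                {'repository': 'devtable/simple'},
                                body=None, expected_code=200)
        assert resp.json is not None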
diff --git a/endpoints/api/test/test_appspecifictoken.py b/endpoints/api/test/test_appspecifictoken.py
deleted file mode 100644
index 28e2bcd00..000000000
--- a/endpoints/api/test/test_appspecifictoken.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from datetime import datetime, timedelta
-
-from data import model
-from endpoints.api.appspecifictokens import AppTokens, AppToken
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-def test_app_specific_tokens(app, client):
- with client_with_identity('devtable', client) as cl:
- # Add an app specific token.
- token_data = {'title': 'Testing 123'}
- resp = conduct_api_call(cl, AppTokens, 'POST', None, token_data, 200).json
- token_uuid = resp['token']['uuid']
- assert 'token_code' in resp['token']
-
- # List the tokens and ensure we have the one added.
- resp = conduct_api_call(cl, AppTokens, 'GET', None, None, 200).json
- assert len(resp['tokens'])
- assert token_uuid in set([token['uuid'] for token in resp['tokens']])
- assert not set([token['token_code'] for token in resp['tokens'] if 'token_code' in token])
-
- # List the tokens expiring soon and ensure the one added is not present.
- resp = conduct_api_call(cl, AppTokens, 'GET', {'expiring': True}, None, 200).json
- assert token_uuid not in set([token['uuid'] for token in resp['tokens']])
-
- # Get the token and ensure we have its code.
- resp = conduct_api_call(cl, AppToken, 'GET', {'token_uuid': token_uuid}, None, 200).json
- assert resp['token']['uuid'] == token_uuid
- assert 'token_code' in resp['token']
-
- # Delete the token.
- conduct_api_call(cl, AppToken, 'DELETE', {'token_uuid': token_uuid}, None, 204)
-
- # Ensure the token no longer exists.
- resp = conduct_api_call(cl, AppTokens, 'GET', None, None, 200).json
- assert len(resp['tokens'])
- assert token_uuid not in set([token['uuid'] for token in resp['tokens']])
-
- conduct_api_call(cl, AppToken, 'GET', {'token_uuid': token_uuid}, None, 404)
-
-
-def test_delete_expired_app_token(app, client):
- user = model.user.get_user('devtable')
- expiration = datetime.now() - timedelta(seconds=10)
- token = model.appspecifictoken.create_token(user, 'some token', expiration)
-
- with client_with_identity('devtable', client) as cl:
- # Delete the token.
- conduct_api_call(cl, AppToken, 'DELETE', {'token_uuid': token.uuid}, None, 204)
diff --git a/endpoints/api/test/test_build.py b/endpoints/api/test/test_build.py
deleted file mode 100644
index bf98ad4eb..000000000
--- a/endpoints/api/test/test_build.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pytest
-
-from endpoints.api.build import RepositoryBuildList
-
-
-@pytest.mark.parametrize('request_json,subdir,context', [
- ({}, '/Dockerfile', '/'),
- ({'context': '/some_context'}, '/some_context/Dockerfile', '/some_context'),
- ({'subdirectory': 'some_context'}, 'some_context/Dockerfile', 'some_context'),
- ({'subdirectory': 'some_context/'}, 'some_context/Dockerfile', 'some_context/'),
- ({'dockerfile_path': 'some_context/Dockerfile'}, 'some_context/Dockerfile', 'some_context'),
- ({'dockerfile_path': 'some_context/Dockerfile', 'context': '/'}, 'some_context/Dockerfile', '/'),
- ({'dockerfile_path': 'some_context/Dockerfile',
- 'context': '/',
- 'subdirectory': 'slime'}, 'some_context/Dockerfile', '/'),
-])
-def test_extract_dockerfile_args(request_json, subdir, context):
- actual_context, actual_subdir = RepositoryBuildList.get_dockerfile_context(request_json)
- assert subdir == actual_subdir
- assert context == actual_context
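The deleted parametrization above pins down a precedence order for RepositoryBuildList.get_dockerfile_context; a sketch that reproduces those expected outputs (not the actual implementation) looks like this:

import os

def get_dockerfile_context(request_json):
    # 'dockerfile_path' wins outright; otherwise derive it from 'context' or
    # 'subdirectory'. An explicit 'context' always wins over the derived one.
    context = request_json.get('context')
    subdir = request_json.get('subdirectory')
    dockerfile_path = request_json.get('dockerfile_path')
    if dockerfile_path is None:
        base = context if context is not None else subdir
        dockerfile_path = os.path.join(base, 'Dockerfile') if base else '/Dockerfile'
    if context is None:
        context = subdir if subdir is not None else (os.path.dirname(dockerfile_path) or '/')
    return context, dockerfile_path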
diff --git a/endpoints/api/test/test_disallow_for_apps.py b/endpoints/api/test/test_disallow_for_apps.py
deleted file mode 100644
index b9112c291..000000000
--- a/endpoints/api/test/test_disallow_for_apps.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import pytest
-
-from data import model
-from endpoints.api.repository import Repository
-from endpoints.api.build import (RepositoryBuildList, RepositoryBuildResource,
- RepositoryBuildStatus, RepositoryBuildLogs)
-from endpoints.api.image import RepositoryImageList, RepositoryImage
-from endpoints.api.manifest import RepositoryManifestLabels, ManageRepositoryManifestLabel
-from endpoints.api.repositorynotification import (RepositoryNotification,
- RepositoryNotificationList,
- TestRepositoryNotification)
-from endpoints.api.secscan import RepositoryImageSecurity, RepositoryManifestSecurity
-from endpoints.api.signing import RepositorySignatures
-from endpoints.api.tag import ListRepositoryTags, RepositoryTag, RepositoryTagImages, RestoreTag
-from endpoints.api.trigger import (BuildTriggerList, BuildTrigger, BuildTriggerSubdirs,
- BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger,
- TriggerBuildList, BuildTriggerFieldValues, BuildTriggerSources,
- BuildTriggerSourceNamespaces)
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-BUILD_ARGS = {'build_uuid': '1234'}
-IMAGE_ARGS = {'imageid': '1234', 'image_id': 1234}
-MANIFEST_ARGS = {'manifestref': 'sha256:abcd1234'}
-LABEL_ARGS = {'manifestref': 'sha256:abcd1234', 'labelid': '1234'}
-NOTIFICATION_ARGS = {'uuid': '1234'}
-TAG_ARGS = {'tag': 'foobar'}
-TRIGGER_ARGS = {'trigger_uuid': '1234'}
-FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'}
-
-@pytest.mark.parametrize('resource, method, params', [
- (RepositoryBuildList, 'get', None),
- (RepositoryBuildList, 'post', None),
- (RepositoryBuildResource, 'get', BUILD_ARGS),
- (RepositoryBuildResource, 'delete', BUILD_ARGS),
- (RepositoryBuildStatus, 'get', BUILD_ARGS),
- (RepositoryBuildLogs, 'get', BUILD_ARGS),
- (RepositoryImageList, 'get', None),
- (RepositoryImage, 'get', IMAGE_ARGS),
- (RepositoryManifestLabels, 'get', MANIFEST_ARGS),
- (RepositoryManifestLabels, 'post', MANIFEST_ARGS),
- (ManageRepositoryManifestLabel, 'get', LABEL_ARGS),
- (ManageRepositoryManifestLabel, 'delete', LABEL_ARGS),
- (RepositoryNotificationList, 'get', None),
- (RepositoryNotificationList, 'post', None),
- (RepositoryNotification, 'get', NOTIFICATION_ARGS),
- (RepositoryNotification, 'delete', NOTIFICATION_ARGS),
- (RepositoryNotification, 'post', NOTIFICATION_ARGS),
- (TestRepositoryNotification, 'post', NOTIFICATION_ARGS),
- (RepositoryImageSecurity, 'get', IMAGE_ARGS),
- (RepositoryManifestSecurity, 'get', MANIFEST_ARGS),
- (RepositorySignatures, 'get', None),
- (ListRepositoryTags, 'get', None),
- (RepositoryTag, 'put', TAG_ARGS),
- (RepositoryTag, 'delete', TAG_ARGS),
- (RepositoryTagImages, 'get', TAG_ARGS),
- (RestoreTag, 'post', TAG_ARGS),
- (BuildTriggerList, 'get', None),
- (BuildTrigger, 'get', TRIGGER_ARGS),
- (BuildTrigger, 'delete', TRIGGER_ARGS),
- (BuildTriggerSubdirs, 'post', TRIGGER_ARGS),
- (BuildTriggerActivate, 'post', TRIGGER_ARGS),
- (BuildTriggerAnalyze, 'post', TRIGGER_ARGS),
- (ActivateBuildTrigger, 'post', TRIGGER_ARGS),
- (TriggerBuildList, 'get', TRIGGER_ARGS),
- (BuildTriggerFieldValues, 'post', FIELD_ARGS),
- (BuildTriggerSources, 'post', TRIGGER_ARGS),
- (BuildTriggerSourceNamespaces, 'get', TRIGGER_ARGS),
-])
-def test_disallowed_for_apps(resource, method, params, client):
- namespace = 'devtable'
- repository = 'someapprepo'
-
- devtable = model.user.get_user('devtable')
- model.repository.create_repository(namespace, repository, devtable, repo_kind='application')
-
- params = params or {}
- params['repository'] = '%s/%s' % (namespace, repository)
-
- with client_with_identity('devtable', client) as cl:
- conduct_api_call(cl, resource, method, params, None, 501)
-
diff --git a/endpoints/api/test/test_disallow_for_nonnormal.py b/endpoints/api/test/test_disallow_for_nonnormal.py
deleted file mode 100644
index 7d8ace845..000000000
--- a/endpoints/api/test/test_disallow_for_nonnormal.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import pytest
-
-from data import model
-from data.database import RepositoryState
-from endpoints.api.build import RepositoryBuildList, RepositoryBuildResource
-from endpoints.api.manifest import RepositoryManifestLabels, ManageRepositoryManifestLabel
-from endpoints.api.tag import RepositoryTag, RestoreTag
-from endpoints.api.trigger import (BuildTrigger, BuildTriggerSubdirs,
- BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger,
- BuildTriggerFieldValues, BuildTriggerSources)
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-BUILD_ARGS = {'build_uuid': '1234'}
-IMAGE_ARGS = {'imageid': '1234', 'image_id': 1234}
-MANIFEST_ARGS = {'manifestref': 'sha256:abcd1234'}
-LABEL_ARGS = {'manifestref': 'sha256:abcd1234', 'labelid': '1234'}
-NOTIFICATION_ARGS = {'uuid': '1234'}
-TAG_ARGS = {'tag': 'foobar'}
-TRIGGER_ARGS = {'trigger_uuid': '1234'}
-FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'}
-
-
-@pytest.mark.parametrize('state', [
- RepositoryState.MIRROR,
- RepositoryState.READ_ONLY,
-])
-@pytest.mark.parametrize('resource, method, params', [
- (RepositoryBuildList, 'post', None),
- (RepositoryBuildResource, 'delete', BUILD_ARGS),
-
- (RepositoryManifestLabels, 'post', MANIFEST_ARGS),
- (ManageRepositoryManifestLabel, 'delete', LABEL_ARGS),
-
- (RepositoryTag, 'put', TAG_ARGS),
- (RepositoryTag, 'delete', TAG_ARGS),
-
- (RestoreTag, 'post', TAG_ARGS),
-
- (BuildTrigger, 'delete', TRIGGER_ARGS),
- (BuildTriggerSubdirs, 'post', TRIGGER_ARGS),
- (BuildTriggerActivate, 'post', TRIGGER_ARGS),
- (BuildTriggerAnalyze, 'post', TRIGGER_ARGS),
- (ActivateBuildTrigger, 'post', TRIGGER_ARGS),
-
- (BuildTriggerFieldValues, 'post', FIELD_ARGS),
- (BuildTriggerSources, 'post', TRIGGER_ARGS),
-
-])
-def test_disallowed_for_nonnormal(state, resource, method, params, client):
- namespace = 'devtable'
- repository = 'somenewstaterepo'
-
- devtable = model.user.get_user('devtable')
- repo = model.repository.create_repository(namespace, repository, devtable)
- repo.state = state
- repo.save()
-
- params = params or {}
- params['repository'] = '%s/%s' % (namespace, repository)
-
- with client_with_identity('devtable', client) as cl:
- conduct_api_call(cl, resource, method, params, None, 503)
diff --git a/endpoints/api/test/test_endtoend_auth.py b/endpoints/api/test/test_endtoend_auth.py
deleted file mode 100644
index 0bcf9c7e4..000000000
--- a/endpoints/api/test/test_endtoend_auth.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import pytest
-
-from mock import patch
-
-from endpoints.api.search import EntitySearch, LinkExternalEntity
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-
-from test.test_ldap import mock_ldap
-from test.test_external_jwt_authn import fake_jwt
-from test.test_keystone_auth import fake_keystone
-
-from test.fixtures import *
-
-
-@pytest.fixture(params=[
- mock_ldap,
- fake_jwt,
- fake_keystone,
-])
-def auth_engine(request):
- return request.param
-
-
-@pytest.fixture(params=[
- False,
- True,
-])
-def requires_email(request):
- return request.param
-
-
-def test_entity_search(auth_engine, requires_email, client):
- with auth_engine(requires_email=requires_email) as auth:
- with patch('endpoints.api.search.authentication', auth):
- # Try an unknown prefix.
- response = conduct_api_call(client, EntitySearch, 'GET', params=dict(prefix='unknown'))
- results = response.json['results']
- assert len(results) == 0
-
- # Try a known prefix.
- response = conduct_api_call(client, EntitySearch, 'GET', params=dict(prefix='cool'))
- results = response.json['results']
- entity = results[0]
- assert entity['name'] == 'cool.user'
- assert entity['kind'] == 'external'
-
-
-def test_link_external_entity(auth_engine, requires_email, client):
- with auth_engine(requires_email=requires_email) as auth:
- with patch('endpoints.api.search.authentication', auth):
- with client_with_identity('devtable', client) as cl:
- # Try an unknown user.
- conduct_api_call(cl, LinkExternalEntity, 'POST', params=dict(username='unknownuser'),
- expected_code=400)
-
- # Try a known user.
- response = conduct_api_call(cl, LinkExternalEntity, 'POST',
- params=dict(username='cool.user'))
-
- entity = response.json['entity']
- assert entity['name'] == 'cool_user'
- assert entity['kind'] == 'user'
diff --git a/endpoints/api/test/test_logs.py b/endpoints/api/test/test_logs.py
deleted file mode 100644
index a73561bfa..000000000
--- a/endpoints/api/test/test_logs.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os
-import time
-
-from mock import patch
-
-from app import export_action_logs_queue
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.logs import ExportOrgLogs
-from endpoints.test.shared import client_with_identity
-
-from test.fixtures import *
-
-@pytest.mark.skipif(os.environ.get('TEST_DATABASE_URI', '').find('mysql') >= 0,
- reason="Queue code is very sensitive to times on MySQL, making this flaky")
-def test_export_logs(client):
- with client_with_identity('devtable', client) as cl:
- assert export_action_logs_queue.get() is None
-
- timecode = time.time()
- def get_time():
- return timecode - 2
-
- with patch('time.time', get_time):
- # Call to export logs.
- body = {
- 'callback_url': 'http://some/url',
- 'callback_email': 'a@b.com',
- }
-
- conduct_api_call(cl, ExportOrgLogs, 'POST', {'orgname': 'buynlarge'},
- body, expected_code=200)
-
- # Ensure the request was queued.
- assert export_action_logs_queue.get() is not None
diff --git a/endpoints/api/test/test_manifest.py b/endpoints/api/test/test_manifest.py
deleted file mode 100644
index 164c26061..000000000
--- a/endpoints/api/test/test_manifest.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from data.registry_model import registry_model
-from endpoints.api.manifest import RepositoryManifest
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-
-from test.fixtures import *
-
-def test_repository_manifest(client):
- with client_with_identity('devtable', client) as cl:
- repo_ref = registry_model.lookup_repository('devtable', 'simple')
- tags = registry_model.list_all_active_repository_tags(repo_ref)
- for tag in tags:
- manifest_digest = tag.manifest_digest
- if manifest_digest is None:
- continue
-
- params = {
- 'repository': 'devtable/simple',
- 'manifestref': manifest_digest,
- }
- result = conduct_api_call(cl, RepositoryManifest, 'GET', params, None, 200).json
- assert result['digest'] == manifest_digest
- assert result['manifest_data']
- assert result['image']
diff --git a/endpoints/api/test/test_mirror.py b/endpoints/api/test/test_mirror.py
deleted file mode 100644
index 8fcd9ef4a..000000000
--- a/endpoints/api/test/test_mirror.py
+++ /dev/null
@@ -1,235 +0,0 @@
-from datetime import datetime
-
-import pytest
-
-from data import model
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.mirror import RepoMirrorResource
-from endpoints.test.shared import client_with_identity
-
-from test.fixtures import *
-
-def _setup_mirror():
- repo = model.repository.get_repository('devtable', 'simple')
- assert repo
- robot = model.user.lookup_robot('devtable+dtrobot')
- assert robot
- rule = model.repo_mirror.create_rule(repo, ['latest', '3.3*', 'foo'])
- assert rule
- mirror_kwargs = {
- 'is_enabled': True,
- 'external_reference': 'quay.io/redhat/quay',
- 'sync_interval': 5000,
- 'sync_start_date': datetime(2020, 01, 02, 6, 30, 0),
- 'external_registry_username': 'fakeUsername',
- 'external_registry_password': 'fakePassword',
- 'external_registry_config': {
- 'verify_tls': True,
- 'proxy': {
- 'http_proxy': 'http://insecure.proxy.corp',
- 'https_proxy': 'https://secure.proxy.corp',
- 'no_proxy': 'mylocalhost'
- }
- }
- }
- mirror = model.repo_mirror.enable_mirroring_for_repository(repo, root_rule=rule,
- internal_robot=robot, **mirror_kwargs)
- assert mirror
- return mirror
-
-
-@pytest.mark.parametrize('existing_robot_permission, expected_permission', [
- (None, 'write'),
- ('read', 'write'),
- ('write', 'write'),
- ('admin', 'admin'),
-])
-def test_create_mirror_sets_permissions(existing_robot_permission, expected_permission, client):
- mirror_bot, _ = model.user.create_robot('newmirrorbot', model.user.get_namespace_user('devtable'))
-
- if existing_robot_permission:
- model.permission.set_user_repo_permission(mirror_bot.username, 'devtable', 'simple',
- existing_robot_permission)
-
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- request_body = {
- 'external_reference': 'quay.io/foobar/barbaz',
- 'sync_interval': 100,
- 'sync_start_date': '2019-08-20T17:51:00Z',
- 'root_rule': {
- 'rule_kind': 'tag_glob_csv',
- 'rule_value': ['latest','foo', 'bar']
- },
- 'robot_username': 'devtable+newmirrorbot',
- }
- conduct_api_call(cl, RepoMirrorResource, 'POST', params, request_body, 201)
-
- # Check the status of the robot.
- permissions = model.permission.get_user_repository_permissions(mirror_bot, 'devtable', 'simple')
- assert permissions[0].role.name == expected_permission
-
- config = model.repo_mirror.get_mirror(model.repository.get_repository('devtable', 'simple'))
- assert config.root_rule.rule_value == ['latest', 'foo', 'bar']
-
-
-def test_get_mirror_does_not_exist(client):
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 404)
-
-
-def test_get_repo_does_not_exist(client):
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/unicorn'}
- resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 404)
-
-
-def test_get_mirror(client):
- """ Verify that performing a `GET` request returns expected and accurate data. """
- mirror = _setup_mirror()
-
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 200).json
-
- assert resp['is_enabled'] == True
- assert resp['external_reference'] == 'quay.io/redhat/quay'
- assert resp['sync_interval'] == 5000
- assert resp['sync_start_date'] == '2020-01-02T06:30:00Z'
- assert resp['external_registry_username'] == 'fakeUsername'
- assert 'external_registry_password' not in resp
- assert 'external_registry_config' in resp
- assert resp['external_registry_config']['verify_tls'] == True
- assert 'proxy' in resp['external_registry_config']
- assert resp['external_registry_config']['proxy']['http_proxy'] == 'http://insecure.proxy.corp'
- assert resp['external_registry_config']['proxy']['https_proxy'] == 'https://secure.proxy.corp'
- assert resp['external_registry_config']['proxy']['no_proxy'] == 'mylocalhost'
-
-
-@pytest.mark.parametrize('key, value, expected_status', [
-
- ('is_enabled', True, 201),
- ('is_enabled', False, 201),
- ('is_enabled', None, 400),
- ('is_enabled', 'foo', 400),
-
- ('external_reference', 'example.com/foo/bar', 201),
- ('external_reference', 'example.com/foo', 201),
- ('external_reference', 'example.com', 201),
-
- ('external_registry_username', 'newTestUsername', 201),
- ('external_registry_username', None, 201),
- ('external_registry_username', 123, 400),
-
- ('external_registry_password', 'newTestPassword', 400),
- ('external_registry_password', None, 400),
- ('external_registry_password', 41, 400),
-
- ('robot_username', 'devtable+dtrobot', 201),
- ('robot_username', 'devtable+doesntExist', 400),
-
- ('sync_start_date', '2020-01-01T00:00:00Z', 201),
- ('sync_start_date', 'January 1 2020', 400),
- ('sync_start_date', '2020-01-01T00:00:00.00Z', 400),
- ('sync_start_date', 'Wed, 01 Jan 2020 00:00:00 -0000', 400),
- ('sync_start_date', 'Wed, 02 Oct 2002 08:00:00 EST', 400),
-
- ('sync_interval', 2000, 201),
- ('sync_interval', -5, 400),
-
- ('https_proxy', 'https://proxy.corp.example.com', 201),
- ('https_proxy', None, 201),
- ('https_proxy', 'proxy.example.com; rm -rf /', 201), # Safe; values only set in env, not eval'ed
-
- ('http_proxy', 'http://proxy.corp.example.com', 201),
- ('http_proxy', None, 201),
- ('http_proxy', 'proxy.example.com; rm -rf /', 201), # Safe; values only set in env, not eval'ed
-
- ('no_proxy', 'quay.io', 201),
- ('no_proxy', None, 201),
- ('no_proxy', 'quay.io; rm -rf /', 201), # Safe because proxy values are not eval'ed
-
- ('verify_tls', True, 201),
- ('verify_tls', False, 201),
- ('verify_tls', None, 400),
- ('verify_tls', 'abc', 400),
-
- ('root_rule', {'rule_kind': 'tag_glob_csv', 'rule_value': ['3.1', '3.1*']}, 201),
- ('root_rule', {'rule_kind': 'tag_glob_csv'}, 400),
- ('root_rule', {'rule_kind': 'tag_glob_csv', 'rule_value': []}, 400),
- ('root_rule', {'rule_kind': 'incorrect', 'rule_value': ['3.1', '3.1*']}, 400),
-
-])
-def test_change_config(key, value, expected_status, client):
- """ Verify that changing each attribute works as expected. """
- mirror = _setup_mirror()
-
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- if key in ('http_proxy', 'https_proxy', 'no_proxy'):
- request_body = {'external_registry_config': {'proxy': {key: value}}}
- elif key == 'verify_tls':
- request_body = {'external_registry_config': {key: value}}
- else:
- request_body = {key: value}
- conduct_api_call(cl, RepoMirrorResource, 'PUT', params, request_body, expected_status)
-
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- resp = conduct_api_call(cl, RepoMirrorResource, 'GET', params, None, 200)
-
- if expected_status < 400:
- if key == 'external_registry_password':
- assert key not in resp.json
- elif key == 'verify_tls':
- assert resp.json['external_registry_config']['verify_tls'] == value
- elif key in ('http_proxy', 'https_proxy', 'no_proxy'):
- assert resp.json['external_registry_config']['proxy'][key] == value
- else:
- assert resp.json[key] == value
- else:
- if key == 'external_registry_password':
- assert key not in resp.json
- elif key == 'verify_tls':
- assert resp.json['external_registry_config'][key] != value
- elif key in ('http_proxy', 'https_proxy', 'no_proxy'):
- assert resp.json['external_registry_config']['proxy'][key] != value
- else:
- assert resp.json[key] != value
-
-
-@pytest.mark.parametrize('request_body, expected_status', [
-
- # Set a new password and username => Success
- ({ 'external_registry_username': 'newUsername',
- 'external_registry_password': 'newPassword'}, 201 ),
-
- # Set password and username to None => Success
- ({ 'external_registry_username': None,
- 'external_registry_password': None}, 201 ),
-
- # Set username to value but password None => Success
- ({ 'external_registry_username': 'myUsername',
- 'external_registry_password': None}, 201 ),
-
- # Set only new Username => Success
- ({'external_registry_username': 'myNewUsername'}, 201),
- ({'external_registry_username': None}, 201),
-
- # Set only new Password => Failure
- ({'external_registry_password': 'myNewPassword'}, 400),
- ({'external_registry_password': None}, 400),
-
- # Set username and password to empty string => Success?
- ({'external_registry_username': '',
- 'external_registry_password': ''}, 201),
-
-])
-def test_change_credentials(request_body, expected_status, client):
- """ Verify credentials can only be modified as a pair. """
- mirror = _setup_mirror()
-
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- conduct_api_call(cl, RepoMirrorResource, 'PUT', params, request_body, expected_status)
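For context, a representative PUT body for RepoMirrorResource, assembled from the fields exercised in the deleted tests above; values are illustrative, and credentials are only accepted as a username/password pair:

mirror_update = {
    'is_enabled': True,
    'external_reference': 'quay.io/redhat/quay',
    'sync_interval': 5000,                      # must be a positive number of seconds
    'sync_start_date': '2020-01-01T00:00:00Z',  # the only timestamp format the tests accept
    'external_registry_username': 'fakeUsername',
    'external_registry_password': 'fakePassword',  # rejected when sent without a username
    'external_registry_config': {
        'verify_tls': True,
        'proxy': {
            'http_proxy': 'http://insecure.proxy.corp',
            'https_proxy': 'https://secure.proxy.corp',
            'no_proxy': 'mylocalhost',
        },
    },
    'root_rule': {'rule_kind': 'tag_glob_csv', 'rule_value': ['latest', '3.3*']},
}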
diff --git a/endpoints/api/test/test_organization.py b/endpoints/api/test/test_organization.py
deleted file mode 100644
index 4341e1125..000000000
--- a/endpoints/api/test/test_organization.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import pytest
-
-from data import model
-from endpoints.api import api
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.organization import (Organization,
- OrganizationCollaboratorList)
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('expiration, expected_code', [
- (0, 200),
- (100, 400),
- (100000000000000000000, 400),
-])
-def test_change_tag_expiration(expiration, expected_code, client):
- with client_with_identity('devtable', client) as cl:
- conduct_api_call(cl, Organization, 'PUT', {'orgname': 'buynlarge'},
- body={'tag_expiration_s': expiration},
- expected_code=expected_code)
-
-
-def test_get_organization_collaborators(client):
- params = {'orgname': 'buynlarge'}
-
- with client_with_identity('devtable', client) as cl:
- resp = conduct_api_call(cl, OrganizationCollaboratorList, 'GET', params)
-
- collaborator_names = [c['name'] for c in resp.json['collaborators']]
- assert 'outsideorg' in collaborator_names
- assert 'devtable' not in collaborator_names
- assert 'reader' not in collaborator_names
-
- for collaborator in resp.json['collaborators']:
- if collaborator['name'] == 'outsideorg':
- assert 'orgrepo' in collaborator['repositories']
- assert 'anotherorgrepo' not in collaborator['repositories']
diff --git a/endpoints/api/test/test_permission.py b/endpoints/api/test/test_permission.py
deleted file mode 100644
index 1182f1071..000000000
--- a/endpoints/api/test/test_permission.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import pytest
-
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.permission import RepositoryUserPermission
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-@pytest.mark.parametrize('repository, username, expected_code', [
- pytest.param('devtable/simple', 'public', 200, id='valid user under user'),
- pytest.param('devtable/simple', 'devtable+dtrobot', 200, id='valid robot under user'),
- pytest.param('devtable/simple', 'buynlarge+coolrobot', 400, id='invalid robot under user'),
- pytest.param('buynlarge/orgrepo', 'devtable', 200, id='valid user under org'),
- pytest.param('buynlarge/orgrepo', 'devtable+dtrobot', 400, id='invalid robot under org'),
- pytest.param('buynlarge/orgrepo', 'buynlarge+coolrobot', 200, id='valid robot under org'),
-])
-def test_robot_permission(repository, username, expected_code, client):
- with client_with_identity('devtable', client) as cl:
- conduct_api_call(cl, RepositoryUserPermission, 'PUT',
- {'repository': repository, 'username': username},
- body={
- 'role': 'read',
- },
- expected_code=expected_code)
diff --git a/endpoints/api/test/test_repoemail_models_pre_oci.py b/endpoints/api/test/test_repoemail_models_pre_oci.py
deleted file mode 100644
index 7c8de8226..000000000
--- a/endpoints/api/test/test_repoemail_models_pre_oci.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import pytest
-from mock import Mock
-
-import util
-from data import model
-from endpoints.api.repoemail_models_interface import RepositoryAuthorizedEmail
-from endpoints.api.repoemail_models_pre_oci import pre_oci_model
-
-
-@pytest.fixture
-def get_monkeypatch(monkeypatch):
- return monkeypatch
-
-
-def return_none(name, repo, email):
- return None
-
-
-def get_return_mock(mock):
- def return_mock(name, repo, email):
- return mock
-
- return return_mock
-
-
-def test_get_email_authorized_for_repo(get_monkeypatch):
- mock = Mock()
-
- get_monkeypatch.setattr(model.repository, 'get_email_authorized_for_repo', mock)
-
- pre_oci_model.get_email_authorized_for_repo('namespace_name', 'repository_name', 'email')
-
- mock.assert_called_once_with('namespace_name', 'repository_name', 'email')
-
-
-def test_get_email_authorized_for_repo_return_none(get_monkeypatch):
- get_monkeypatch.setattr(model.repository, 'get_email_authorized_for_repo', return_none)
-
- repo = pre_oci_model.get_email_authorized_for_repo('namespace_name', 'repository_name', 'email')
-
- assert repo is None
-
-
-def test_get_email_authorized_for_repo_return_repo(get_monkeypatch):
- mock = Mock(confirmed=True, code='code')
- get_monkeypatch.setattr(model.repository, 'get_email_authorized_for_repo', get_return_mock(mock))
-
- actual = pre_oci_model.get_email_authorized_for_repo('namespace_name', 'repository_name',
- 'email')
-
- assert actual == RepositoryAuthorizedEmail('email', 'repository_name', 'namespace_name', True,
- 'code')
-
-
-def test_create_email_authorization_for_repo(get_monkeypatch):
- mock = Mock()
- get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo', mock)
-
- pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name', 'email')
-
- mock.assert_called_once_with('namespace_name', 'repository_name', 'email')
-
-
-def test_create_email_authorization_for_repo_return_none(get_monkeypatch):
- get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo', return_none)
-
- assert pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name',
- 'email') is None
-
-
-def test_create_email_authorization_for_repo_return_mock(get_monkeypatch):
- mock = Mock()
- get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo',
- get_return_mock(mock))
-
- assert pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name',
- 'email') is not None
-
-
-def test_create_email_authorization_for_repo_return_value(get_monkeypatch):
- mock = Mock(confirmed=False, code='code')
-
- get_monkeypatch.setattr(model.repository, 'create_email_authorization_for_repo',
- get_return_mock(mock))
-
- actual = pre_oci_model.create_email_authorization_for_repo('namespace_name', 'repository_name',
- 'email')
- assert actual == RepositoryAuthorizedEmail('email', 'repository_name', 'namespace_name', False,
- 'code')
diff --git a/endpoints/api/test/test_repository.py b/endpoints/api/test/test_repository.py
deleted file mode 100644
index 4edca0e35..000000000
--- a/endpoints/api/test/test_repository.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import pytest
-
-from mock import patch, ANY, MagicMock
-
-from data import model, database
-from data.appr_model import release, channel, blob
-from endpoints.appr.models_cnr import model as appr_model
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.repository import RepositoryTrust, Repository, RepositoryList
-from endpoints.test.shared import client_with_identity
-from features import FeatureNameValue
-
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('trust_enabled,repo_found,expected_status', [
- (True, True, 200),
- (False, True, 200),
- (False, False, 404),
- ('invalid_req', False, 400),
-])
-def test_post_changetrust(trust_enabled, repo_found, expected_status, client):
- with patch('endpoints.api.repository.tuf_metadata_api') as mock_tuf:
- with patch(
- 'endpoints.api.repository_models_pre_oci.model.repository.get_repository') as mock_model:
- mock_model.return_value = MagicMock() if repo_found else None
- mock_tuf.get_default_tags_with_expiration.return_value = ['tags', 'expiration']
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/repo'}
- request_body = {'trust_enabled': trust_enabled}
- conduct_api_call(cl, RepositoryTrust, 'POST', params, request_body, expected_status)
-
-
-def test_signing_disabled(client):
- with patch('features.SIGNING', FeatureNameValue('SIGNING', False)):
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- response = conduct_api_call(cl, Repository, 'GET', params).json
- assert not response['trust_enabled']
-
-
-def test_list_starred_repos(client):
- with client_with_identity('devtable', client) as cl:
- params = {
- 'starred': 'true',
- }
-
- response = conduct_api_call(cl, RepositoryList, 'GET', params).json
- repos = {r['namespace'] + '/' + r['name'] for r in response['repositories']}
- assert 'devtable/simple' in repos
- assert 'public/publicrepo' not in repos
-
- # Add a star on publicrepo.
- publicrepo = model.repository.get_repository('public', 'publicrepo')
- model.repository.star_repository(model.user.get_user('devtable'), publicrepo)
-
- # Ensure publicrepo shows up.
- response = conduct_api_call(cl, RepositoryList, 'GET', params).json
- repos = {r['namespace'] + '/' + r['name'] for r in response['repositories']}
- assert 'devtable/simple' in repos
- assert 'public/publicrepo' in repos
-
- # Make publicrepo private and ensure it disappears.
- model.repository.set_repository_visibility(publicrepo, 'private')
-
- response = conduct_api_call(cl, RepositoryList, 'GET', params).json
- repos = {r['namespace'] + '/' + r['name'] for r in response['repositories']}
- assert 'devtable/simple' in repos
- assert 'public/publicrepo' not in repos
-
-
-def test_list_repositories_last_modified(client):
- with client_with_identity('devtable', client) as cl:
- params = {
- 'namespace': 'devtable',
- 'last_modified': 'true',
- }
-
- response = conduct_api_call(cl, RepositoryList, 'GET', params).json
-
- for repo in response['repositories']:
- if repo['name'] != 'building':
- assert repo['last_modified'] is not None
-
-
-@pytest.mark.parametrize('repo_name, expected_status', [
- pytest.param('x' * 255, 201, id='Maximum allowed length'),
- pytest.param('x' * 256, 400, id='Over allowed length'),
- pytest.param('a|b', 400, id='Invalid name'),
-])
-def test_create_repository(repo_name, expected_status, client):
- with client_with_identity('devtable', client) as cl:
- body = {
- 'namespace': 'devtable',
- 'repository': repo_name,
- 'visibility': 'public',
- 'description': 'foo',
- }
-
- result = conduct_api_call(client, RepositoryList, 'post', None, body,
- expected_code=expected_status).json
- if expected_status == 201:
- assert result['name'] == repo_name
- assert model.repository.get_repository('devtable', repo_name).name == repo_name
-
-
-@pytest.mark.parametrize('has_tag_manifest', [
- True,
- False,
-])
-def test_get_repo(has_tag_manifest, client, initialized_db):
- with client_with_identity('devtable', client) as cl:
- if not has_tag_manifest:
- database.TagManifestLabelMap.delete().execute()
- database.TagManifestToManifest.delete().execute()
- database.TagManifestLabel.delete().execute()
- database.TagManifest.delete().execute()
-
- params = {'repository': 'devtable/simple'}
- response = conduct_api_call(cl, Repository, 'GET', params).json
- assert response['kind'] == 'image'
-
-
-def test_get_app_repo(client, initialized_db):
- with client_with_identity('devtable', client) as cl:
- devtable = model.user.get_user('devtable')
- repo = model.repository.create_repository('devtable', 'someappr', devtable,
- repo_kind='application')
-
- models_ref = appr_model.models_ref
- blob.get_or_create_blob('sha256:somedigest', 0, 'application/vnd.cnr.blob.v0.tar+gzip',
- ['local_us'], models_ref)
-
- release.create_app_release(repo, 'test',
- dict(mediaType='application/vnd.cnr.package-manifest.helm.v0.json'),
- 'sha256:somedigest', models_ref, False)
-
- channel.create_or_update_channel(repo, 'somechannel', 'test', models_ref)
-
- params = {'repository': 'devtable/someappr'}
- response = conduct_api_call(cl, Repository, 'GET', params).json
- assert response['kind'] == 'application'
- assert response['channels']
- assert response['releases']
-
-
-
-@pytest.mark.parametrize('state, can_write', [
- (database.RepositoryState.NORMAL, True),
- (database.RepositoryState.READ_ONLY, False),
- (database.RepositoryState.MIRROR, False),
-])
-def test_get_repo_state_can_write(state, can_write, client, initialized_db):
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- response = conduct_api_call(cl, Repository, 'GET', params).json
- assert response['can_write']
-
- repo = model.repository.get_repository('devtable', 'simple')
- repo.state = state
- repo.save()
-
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/simple'}
- response = conduct_api_call(cl, Repository, 'GET', params).json
- assert response['can_write'] == can_write
diff --git a/endpoints/api/test/test_repositorynotification.py b/endpoints/api/test/test_repositorynotification.py
deleted file mode 100644
index 06d65e2f0..000000000
--- a/endpoints/api/test/test_repositorynotification.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import pytest
-
-from mock import Mock, MagicMock
-
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.repositorynotification import RepositoryNotificationList, RepositoryNotification, TestRepositoryNotification
-from endpoints.test.shared import client_with_identity
-import endpoints.api.repositorynotification_models_interface as iface
-from test.fixtures import *
-
-@pytest.fixture()
-def authd_client(client):
- with client_with_identity('devtable', client) as cl:
- yield cl
-
-def mock_get_notification(uuid):
- mock_notification = MagicMock(iface.RepositoryNotification)
- if uuid == 'exists':
- mock_notification.return_value = iface.RepositoryNotification(
- 'exists',
- 'title',
- 'event_name',
- 'method_name',
- 'config_json',
- 'event_config_json',
- 2,
- )
- else:
- mock_notification.return_value = None
- return mock_notification
-
-@pytest.mark.parametrize('namespace,repository,body,expected_code',[
- ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_push',
- method='webhook', eventConfig={}, title='test'), 201),
- ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_mirror_sync_started',
- method='webhook', eventConfig={}, title='test'), 201),
- ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_mirror_sync_success',
- method='webhook', eventConfig={}, title='test'), 201),
- ('devtable', 'simple', dict(config={'url': 'http://example.com'}, event='repo_mirror_sync_failed',
- method='webhook', eventConfig={}, title='test'), 201)
-])
-def test_create_repo_notification(namespace, repository, body, expected_code, authd_client):
- params = {'repository': namespace + '/' + repository}
- conduct_api_call(authd_client, RepositoryNotificationList, 'POST', params, body, expected_code=expected_code)
-
-@pytest.mark.parametrize('namespace,repository,expected_code',[
- ('devtable', 'simple', 200)
-])
-def test_list_repo_notifications(namespace, repository, expected_code, authd_client):
- params = {'repository': namespace + '/' + repository}
- resp = conduct_api_call(authd_client, RepositoryNotificationList, 'GET', params, expected_code=expected_code).json
- assert len(resp['notifications']) > 0
-
-@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
- ('devtable', 'simple', 'exists', 200),
- ('devtable', 'simple', 'not found', 404),
-])
-def test_get_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
- monkeypatch.setattr('endpoints.api.repositorynotification.model.get_repo_notification', mock_get_notification(uuid))
- params = {'repository': namespace + '/' + repository, 'uuid': uuid}
- conduct_api_call(authd_client, RepositoryNotification, 'GET', params, expected_code=expected_code)
-
-@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
- ('devtable', 'simple', 'exists', 204),
- ('devtable', 'simple', 'not found', 400),
-])
-def test_delete_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
- monkeypatch.setattr('endpoints.api.repositorynotification.model.delete_repo_notification', mock_get_notification(uuid))
- params = {'repository': namespace + '/' + repository, 'uuid': uuid}
- conduct_api_call(authd_client, RepositoryNotification, 'DELETE', params, expected_code=expected_code)
-
-
-@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
- ('devtable', 'simple', 'exists', 204),
- ('devtable', 'simple', 'not found', 400),
-])
-def test_reset_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
- monkeypatch.setattr('endpoints.api.repositorynotification.model.reset_notification_number_of_failures', mock_get_notification(uuid))
- params = {'repository': namespace + '/' + repository, 'uuid': uuid}
- conduct_api_call(authd_client, RepositoryNotification, 'POST', params, expected_code=expected_code)
-
-
-@pytest.mark.parametrize('namespace,repository,uuid,expected_code',[
- ('devtable', 'simple', 'exists', 200),
- ('devtable', 'simple', 'not found', 400),
-])
-def test_test_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
- monkeypatch.setattr('endpoints.api.repositorynotification.model.queue_test_notification', mock_get_notification(uuid))
- params = {'repository': namespace + '/' + repository, 'uuid': uuid}
- conduct_api_call(authd_client, TestRepositoryNotification, 'POST', params, expected_code=expected_code)
diff --git a/endpoints/api/test/test_robot.py b/endpoints/api/test/test_robot.py
deleted file mode 100644
index 7c5349549..000000000
--- a/endpoints/api/test/test_robot.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import pytest
-import json
-
-from data import model
-from endpoints.api import api
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.robot import UserRobot, OrgRobot, UserRobotList, OrgRobotList
-from endpoints.test.shared import client_with_identity
-from util.names import parse_robot_username
-
-from test.test_ldap import mock_ldap
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('endpoint', [
- UserRobot,
- OrgRobot,
-])
-@pytest.mark.parametrize('body', [
- {},
- {'description': 'this is a description'},
- {'unstructured_metadata': {'foo': 'bar'}},
- {'description': 'this is a description', 'unstructured_metadata': {'foo': 'bar'}},
-])
-def test_create_robot_with_metadata(endpoint, body, client):
- with client_with_identity('devtable', client) as cl:
- # Create the robot with the specified body.
- conduct_api_call(cl, endpoint, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'somebot'},
- body, expected_code=201)
-
- # Ensure the create succeeded.
- resp = conduct_api_call(cl, endpoint, 'GET', {
- 'orgname': 'buynlarge',
- 'robot_shortname': 'somebot',
- })
-
- body = body or {}
- assert resp.json['description'] == (body.get('description') or '')
- assert resp.json['unstructured_metadata'] == (body.get('unstructured_metadata') or {})
-
-
-@pytest.mark.parametrize('endpoint, params', [
- (UserRobot, {'robot_shortname': 'dtrobot'}),
- (OrgRobot, {'orgname': 'buynlarge', 'robot_shortname': 'coolrobot'}),
-])
-def test_retrieve_robot(endpoint, params, app, client):
- with client_with_identity('devtable', client) as cl:
- result = conduct_api_call(cl, endpoint, 'GET', params, None)
- assert result.json['token'] is not None
-
-
-@pytest.mark.parametrize('endpoint, params, bot_endpoint', [
- (UserRobotList, {}, UserRobot),
- (OrgRobotList, {'orgname': 'buynlarge'}, OrgRobot),
-])
-@pytest.mark.parametrize('include_token', [
- True,
- False,
-])
-@pytest.mark.parametrize('limit', [
- None,
- 1,
- 5,
-])
-def test_retrieve_robots(endpoint, params, bot_endpoint, include_token, limit, app, client):
- params['token'] = 'true' if include_token else 'false'
-
- if limit is not None:
- params['limit'] = limit
-
- with client_with_identity('devtable', client) as cl:
- result = conduct_api_call(cl, endpoint, 'GET', params, None)
-
- if limit is not None:
- assert len(result.json['robots']) <= limit
-
- for robot in result.json['robots']:
- assert (robot.get('token') is not None) == include_token
- if include_token:
- bot_params = dict(params)
- bot_params['robot_shortname'] = parse_robot_username(robot['name'])[1]
- result = conduct_api_call(cl, bot_endpoint, 'GET', bot_params, None)
- assert robot.get('token') == result.json['token']
-
-
-@pytest.mark.parametrize('username, is_admin', [
- ('devtable', True),
- ('reader', False),
-])
-@pytest.mark.parametrize('with_permissions', [
- True,
- False,
-])
-def test_retrieve_robots_token_permission(username, is_admin, with_permissions, app, client):
- with client_with_identity(username, client) as cl:
- params = {'orgname': 'buynlarge', 'token': 'true'}
- if with_permissions:
- params['permissions'] = 'true'
-
- result = conduct_api_call(cl, OrgRobotList, 'GET', params, None)
- assert result.json['robots']
- for robot in result.json['robots']:
- assert (robot.get('token') is not None) == is_admin
- assert (robot.get('repositories') is not None) == (is_admin and with_permissions)
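The list tests above cross-check each listed robot's token against the per-robot endpoint, recovering the short name from the fully qualified namespace+shortname form via parse_robot_username. A standalone illustration of that convention (behavior sketched, not the util.names implementation itself):

    from util.names import parse_robot_username

    namespace, shortname = parse_robot_username('buynlarge+coolrobot')
    assert namespace == 'buynlarge'
    assert shortname == 'coolrobot'

    # Roughly equivalent to splitting on the robot-name separator:
    assert 'buynlarge+coolrobot'.split('+', 1) == ['buynlarge', 'coolrobot']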
diff --git a/endpoints/api/test/test_search.py b/endpoints/api/test/test_search.py
deleted file mode 100644
index 5e034934c..000000000
--- a/endpoints/api/test/test_search.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import pytest
-
-from playhouse.test_utils import assert_query_count
-
-from data import model, database
-from endpoints.api.search import ConductRepositorySearch, ConductSearch
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-@pytest.mark.parametrize('query', [
- (''),
- ('simple'),
- ('public'),
- ('repository'),
-])
-def test_repository_search(query, client):
- # Prime the caches.
- database.Repository.kind.get_id('image')
- database.Repository.kind.get_name(1)
-
- with client_with_identity('devtable', client) as cl:
- params = {'query': query}
- with assert_query_count(7):
- result = conduct_api_call(cl, ConductRepositorySearch, 'GET', params, None, 200).json
- assert result['start_index'] == 0
- assert result['page'] == 1
- assert len(result['results'])
-
-
-@pytest.mark.parametrize('query', [
- ('simple'),
- ('public'),
- ('repository'),
-])
-def test_search_query_count(query, client):
- with client_with_identity('devtable', client) as cl:
- params = {'query': query}
- with assert_query_count(10):
- result = conduct_api_call(cl, ConductSearch, 'GET', params, None, 200).json
- assert len(result['results'])
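Both deleted search tests pin the exact number of SQL statements with peewee's assert_query_count context manager, priming the Repository.kind lookup caches first so the count stays deterministic. The same pattern in a self-contained form, against a throwaway model (the table and field names here are hypothetical):

    from peewee import SqliteDatabase, Model, CharField
    from playhouse.test_utils import assert_query_count

    db = SqliteDatabase(':memory:')

    class Widget(Model):
        name = CharField()

        class Meta:
            database = db

    db.create_tables([Widget])
    Widget.create(name='example')

    # Exactly one SELECT may run inside the block; more or fewer fails.
    with assert_query_count(1):
        list(Widget.select())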
diff --git a/endpoints/api/test/test_secscan.py b/endpoints/api/test/test_secscan.py
deleted file mode 100644
index 40afa6ac3..000000000
--- a/endpoints/api/test/test_secscan.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import base64
-
-import pytest
-
-from data.registry_model import registry_model
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.secscan import RepositoryImageSecurity, RepositoryManifestSecurity
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('endpoint', [
- RepositoryImageSecurity,
- RepositoryManifestSecurity,
-])
-def test_get_security_info_with_pull_secret(endpoint, client):
- repository_ref = registry_model.lookup_repository('devtable', 'simple')
- tag = registry_model.get_repo_tag(repository_ref, 'latest', include_legacy_image=True)
- manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
-
- params = {
- 'repository': 'devtable/simple',
- 'imageid': tag.legacy_image.docker_image_id,
- 'manifestref': manifest.digest,
- }
-
- headers = {
- 'Authorization': 'Basic %s' % base64.b64encode('devtable:password'),
- }
-
- conduct_api_call(client, endpoint, 'GET', params, None, headers=headers, expected_code=200)
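The Basic credentials above rely on Python 2, where base64.b64encode accepts a str directly; under Python 3 the same call needs bytes in and a decode out. A version-agnostic sketch of building the header:

    import base64

    def basic_auth_header(username, password):
        # b64encode operates on bytes; decode back to text for the header value.
        token = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8'))
        return {'Authorization': 'Basic %s' % token.decode('ascii')}

    headers = basic_auth_header('devtable', 'password')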
diff --git a/endpoints/api/test/test_security.py b/endpoints/api/test/test_security.py
deleted file mode 100644
index a2b18ef4c..000000000
--- a/endpoints/api/test/test_security.py
+++ /dev/null
@@ -1,1480 +0,0 @@
-from mock import patch
-
-import pytest
-from flask_principal import AnonymousIdentity
-
-from endpoints.api import api
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity, toggle_feature
-
-from endpoints.api.appspecifictokens import *
-from endpoints.api.billing import *
-from endpoints.api.build import *
-from endpoints.api.discovery import *
-from endpoints.api.globalmessages import *
-from endpoints.api.image import *
-from endpoints.api.logs import *
-from endpoints.api.manifest import *
-from endpoints.api.organization import *
-from endpoints.api.permission import *
-from endpoints.api.prototype import *
-from endpoints.api.repoemail import *
-from endpoints.api.repository import *
-from endpoints.api.repositorynotification import *
-from endpoints.api.repotoken import *
-from endpoints.api.robot import *
-from endpoints.api.search import *
-from endpoints.api.secscan import *
-from endpoints.api.signing import *
-from endpoints.api.subscribe import *
-from endpoints.api.suconfig import *
-from endpoints.api.superuser import *
-from endpoints.api.tag import *
-from endpoints.api.team import *
-from endpoints.api.trigger import *
-from endpoints.api.user import *
-from endpoints.api.mirror import *
-
-from endpoints.api.repository import Repository
-
-from test.fixtures import *
-
-ORG_PARAMS = {'orgname': 'buynlarge'}
-TEAM_PARAMS = {'orgname': 'buynlarge', 'teamname': 'owners'}
-BUILD_PARAMS = {'build_uuid': 'test-1234'}
-REPO_PARAMS = {'repository': 'devtable/someapp'}
-SEARCH_PARAMS = {'query': ''}
-NOTIFICATION_PARAMS = {'namespace': 'devtable', 'repository': 'devtable/simple', 'uuid': 'some uuid'}
-TOKEN_PARAMS = {'token_uuid': 'someuuid'}
-TRIGGER_PARAMS = {'repository': 'devtable/simple', 'trigger_uuid': 'someuuid'}
-MANIFEST_PARAMS = {'repository': 'devtable/simple', 'manifestref': 'sha256:deadbeef'}
-EXPORTLOGS_PARAMS = {'callback_url': 'http://foo'}
-
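Each SECURITY_TESTS entry below is a six-tuple: resource class, HTTP method, URL params, request body, acting identity (None for anonymous), and the status code the endpoint must return. The parametrized driver that walks the matrix sits past the end of this hunk; a sketch of its likely shape, with the function name and fixture set assumed:

    @pytest.mark.parametrize('resource,method,params,body,identity,expected', SECURITY_TESTS)
    def test_api_security(resource, method, params, body, identity, expected, app, client):
        if identity is None:
            conduct_api_call(client, resource, method, params, body, expected_code=expected)
        else:
            with client_with_identity(identity, client) as cl:
                conduct_api_call(cl, resource, method, params, body, expected_code=expected)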
-SECURITY_TESTS = [
- (AppTokens, 'GET', {}, {}, None, 401),
- (AppTokens, 'GET', {}, {}, 'freshuser', 200),
- (AppTokens, 'GET', {}, {}, 'reader', 200),
- (AppTokens, 'GET', {}, {}, 'devtable', 200),
-
- (AppTokens, 'POST', {}, {}, None, 401),
- (AppTokens, 'POST', {}, {}, 'freshuser', 400),
- (AppTokens, 'POST', {}, {}, 'reader', 400),
- (AppTokens, 'POST', {}, {}, 'devtable', 400),
-
- (AppToken, 'GET', TOKEN_PARAMS, {}, None, 401),
- (AppToken, 'GET', TOKEN_PARAMS, {}, 'freshuser', 404),
- (AppToken, 'GET', TOKEN_PARAMS, {}, 'reader', 404),
- (AppToken, 'GET', TOKEN_PARAMS, {}, 'devtable', 404),
-
- (AppToken, 'DELETE', TOKEN_PARAMS, {}, None, 401),
- (AppToken, 'DELETE', TOKEN_PARAMS, {}, 'freshuser', 404),
- (AppToken, 'DELETE', TOKEN_PARAMS, {}, 'reader', 404),
- (AppToken, 'DELETE', TOKEN_PARAMS, {}, 'devtable', 404),
-
- (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, None, 401),
- (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, 'freshuser', 403),
- (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, 'reader', 403),
- (RepositoryManifest, 'GET', MANIFEST_PARAMS, {}, 'devtable', 404),
-
- (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, None, 401),
- (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, 'freshuser', 403),
- (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, 'reader', 403),
- (OrganizationCollaboratorList, 'GET', ORG_PARAMS, None, 'devtable', 200),
-
- (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, None, 401),
- (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, 'freshuser', 403),
- (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, 'reader', 403),
- (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, 'devtable', 400),
-
- (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, None, 401),
- (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, 'freshuser', 403),
- (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, 'reader', 403),
- (OrganizationTeamSyncing, 'DELETE', TEAM_PARAMS, {}, 'devtable', 200),
-
- (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, None, 200),
- (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, 'freshuser', 200),
- (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, 'reader', 200),
- (ConductRepositorySearch, 'GET', SEARCH_PARAMS, None, 'devtable', 200),
-
- (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, None, 401),
- (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, 'freshuser', 403),
- (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, 'reader', 403),
- (SuperUserRepositoryBuildLogs, 'GET', BUILD_PARAMS, None, 'devtable', 400),
-
- (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, None, 401),
- (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, 'freshuser', 403),
- (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, 'reader', 403),
- (SuperUserRepositoryBuildStatus, 'GET', BUILD_PARAMS, None, 'devtable', 400),
-
- (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, None, 401),
- (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, 'freshuser', 403),
- (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, 'reader', 403),
- (SuperUserRepositoryBuildResource, 'GET', BUILD_PARAMS, None, 'devtable', 404),
-
- (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'freshuser', 403),
- (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'reader', 403),
- (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'devtable', 404),
-
- (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, None, 401),
- (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'freshuser', 403),
- (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'reader', 403),
- (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'devtable', 400),
-
- (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, None, 401),
- (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'freshuser', 403),
- (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'reader', 403),
- (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'devtable', 404),
-
- (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, None, 401),
- (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, 'freshuser', 403),
- (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, 'reader', 403),
- (BuildTrigger, 'GET', TRIGGER_PARAMS, {}, 'devtable', 404),
-
- (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, None, 401),
- (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, 'freshuser', 403),
- (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, 'reader', 403),
- (BuildTrigger, 'DELETE', TRIGGER_PARAMS, {}, 'devtable', 404),
-
- (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, None, 401),
- (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, 'freshuser', 403),
- (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, 'reader', 403),
- (BuildTrigger, 'PUT', TRIGGER_PARAMS, {}, 'devtable', 400),
-
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'devtable', 'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryUserTransitivePermission, 'GET', {'username': 'devtable', 'repository': 'devtable/nope'}, None, 'devtable', 404),
-
- (StarredRepositoryList, 'GET', None, None, None, 401),
- (StarredRepositoryList, 'GET', None, None, 'devtable', 200),
- (StarredRepositoryList, 'GET', None, None, 'freshuser', 200),
- (StarredRepositoryList, 'GET', None, None, 'reader', 200),
- (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, None, 401),
- (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, 'devtable', 201),
- (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, 'freshuser', 201),
- (StarredRepositoryList, 'POST', None, {u'namespace': 'public', u'repository': 'publicrepo'}, 'reader', 201),
-
- (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, None, 401),
- (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'devtable', 204),
- (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'freshuser', 204),
- (StarredRepository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'reader', 204),
-
- (UserNotification, 'GET', {'uuid': 'someuuid'}, None, None, 401),
- (UserNotification, 'GET', {'uuid': 'someuuid'}, None, 'devtable', 404),
- (UserNotification, 'GET', {'uuid': 'someuuid'}, None, 'freshuser', 404),
- (UserNotification, 'GET', {'uuid': 'someuuid'}, None, 'reader', 404),
- (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, None, 401),
- (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, 'devtable', 404),
- (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, 'freshuser', 404),
- (UserNotification, 'PUT', {'uuid': 'someuuid'}, {}, 'reader', 404),
-
- (UserInvoiceList, 'GET', None, None, None, 401),
- (UserInvoiceList, 'GET', None, None, 'devtable', 200),
- (UserInvoiceList, 'GET', None, None, 'freshuser', 404),
- (UserInvoiceList, 'GET', None, None, 'reader', 404),
-
- (PrivateRepositories, 'GET', None, None, None, 401),
- (PrivateRepositories, 'GET', None, None, 'devtable', 200),
- (PrivateRepositories, 'GET', None, None, 'freshuser', 200),
- (PrivateRepositories, 'GET', None, None, 'reader', 200),
-
- (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, None, 401),
- (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, 'devtable', 400),
- (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, 'freshuser', 400),
- (ConvertToOrganization, 'POST', None, {u'adminPassword': 'IQTM', u'plan': '1RB4', u'adminUser': '44E8'}, 'reader', 400),
-
- (UserRobotList, 'GET', None, None, None, 401),
- (UserRobotList, 'GET', None, None, 'devtable', 200),
- (UserRobotList, 'GET', None, None, 'freshuser', 200),
- (UserRobotList, 'GET', None, None, 'reader', 200),
-
- (UserCard, 'GET', None, None, None, 401),
- (UserCard, 'GET', None, None, 'devtable', 200),
- (UserCard, 'GET', None, None, 'freshuser', 200),
- (UserCard, 'GET', None, None, 'reader', 200),
- (UserCard, 'POST', None, {u'token': 'ORH4'}, None, 401),
-
- (UserPlan, 'GET', None, None, None, 401),
- (UserPlan, 'GET', None, None, 'devtable', 200),
- (UserPlan, 'GET', None, None, 'freshuser', 200),
- (UserPlan, 'GET', None, None, 'reader', 200),
- (UserPlan, 'PUT', None, {u'plan': '1QIK'}, None, 401),
-
- (UserLogs, 'GET', None, None, None, 401),
- (UserLogs, 'GET', None, None, 'devtable', 200),
- (UserLogs, 'GET', None, None, 'freshuser', 200),
- (UserLogs, 'GET', None, None, 'reader', 200),
-
- (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, None, 401),
- (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, 'devtable', 400),
- (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, 'freshuser', 400),
- (OrganizationList, 'POST', None, {u'name': 'KSIS', u'email': 'DHVZ'}, 'reader', 400),
-
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
-
- (RepositoryList, 'GET', None, None, None, 400),
- (RepositoryList, 'GET', None, None, 'devtable', 400),
- (RepositoryList, 'GET', None, None, 'freshuser', 400),
- (RepositoryList, 'GET', None, None, 'reader', 400),
- (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, None, 400),
- (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, 'devtable', 201),
- (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, 'freshuser', 201),
- (RepositoryList, 'POST', None, {u'repository': 'XZGB', u'visibility': u'public', u'description': '0O8U'}, 'reader', 201),
-
- (DiscoveryResource, 'GET', None, None, None, 200),
- (DiscoveryResource, 'GET', None, None, 'devtable', 200),
- (DiscoveryResource, 'GET', None, None, 'freshuser', 200),
- (DiscoveryResource, 'GET', None, None, 'reader', 200),
-
- (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, None, 200),
- (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, 'devtable', 200),
- (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, 'freshuser', 200),
- (FileDropResource, 'POST', None, {u'mimeType': 'TKBX'}, 'reader', 200),
-
- (Recovery, 'POST', None, {u'email': '826S'}, None, 200),
- (Recovery, 'POST', None, {u'email': '826S'}, 'devtable', 200),
- (Recovery, 'POST', None, {u'email': '826S'}, 'freshuser', 200),
- (Recovery, 'POST', None, {u'email': '826S'}, 'reader', 200),
-
- (Signout, 'POST', None, None, None, 200),
- (Signout, 'POST', None, None, 'devtable', 200),
- (Signout, 'POST', None, None, 'freshuser', 200),
- (Signout, 'POST', None, None, 'reader', 200),
-
- (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, None, 403),
- (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, 'devtable', 403),
- (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, 'freshuser', 403),
- (Signin, 'POST', None, {u'username': 'E9RY', u'password': 'LQ0N'}, 'reader', 403),
-
- (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, None, 400),
- (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, 'devtable', 400),
- (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, 'freshuser', 400),
- (ExternalLoginInformation, 'POST', {'service_id': 'someservice'}, {}, 'reader', 400),
-
- (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, None, 401),
- (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, 'devtable', 200),
- (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, 'freshuser', 200),
- (DetachExternal, 'POST', {'service_id': 'someservice'}, {}, 'reader', 200),
-
- (VerifyUser, 'POST', None, {u'password': 'LQ0N'}, None, 401),
- (VerifyUser, 'POST', None, {u'password': 'password'}, 'devtable', 200),
- (VerifyUser, 'POST', None, {u'password': 'LQ0N'}, 'freshuser', 403),
- (VerifyUser, 'POST', None, {u'password': 'LQ0N'}, 'reader', 403),
-
- (ClientKey, 'POST', None, {u'password': 'LQ0N'}, None, 401),
- (ClientKey, 'POST', None, {u'password': 'password'}, 'devtable', 200),
- (ClientKey, 'POST', None, {u'password': 'LQ0N'}, 'freshuser', 400),
- (ClientKey, 'POST', None, {u'password': 'password'}, 'reader', 200),
-
- (ListPlans, 'GET', None, None, None, 200),
- (ListPlans, 'GET', None, None, 'devtable', 200),
- (ListPlans, 'GET', None, None, 'freshuser', 200),
- (ListPlans, 'GET', None, None, 'reader', 200),
-
- (User, 'GET', None, None, None, 401),
- (User, 'GET', None, None, 'devtable', 200),
- (User, 'GET', None, None, 'freshuser', 200),
- (User, 'GET', None, None, 'reader', 200),
- (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, None, 400),
- (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, 'devtable', 400),
- (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, 'freshuser', 400),
- (User, 'POST', None, {u'username': 'T946', u'password': '0SG4', u'email': 'MENT'}, 'reader', 400),
- (User, 'PUT', None, {}, None, 401),
- (User, 'PUT', None, {}, 'devtable', 200),
- (User, 'PUT', None, {}, 'freshuser', 200),
- (User, 'PUT', None, {}, 'reader', 200),
- (User, 'DELETE', None, {}, None, 401),
- (User, 'DELETE', None, {}, 'devtable', 400),
- (User, 'DELETE', None, {}, 'freshuser', 204),
- (User, 'DELETE', None, {}, 'reader', 204),
-
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, None, 401),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'devtable', 400),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'freshuser', 403),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'reader', 403),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, None, 401),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'devtable', 200),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'freshuser', 403),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'readers'}, None, 'reader', 403),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, None, 401),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'devtable', 400),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'freshuser', 403),
- (TeamMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'reader', 403),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, None, 401),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'devtable', 400),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'freshuser', 403),
- (TeamMember, 'PUT', {'orgname': 'buynlarge', 'membername': 'devtable', 'teamname': 'owners'}, None, 'reader', 403),
-
- (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, None, 401),
- (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'devtable', 200),
- (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'freshuser', 403),
- (TeamPermissions, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'reader', 403),
-
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, None, 401),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'devtable', 200),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'freshuser', 403),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'reader', 200),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, None, 401),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'devtable', 200),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'freshuser', 403),
- (TeamMemberList, 'GET', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'reader', 403),
-
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, None, 401),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'devtable', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'devtable', 400),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'devtable', 400),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, None, 401),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'devtable', 400),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryUserPermission, 'DELETE', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryUserPermission, 'GET', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, None, 401),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'devtable', 400),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryUserPermission, 'PUT', {'username': 'A2O9', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'reader', 403),
-
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, None, 401),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'devtable', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, None, 401),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'devtable', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'readers'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, None, 401),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'devtable', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, None, 401),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'devtable', 400),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, None, 401),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'devtable', 400),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'readers'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, None, 401),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, 'devtable', 400),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'readers'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, None, 401),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'devtable', 204),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, None, 401),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'devtable', 200),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, None, 401),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'devtable', 200),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'readers'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, None, 401),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'devtable', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, None, 401),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'devtable', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'public/publicrepo', 'teamname': 'owners'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, None, 401),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'devtable', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'public/publicrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, None, 401),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'devtable', 400),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, None, 401),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'devtable', 400),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'devtable/shared', 'teamname': 'owners'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, None, 401),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, 'devtable', 400),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'devtable/shared', 'teamname': 'owners'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, None, 401),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'devtable', 400),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'DELETE', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, None, 401),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'devtable', 400),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'freshuser', 403),
- (RepositoryTeamPermission, 'GET', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, None, 'reader', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, None, 401),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'devtable', 200),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryTeamPermission, 'PUT', {'repository': 'buynlarge/orgrepo', 'teamname': 'owners'}, {u'role': u'read'}, 'reader', 403),
-
- (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
- (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'devtable', 403),
- (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
- (BuildTriggerActivate, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
- (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, None, 401),
- (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
- (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
- (BuildTriggerActivate, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
- (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
- (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
- (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
- (BuildTriggerActivate, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
-
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'devtable', 403),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'public/publicrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, None, 401),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'devtable/shared', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, None, 401),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {'config': {}}, 'devtable', 404),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'freshuser', 403),
- (BuildTriggerFieldValues, 'POST', {'field_name': 'test_field', 'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'SWO1'}, {}, 'reader', 403),
-
- (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, None, None, 401),
- (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, {'namespace': 'foo'}, 'devtable', 403),
- (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, None, 'freshuser', 403),
- (BuildTriggerSources, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '831C'}, None, 'reader', 403),
- (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, None, None, 401),
- (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, {'namespace': 'foo'}, 'devtable', 404),
- (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, None, 'freshuser', 403),
- (BuildTriggerSources, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '831C'}, None, 'reader', 403),
- (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, None, None, 401),
- (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, {'namespace': 'foo'}, 'devtable', 404),
- (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, None, 'freshuser', 403),
- (BuildTriggerSources, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '831C'}, None, 'reader', 403),
-
- (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, None, 401),
- (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, 'devtable', 403),
- (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, 'freshuser', 403),
- (BuildTriggerSubdirs, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '4I2Y'}, {}, 'reader', 403),
- (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, None, 401),
- (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, 'devtable', 404),
- (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, 'freshuser', 403),
- (BuildTriggerSubdirs, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '4I2Y'}, {}, 'reader', 403),
- (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, None, 401),
- (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, 'devtable', 404),
- (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, 'freshuser', 403),
- (BuildTriggerSubdirs, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '4I2Y'}, {}, 'reader', 403),
-
- (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, None, 401),
- (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, 'devtable', 403),
- (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, 'freshuser', 403),
- (TriggerBuildList, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'ZM1W'}, None, 'reader', 403),
- (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, None, 401),
- (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, 'devtable', 200),
- (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, 'freshuser', 403),
- (TriggerBuildList, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'ZM1W'}, None, 'reader', 403),
- (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, None, 401),
- (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, 'devtable', 200),
- (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, 'freshuser', 403),
- (TriggerBuildList, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'ZM1W'}, None, 'reader', 403),
-
- (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
- (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'devtable', 403),
- (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
- (ActivateBuildTrigger, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
- (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, None, 401),
- (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, {}, 'devtable', 404),
- (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
- (ActivateBuildTrigger, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
- (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
- (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, {}, 'devtable', 404),
- (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
- (ActivateBuildTrigger, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
-
- (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
- (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, {'config': {}}, 'devtable', 403),
- (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
- (BuildTriggerAnalyze, 'POST', {'repository': 'public/publicrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
- (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, None, 401),
- (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, {'config': {}}, 'devtable', 404),
- (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
- (BuildTriggerAnalyze, 'POST', {'repository': 'devtable/shared', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
- (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, None, 401),
- (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, {'config': {}}, 'devtable', 404),
- (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'freshuser', 403),
- (BuildTriggerAnalyze, 'POST', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': '0BYE'}, None, 'reader', 403),
-
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, None, 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'devtable', 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'freshuser', 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'reader', 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'devtable', 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'reader', 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryBuildStatus, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 400),
-
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, None, 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'devtable', 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'freshuser', 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'reader', 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'reader', 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryBuildResource, 'GET', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 404),
-
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryBuildResource, 'DELETE', {'build_uuid': 'FG86', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, 'devtable', 400),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryBuildLogs, 'GET', {'build_uuid': 'S5J8', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, None, 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'devtable', 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'freshuser', 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'reader', 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, None, 401),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'devtable', 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'reader', 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (ListRepositoryTags, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
-
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, None, 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'devtable', 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'freshuser', 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'public/publicrepo'}, None, 'reader', 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'devtable/shared'}, None, 'reader', 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryTagImages, 'GET', {'tag': 'TN96', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 404),
-
- (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, None, 401),
- (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, 'devtable', 404),
- (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, 'freshuser', 403),
- (PermissionPrototype, 'DELETE', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, None, 'reader', 403),
- (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, None, 401),
- (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, 'devtable', 404),
- (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, 'freshuser', 403),
- (PermissionPrototype, 'PUT', {'orgname': 'buynlarge', 'prototypeid': 'L24B'}, {u'role': u'read'}, 'reader', 403),
-
- (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, None, 401),
- (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'devtable', 404),
- (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'freshuser', 403),
- (OrganizationMember, 'DELETE', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'reader', 403),
- (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, None, 401),
- (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'devtable', 404),
- (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'freshuser', 403),
- (OrganizationMember, 'GET', {'orgname': 'buynlarge', 'membername': 'someuser'}, None, 'reader', 403),
-
- (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, None, 401),
- (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'devtable', 400),
- (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'freshuser', 403),
- (OrgRobot, 'DELETE', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'reader', 403),
- (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, None, 401),
- (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'devtable', 400),
- (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'freshuser', 403),
- (OrgRobot, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, None, 'reader', 403),
- (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, None, 401),
- (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, 'devtable', 400),
- (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, 'freshuser', 403),
- (OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'Z7PD'}, {}, 'reader', 403),
-
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, None, 401),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'devtable', 204),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'freshuser', 403),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'readers'}, None, 'reader', 403),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, None, 401),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, 'devtable', 200),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, 'freshuser', 403),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'readers'}, {u'role': u'member'}, 'reader', 403),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, None, 401),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'devtable', 400),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'freshuser', 403),
- (OrganizationTeam, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners'}, None, 'reader', 403),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, None, 401),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, 'devtable', 400),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, 'freshuser', 403),
- (OrganizationTeam, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners'}, {u'role': u'member'}, 'reader', 403),
-
- (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryTeamPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryUserPermissionList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryUserPermissionList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryUserPermissionList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
- (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 403),
- (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
- (BuildTrigger, 'DELETE', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
- (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
- (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 403),
- (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
- (BuildTrigger, 'GET', {'repository': 'public/publicrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
- (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, None, 401),
- (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
- (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
- (BuildTrigger, 'DELETE', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
- (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, None, 401),
- (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
- (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
- (BuildTrigger, 'GET', {'repository': 'devtable/shared', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
- (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
- (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
- (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
- (BuildTrigger, 'DELETE', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
- (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, None, 401),
- (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'devtable', 404),
- (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'freshuser', 403),
- (BuildTrigger, 'GET', {'repository': 'buynlarge/orgrepo', 'trigger_uuid': 'D6TI'}, None, 'reader', 403),
-
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'devtable', 400),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryNotification, 'DELETE', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryNotification, 'GET', {'uuid': 'QFAT', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, None, 401),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'devtable', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'public/publicrepo'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'devtable', 410),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'devtable', 410),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, None, 401),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'devtable', 410),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'devtable/shared'}, {u'role': u'read'}, 'reader', 403),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 410),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryToken, 'DELETE', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 410),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryToken, 'GET', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, None, 401),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'devtable', 410),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'freshuser', 403),
- (RepositoryToken, 'PUT', {'code': 'UJQB', 'repository': 'buynlarge/orgrepo'}, {u'role': u'read'}, 'reader', 403),
-
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, None, 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, 'devtable', 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, 'freshuser', 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'public/publicrepo'}, None, 'reader', 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'devtable/shared'}, None, 'reader', 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryImage, 'GET', {'image_id': '5AVQ', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 404),
-
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, None, 401),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'devtable', 403),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'reader', 403),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, None, 401),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'devtable', 404),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'freshuser', 403),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'reader', 403),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, None, 401),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'devtable', 404),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
- (RestoreTag, 'POST', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'reader', 403),
-
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, None, 401),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'devtable', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'public/publicrepo'}, {u'image': 'WXNG'}, 'reader', 403),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, 'devtable', 400),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, None, 401),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'devtable', 404),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'freshuser', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'devtable/shared'}, {u'image': 'WXNG'}, 'reader', 403),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 400),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryTag, 'DELETE', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, None, 401),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'devtable', 404),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'freshuser', 403),
- (RepositoryTag, 'PUT', {'tag': 'HP8R', 'repository': 'buynlarge/orgrepo'}, {u'image': 'WXNG'}, 'reader', 403),
-
- (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (PermissionPrototypeList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
- (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, None, 401),
- (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, 'devtable', 400),
- (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, 'freshuser', 403),
- (PermissionPrototypeList, 'POST', {'orgname': 'buynlarge'}, {u'role': u'read', u'delegate': {u'kind': u'user', u'name': '7DGP'}}, 'reader', 403),
-
- (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrganizationInvoiceList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
-
- (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrgPrivateRepositories, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
-
- (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrganizationMemberList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
-
- (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrgRobotList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 200),
-
- (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrganizationCard, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
- (OrganizationCard, 'POST', {'orgname': 'buynlarge'}, {u'token': '4VFR'}, None, 401),
- (OrganizationCard, 'POST', {'orgname': 'buynlarge'}, {u'token': '4VFR'}, 'freshuser', 403),
- (OrganizationCard, 'POST', {'orgname': 'buynlarge'}, {u'token': '4VFR'}, 'reader', 403),
-
- (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrganizationPlan, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
- (OrganizationPlan, 'PUT', {'orgname': 'buynlarge'}, {u'plan': 'WWEI'}, None, 401),
- (OrganizationPlan, 'PUT', {'orgname': 'buynlarge'}, {u'plan': 'WWEI'}, 'freshuser', 403),
- (OrganizationPlan, 'PUT', {'orgname': 'buynlarge'}, {u'plan': 'WWEI'}, 'reader', 403),
-
- (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrgLogs, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
-
- (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, None, 401),
- (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, 'devtable', 403),
- (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, 'freshuser', 403),
- (RepositoryVisibility, 'POST', {'repository': 'public/publicrepo'}, {u'visibility': u'public'}, 'reader', 403),
- (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, None, 401),
- (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, 'devtable', 200),
- (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, 'freshuser', 403),
- (RepositoryVisibility, 'POST', {'repository': 'devtable/shared'}, {u'visibility': u'public'}, 'reader', 403),
- (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, None, 401),
- (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, 'devtable', 200),
- (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, 'freshuser', 403),
- (RepositoryVisibility, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'visibility': u'public'}, 'reader', 403),
-
- (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
- (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (BuildTriggerList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (BuildTriggerList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (BuildTriggerList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryNotificationList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, None, 401),
- (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, 'devtable', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, 'freshuser', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'public/publicrepo'}, {}, 'reader', 403),
- (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryNotificationList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {}, None, 401),
- (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {'config': {'email': 'a@b.com'}, 'event': 'repo_push', 'method': 'email'}, 'devtable', 400),
- (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {}, 'freshuser', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'devtable/shared'}, {}, 'reader', 403),
- (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryNotificationList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {}, None, 401),
- (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {'config': {'email': 'a@b.com'}, 'event': 'repo_push', 'method': 'email'}, 'devtable', 400),
- (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {}, 'freshuser', 403),
- (RepositoryNotificationList, 'POST', {'repository': 'buynlarge/orgrepo'}, {}, 'reader', 403),
-
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, None, 401),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, 'devtable', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, 'freshuser', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'public/publicrepo'}, {}, 'reader', 403),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, 'devtable', 404),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, None, 401),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, 'devtable', 200),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, 'freshuser', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'devtable/shared'}, {}, 'reader', 403),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, 'devtable', 404),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryAuthorizedEmail, 'GET', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, None, 401),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, 'devtable', 200),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, 'freshuser', 403),
- (RepositoryAuthorizedEmail, 'POST', {'email': 'jschorr@devtable.com', 'repository': 'buynlarge/orgrepo'}, {}, 'reader', 403),
-
- (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryTokenList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, None, 401),
- (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, 'devtable', 403),
- (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, 'freshuser', 403),
- (RepositoryTokenList, 'POST', {'repository': 'public/publicrepo'}, {u'friendlyName': 'R1CN'}, 'reader', 403),
- (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 410),
- (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryTokenList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, None, 401),
- (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, 'devtable', 410),
- (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, 'freshuser', 403),
- (RepositoryTokenList, 'POST', {'repository': 'devtable/shared'}, {u'friendlyName': 'R1CN'}, 'reader', 403),
- (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 410),
- (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryTokenList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, None, 401),
- (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, 'devtable', 410),
- (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, 'freshuser', 403),
- (RepositoryTokenList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'friendlyName': 'R1CN'}, 'reader', 403),
-
- (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
- (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
- (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
- (RepositoryBuildList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
- (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, None, 401),
- (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, 'devtable', 403),
- (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, 'freshuser', 403),
- (RepositoryBuildList, 'POST', {'repository': 'public/publicrepo'}, {u'file_id': 'UX7K'}, 'reader', 403),
- (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryBuildList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 200),
- (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, None, 401),
- (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, 'devtable', 201),
- (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, 'freshuser', 403),
- (RepositoryBuildList, 'POST', {'repository': 'devtable/shared'}, {u'file_id': 'UX7K'}, 'reader', 403),
- (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryBuildList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
- (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, None, 401),
- (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, 'devtable', 201),
- (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, 'freshuser', 403),
- (RepositoryBuildList, 'POST', {'repository': 'buynlarge/orgrepo'}, {u'file_id': 'UX7K'}, 'reader', 403),
-
- (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
- (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
- (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
- (RepositoryImageList, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
- (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryImageList, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 200),
- (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryImageList, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
-
- (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, None, 401),
- (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (RepositoryLogs, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (RepositoryLogs, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (RepositoryLogs, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
-
- (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, None, 401),
- (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
- (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
- (UserRobot, 'DELETE', {'robot_shortname': 'robotname'}, None, 'reader', 400),
- (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, None, 401),
- (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
- (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
- (UserRobot, 'GET', {'robot_shortname': 'robotname'}, None, 'reader', 400),
- (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, None, 401),
- (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, 'devtable', 201),
- (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, 'freshuser', 201),
- (UserRobot, 'PUT', {'robot_shortname': 'robotname'}, {}, 'reader', 201),
-
- (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, None, 401),
- (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
- (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
- (RegenerateUserRobot, 'POST', {'robot_shortname': 'robotname'}, None, 'reader', 400),
-
- (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, None, 401),
- (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'devtable', 400),
- (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'freshuser', 403),
- (RegenerateOrgRobot, 'POST', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'reader', 403),
-
- (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, None, 401),
- (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, 'devtable', 400),
- (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, 'freshuser', 400),
- (UserRobotPermissions, 'GET', {'robot_shortname': 'robotname'}, None, 'reader', 400),
-
- (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, None, 401),
- (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'devtable', 400),
- (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'freshuser', 403),
- (OrgRobotPermissions, 'GET', {'orgname': 'buynlarge', 'robot_shortname': 'robotname'}, None, 'reader', 403),
-
- (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, None, 401),
- (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, 'devtable', 204),
- (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, 'freshuser', 403),
- (Organization, 'DELETE', {'orgname': 'buynlarge'}, {}, 'reader', 403),
- (Organization, 'GET', {'orgname': 'buynlarge'}, None, None, 200),
- (Organization, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (Organization, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 200),
- (Organization, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 200),
- (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, None, 401),
- (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, 'devtable', 200),
- (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, 'freshuser', 403),
- (Organization, 'PUT', {'orgname': 'buynlarge'}, {}, 'reader', 403),
-
- (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, None, 401),
- (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'devtable', 403),
- (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'freshuser', 403),
- (Repository, 'DELETE', {'repository': 'public/publicrepo'}, None, 'reader', 403),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, None, 200),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'devtable', 200),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'freshuser', 200),
- (Repository, 'GET', {'repository': 'public/publicrepo'}, None, 'reader', 200),
- (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, None, 401),
- (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, 'devtable', 403),
- (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, 'freshuser', 403),
- (Repository, 'PUT', {'repository': 'public/publicrepo'}, {u'description': 'WXNG'}, 'reader', 403),
- (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, None, 401),
- (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, 'devtable', 204),
- (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (Repository, 'DELETE', {'repository': 'devtable/shared'}, None, 'reader', 403),
- (Repository, 'GET', {'repository': 'devtable/shared'}, None, None, 401),
- (Repository, 'GET', {'repository': 'devtable/shared'}, None, 'devtable', 200),
- (Repository, 'GET', {'repository': 'devtable/shared'}, None, 'freshuser', 403),
- (Repository, 'GET', {'repository': 'devtable/shared'}, None, 'reader', 200),
- (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, None, 401),
- (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, 'devtable', 200),
- (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, 'freshuser', 403),
- (Repository, 'PUT', {'repository': 'devtable/shared'}, {u'description': 'WXNG'}, 'reader', 403),
- (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 204),
- (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (Repository, 'DELETE', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 403),
- (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, None, 401),
- (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'devtable', 200),
- (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'freshuser', 403),
- (Repository, 'GET', {'repository': 'buynlarge/orgrepo'}, None, 'reader', 200),
- (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, None, 401),
- (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, 'devtable', 200),
- (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, 'freshuser', 403),
- (Repository, 'PUT', {'repository': 'buynlarge/orgrepo'}, {u'description': 'WXNG'}, 'reader', 403),
-
- (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, None, 200),
- (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, 'devtable', 200),
- (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, 'freshuser', 200),
- (EntitySearch, 'GET', {'prefix': 'R9NZ'}, None, 'reader', 200),
-
- (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, None, 404),
- (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, 'devtable', 404),
- (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, 'freshuser', 404),
- (ApplicationInformation, 'GET', {'client_id': '3LGI'}, None, 'reader', 404),
-
- (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrganizationApplications, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
- (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, None, 401),
- (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, 'devtable', 200),
- (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, 'freshuser', 403),
- (OrganizationApplications, 'POST', {'orgname': 'buynlarge'}, {u'name': 'foo'}, 'reader', 403),
-
- (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, None, 401),
- (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'devtable', 204),
- (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'freshuser', 403),
- (OrganizationApplicationResource, 'DELETE', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'reader', 403),
- (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, None, 401),
- (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'devtable', 200),
- (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'freshuser', 403),
- (OrganizationApplicationResource, 'GET', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'reader', 403),
- (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, None, 401),
- (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, 'devtable', 200),
- (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, 'freshuser', 403),
- (OrganizationApplicationResource, 'PUT', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, {u'redirect_uri': 'foo', u'name': 'foo', u'application_uri': 'foo'}, 'reader', 403),
-
- (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, None, 401),
- (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'devtable', 200),
- (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'freshuser', 403),
- (OrganizationApplicationResetClientSecret, 'POST', {'orgname': 'buynlarge', 'client_id': 'deadbeef'}, None, 'reader', 403),
-
- (Users, 'GET', {'username': 'devtable'}, None, None, 200),
-
- (UserNotificationList, 'GET', None, None, None, 401),
- (UserNotificationList, 'GET', None, None, 'devtable', 200),
- (UserNotificationList, 'GET', None, None, 'freshuser', 200),
- (UserNotificationList, 'GET', None, None, 'reader', 200),
-
- (UserAuthorizationList, 'GET', None, None, None, 401),
- (UserAuthorizationList, 'GET', None, None, 'devtable', 200),
- (UserAuthorizationList, 'GET', None, None, 'freshuser', 200),
- (UserAuthorizationList, 'GET', None, None, 'reader', 200),
-
- (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, None, 401),
- (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, 'devtable', 404),
- (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, 'freshuser', 404),
- (UserAuthorization, 'DELETE', {'access_token_uuid': 'fake'}, None, 'reader', 404),
- (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, None, 401),
- (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, 'devtable', 404),
- (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, 'freshuser', 404),
- (UserAuthorization, 'GET', {'access_token_uuid': 'fake'}, None, 'reader', 404),
-
- (UserAggregateLogs, 'GET', None, None, None, 401),
- (UserAggregateLogs, 'GET', None, None, 'devtable', 200),
- (UserAggregateLogs, 'GET', None, None, 'freshuser', 200),
- (UserAggregateLogs, 'GET', None, None, 'reader', 200),
-
- (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, None, 401),
- (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrgAggregateLogs, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
-
- (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, None, 401),
- (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, 'devtable', 200),
- (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepositoryAggregateLogs, 'GET', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, None, 401),
- (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, 'devtable', 200),
- (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, 'freshuser', 200),
- (ExportUserLogs, 'POST', None, EXPORTLOGS_PARAMS, 'reader', 200),
-
- (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, None, 401),
- (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, 'devtable', 200),
- (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, 'freshuser', 403),
- (ExportOrgLogs, 'POST', {'orgname': 'buynlarge'}, EXPORTLOGS_PARAMS, 'reader', 403),
-
- (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, None, 401),
- (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, 'devtable', 200),
- (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, 'freshuser', 403),
- (ExportRepositoryLogs, 'POST', {'repository': 'devtable/simple'}, EXPORTLOGS_PARAMS, 'reader', 403),
-
- (SuperUserAggregateLogs, 'GET', None, None, None, 401),
- (SuperUserAggregateLogs, 'GET', None, None, 'devtable', 200),
- (SuperUserAggregateLogs, 'GET', None, None, 'freshuser', 403),
- (SuperUserAggregateLogs, 'GET', None, None, 'reader', 403),
-
- (SuperUserLogs, 'GET', None, None, None, 401),
- (SuperUserLogs, 'GET', None, None, 'devtable', 200),
- (SuperUserLogs, 'GET', None, None, 'freshuser', 403),
- (SuperUserLogs, 'GET', None, None, 'reader', 403),
-
- (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, None, 401),
- (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, 'devtable', 404),
- (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, 'freshuser', 403),
- (SuperUserSendRecoveryEmail, 'POST', {'username': 'someuser'}, None, 'reader', 403),
-
- (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, None, 401),
- (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, 'devtable', 404),
- (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, 'freshuser', 403),
- (SuperUserTakeOwnership, 'POST', {'namespace': 'invalidnamespace'}, {}, 'reader', 403),
-
- (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, None, 401),
- (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, 'devtable', 404),
- (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, 'freshuser', 403),
- (SuperUserServiceKeyApproval, 'POST', {'kid': 1234}, {}, 'reader', 403),
-
- (SuperUserServiceKeyManagement, 'GET', None, None, None, 401),
- (SuperUserServiceKeyManagement, 'GET', None, None, 'devtable', 200),
- (SuperUserServiceKeyManagement, 'GET', None, None, 'freshuser', 403),
- (SuperUserServiceKeyManagement, 'GET', None, None, 'reader', 403),
- (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, None, 401),
- (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, 'devtable', 200),
- (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, 'freshuser', 403),
- (SuperUserServiceKeyManagement, 'POST', None, {'expiration': None, 'service': 'someservice'}, 'reader', 403),
-
- (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, None, 401),
- (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, 'devtable', 404),
- (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, 'freshuser', 403),
- (SuperUserServiceKey, 'DELETE', {'kid': 1234}, None, 'reader', 403),
- (SuperUserServiceKey, 'GET', {'kid': 1234}, None, None, 401),
- (SuperUserServiceKey, 'GET', {'kid': 1234}, None, 'devtable', 404),
- (SuperUserServiceKey, 'GET', {'kid': 1234}, None, 'freshuser', 403),
- (SuperUserServiceKey, 'GET', {'kid': 1234}, None, 'reader', 403),
- (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, None, 401),
- (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, 'devtable', 404),
- (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, 'freshuser', 403),
- (SuperUserServiceKey, 'PUT', {'kid': 1234}, {}, 'reader', 403),
-
- (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, None, 401),
- (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, 'devtable', 400),
- (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, 'freshuser', 400),
- (TeamMemberInvite, 'DELETE', {'code': 'foobarbaz'}, None, 'reader', 400),
- (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, None, 401),
- (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, 'devtable', 400),
- (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, 'freshuser', 400),
- (TeamMemberInvite, 'PUT', {'code': 'foobarbaz'}, None, 'reader', 400),
-
- (ConductSearch, 'GET', None, None, None, 200),
- (ConductSearch, 'GET', None, None, 'devtable', 200),
-
- (ChangeLog, 'GET', None, None, None, 401),
- (ChangeLog, 'GET', None, None, 'devtable', 200),
- (ChangeLog, 'GET', None, None, 'freshuser', 403),
- (ChangeLog, 'GET', None, None, 'reader', 403),
-
- (SuperUserOrganizationList, 'GET', None, None, None, 401),
- (SuperUserOrganizationList, 'GET', None, None, 'devtable', 200),
- (SuperUserOrganizationList, 'GET', None, None, 'freshuser', 403),
- (SuperUserOrganizationList, 'GET', None, None, 'reader', 403),
-
- (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, None, 401),
- (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, 'devtable', 204),
- (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, 'freshuser', 403),
- (SuperUserOrganizationManagement, 'DELETE', {'name': 'buynlarge'}, None, 'reader', 403),
- (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, None, 401),
- (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, 'devtable', 200),
- (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, 'freshuser', 403),
- (SuperUserOrganizationManagement, 'PUT', {'name': 'buynlarge'}, {}, 'reader', 403),
-
- (SuperUserList, 'GET', None, None, None, 401),
- (SuperUserList, 'GET', None, None, 'devtable', 200),
- (SuperUserList, 'GET', None, None, 'freshuser', 403),
- (SuperUserList, 'GET', None, None, 'reader', 403),
-
- (SuperUserList, 'POST', None, {'username': 'foo'}, None, 401),
- (SuperUserList, 'POST', None, {'username': 'foo'}, 'devtable', 400),
- (SuperUserList, 'POST', None, {'username': 'foo'}, 'freshuser', 403),
- (SuperUserList, 'POST', None, {'username': 'foo'}, 'reader', 403),
-
- (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, None, 401),
- (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, 'devtable', 204),
- (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, 'freshuser', 403),
- (SuperUserManagement, 'DELETE', {'username': 'freshuser'}, None, 'reader', 403),
- (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, None, 401),
- (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, 'devtable', 200),
- (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, 'freshuser', 403),
- (SuperUserManagement, 'GET', {'username': 'freshuser'}, None, 'reader', 403),
- (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, None, 401),
- (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, 'devtable', 200),
- (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, 'freshuser', 403),
- (SuperUserManagement, 'PUT', {'username': 'freshuser'}, {}, 'reader', 403),
-
- (GlobalUserMessages, 'GET', None, None, None, 200),
-
- (GlobalUserMessages, 'POST', None, None, None, 401),
- (GlobalUserMessages, 'POST', None, {'message': {'content': 'msg', 'media_type': 'text/plain', 'severity': 'info'}}, 'devtable', 201),
- (GlobalUserMessages, 'POST', None, {'message': {'content': 'msg', 'media_type': 'text/plain', 'severity': 'info'}}, 'freshuser', 403),
- (GlobalUserMessages, 'POST', None, {'message': {'content': 'msg', 'media_type': 'text/plain', 'severity': 'info'}}, 'reader', 403),
-
- (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, None, 401),
- (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, 'devtable', 204),
- (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, 'freshuser', 403),
- (GlobalUserMessage, 'DELETE', {'uuid': '1234'}, None, 'reader', 403),
-
- (UserInvoiceFieldList, 'GET', None, None, None, 401),
- (UserInvoiceFieldList, 'GET', None, None, 'devtable', 200),
- (UserInvoiceFieldList, 'GET', None, None, 'freshuser', 404),
- (UserInvoiceFieldList, 'GET', None, None, 'reader', 404),
- (UserInvoiceFieldList, 'POST', None, None, None, 401),
- (UserInvoiceFieldList, 'POST', None, {'value': 'bar', 'title': 'foo'}, 'devtable', 200),
- (UserInvoiceFieldList, 'POST', None, {'value': 'bar', 'title': 'foo'}, 'freshuser', 404),
- (UserInvoiceFieldList, 'POST', None, {'value': 'bar', 'title': 'foo'}, 'reader', 404),
-
- (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, None, 401),
- (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, 'devtable', 201),
- (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, 'freshuser', 404),
- (UserInvoiceField, 'DELETE', {'field_uuid': '1234'}, None, 'reader', 404),
-
- (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, None, 403),
- (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, 'devtable', 200),
- (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, 'freshuser', 403),
- (OrganizationInvoiceFieldList, 'GET', {'orgname': 'buynlarge'}, None, 'reader', 403),
- (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, None, 403),
- (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, 'devtable', 200),
- (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, 'freshuser', 403),
- (OrganizationInvoiceFieldList, 'POST', {'orgname': 'buynlarge'}, {'value': 'bar', 'title': 'foo'}, 'reader', 403),
-
- (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, None, 403),
- (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, 'devtable', 201),
- (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, 'freshuser', 403),
- (OrganizationInvoiceField, 'DELETE', {'orgname': 'buynlarge', 'field_uuid': '1234'}, None, 'reader', 403),
-
- (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, None, 401),
- (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, 'devtable', 404),
- (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, 'freshuser', 403),
- (RepositoryImageSecurity, 'GET', {'repository': 'devtable/simple', 'imageid': 'fake'}, None, 'reader', 403),
-
- (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
- (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
- (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepositoryManifestSecurity, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
- (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
- (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepositoryManifestLabels, 'GET', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
- (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, None, 401),
- (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, 'devtable', 404),
- (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, 'freshuser', 403),
- (RepositoryManifestLabels, 'POST', {'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, {'media_type': 'text/plain', 'value': 'bar', 'key': 'foo'}, 'reader', 403),
-
- (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
- (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
- (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (ManageRepositoryManifestLabel, 'GET', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, None, 401),
- (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'devtable', 404),
- (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (ManageRepositoryManifestLabel, 'DELETE', {'labelid': 'someid', 'manifestref': 'sha256:abcd', 'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, None, 401),
- (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'devtable', 200),
- (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'freshuser', 403),
- (InviteTeamMember, 'PUT', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'reader', 403),
-
- (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, None, 401),
- (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'devtable', 404),
- (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'freshuser', 403),
- (InviteTeamMember, 'DELETE', {'orgname': 'buynlarge', 'teamname': 'owners', 'email': 'a@example.com'}, None, 'reader', 403),
-
- (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, None, 401),
- (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, 'devtable', 400),
- (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, 'freshuser', 403),
- (TestRepositoryNotification, 'POST', {'repository': 'buynlarge/orgrepo', 'uuid': 'foo'}, None, 'reader', 403),
-
- (LinkExternalEntity, 'POST', {'username': 'foo'}, None, None, 404),
-
- (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, None, 401),
- (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, 'devtable', 404),
- (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, 'freshuser', 403),
- (BuildTriggerSourceNamespaces, 'GET', {'repository': 'devtable/simple', 'trigger_uuid': 'foo'}, None, 'reader', 403),
-
- (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, None, 401),
- (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, 'devtable', 404),
- (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepoMirrorResource, 'GET', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, None, 401),
- (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, 'devtable', 400),
- (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepoMirrorResource, 'POST', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, None, 401),
- (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
- (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepoMirrorResource, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, None, 401),
- (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, 'devtable', 404),
- (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepoMirrorSyncNowResource, 'POST', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, None, 401),
- (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, 'devtable', 404),
- (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepoMirrorSyncCancelResource, 'POST', {'repository': 'devtable/simple'}, None, 'reader', 403),
-
- (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, None, 401),
- (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'devtable', 400),
- (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'freshuser', 403),
- (RepositoryStateResource, 'PUT', {'repository': 'devtable/simple'}, None, 'reader', 403),
-]
-
-@pytest.mark.parametrize('resource,method,params,body,identity,expected', SECURITY_TESTS)
-def test_api_security(resource, method, params, body, identity, expected, client):
- with client_with_identity(identity, client) as cl:
- conduct_api_call(cl, resource, method, params, body, expected)
-
-
-ALLOWED_MISSING_MODULES = {'endpoints.api.suconfig', 'endpoints.api.error', 'data.userfiles'}
-
-def test_all_apis_tested(app):
- required_tests = set()
-
- for rule in app.url_map.iter_rules():
- endpoint_method = app.view_functions[rule.endpoint]
-
- # Verify that we have a view class for this API method.
-    if 'view_class' not in dir(endpoint_method):
- continue
-
- view_class = endpoint_method.view_class
- if view_class.__module__ in ALLOWED_MISSING_MODULES:
- continue
-
- method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
- full_name = '%s.%s' % (view_class.__module__, view_class.__name__)
- for method_name in method_names:
- required_tests.add('%s::%s' % (full_name, method_name.upper()))
-
- assert required_tests
-
- for test in SECURITY_TESTS:
- view_class = test[0]
- required_tests.discard('%s.%s::%s' % (view_class.__module__, view_class.__name__,
- test[1].upper()))
-
- assert not required_tests, "API security tests missing for: %s" % required_tests
-
-
-@pytest.mark.parametrize('is_superuser', [
- (True),
- (False),
-])
-@pytest.mark.parametrize('allow_nonsuperuser', [
- (True),
- (False),
-])
-@pytest.mark.parametrize('method, expected', [
- ('POST', 400),
- ('DELETE', 200),
-])
-def test_team_sync_security(is_superuser, allow_nonsuperuser, method, expected, client):
- def is_superuser_method(_):
- return is_superuser
-
- with patch('auth.permissions.superusers.is_superuser', is_superuser_method):
- with toggle_feature('NONSUPERUSER_TEAM_SYNCING_SETUP', allow_nonsuperuser):
- with client_with_identity('devtable', client) as cl:
- expect_success = is_superuser or allow_nonsuperuser
- expected_status = expected if expect_success else 403
- conduct_api_call(cl, OrganizationTeamSyncing, method, TEAM_PARAMS, {}, expected_status)
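
Each row of the SECURITY_TESTS table deleted above is a (resource, method, params, body, identity, expected-status) tuple consumed by one parametrized test. A minimal, self-contained sketch of the same table-driven pattern; RULES and check_access are hypothetical stand-ins, not Quay code:

    import pytest

    # Hypothetical access matrix with the same row shape as SECURITY_TESTS:
    # anonymous callers get 401, known but unauthorized callers get 403.
    RULES = {
        ('repo', 'GET', 'reader'): 200,
        ('repo', 'DELETE', 'reader'): 403,
        ('repo', 'DELETE', None): 401,
    }

    def check_access(resource, method, identity):
        default = 403 if identity else 401
        return RULES.get((resource, method, identity), default)

    @pytest.mark.parametrize('resource,method,identity,expected', [
        ('repo', 'GET', 'reader', 200),
        ('repo', 'DELETE', 'reader', 403),
        ('repo', 'DELETE', None, 401),
    ])
    def test_access_matrix(resource, method, identity, expected):
        assert check_access(resource, method, identity) == expected
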
diff --git a/endpoints/api/test/test_signing.py b/endpoints/api/test/test_signing.py
deleted file mode 100644
index e941cee56..000000000
--- a/endpoints/api/test/test_signing.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import pytest
-
-from collections import Counter
-from mock import patch
-
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.signing import RepositorySignatures
-from endpoints.test.shared import client_with_identity
-
-from test.fixtures import *
-
-VALID_TARGETS_MAP = {
- "targets/ci": {
- "targets": {
- "latest": {
- "hashes": {
- "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ="
- },
- "length": 2111
- }
- },
- "expiration": "2020-05-22T10:26:46.618176424-04:00"
- },
- "targets": {
- "targets": {
- "latest": {
- "hashes": {
- "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ="
- },
- "length": 2111
- }
- },
- "expiration": "2020-05-22T10:26:01.953414888-04:00"}
- }
-
-
-def tags_equal(expected, actual):
- expected_tags = expected.get('delegations')
- actual_tags = actual.get('delegations')
- if expected_tags and actual_tags:
- return Counter(expected_tags) == Counter(actual_tags)
- return expected == actual
-
-@pytest.mark.parametrize('targets_map,expected', [
- (VALID_TARGETS_MAP, {'delegations': VALID_TARGETS_MAP}),
- ({'bad': 'tags'}, {'delegations': {'bad': 'tags'}}),
- ({}, {'delegations': {}}),
- (None, {'delegations': None}), # API returns None on exceptions
-])
-def test_get_signatures(targets_map, expected, client):
- with patch('endpoints.api.signing.tuf_metadata_api') as mock_tuf:
- mock_tuf.get_all_tags_with_expiration.return_value = targets_map
- with client_with_identity('devtable', client) as cl:
- params = {'repository': 'devtable/trusted'}
- assert tags_equal(expected, conduct_api_call(cl, RepositorySignatures, 'GET', params, None, 200).json)
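
The tags_equal helper above relies on collections.Counter for an order-insensitive comparison of the 'delegations' payloads. A minimal standalone sketch of that comparison:

    from collections import Counter

    def unordered_equal(left, right):
        # Counter-based equality ignores ordering, which is how tags_equal
        # compares the 'delegations' keys of expected vs. actual responses.
        return Counter(left) == Counter(right)

    assert unordered_equal(['targets/ci', 'targets'], ['targets', 'targets/ci'])
    assert not unordered_equal(['targets'], ['targets', 'targets'])
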
diff --git a/endpoints/api/test/test_subscribe_models_pre_oci.py b/endpoints/api/test/test_subscribe_models_pre_oci.py
deleted file mode 100644
index 8810e36f5..000000000
--- a/endpoints/api/test/test_subscribe_models_pre_oci.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import pytest
-from mock import patch
-
-from endpoints.api.subscribe_models_pre_oci import data_model
-
-
-@pytest.mark.parametrize('username,repo_count', [
- ('devtable', 3)
-])
-def test_get_private_repo_count(username, repo_count):
-  with patch('endpoints.api.subscribe_models_pre_oci.get_private_repo_count') as mock_get_private_repo_count:
-    mock_get_private_repo_count.return_value = repo_count
-    count = data_model.get_private_repo_count(username)
-
-    mock_get_private_repo_count.assert_called_once_with(username)
-    assert count == repo_count
-
-
-@pytest.mark.parametrize('kind_name,target_username,metadata', [
- ('over_private_usage', 'devtable', {'namespace': 'devtable'})
-])
-def test_create_unique_notification(kind_name, target_username, metadata):
- with patch('endpoints.api.subscribe_models_pre_oci.get_user_or_org') as mock_get_user_or_org:
- mock_get_user_or_org.return_value = {'username': target_username}
- with patch('endpoints.api.subscribe_models_pre_oci.create_unique_notification') as mock_create_unique_notification:
- data_model.create_unique_notification(kind_name, target_username, metadata)
-
- mock_get_user_or_org.assert_called_once_with(target_username)
- mock_create_unique_notification.assert_called_once_with(kind_name, mock_get_user_or_org.return_value, metadata)
-
-
-@pytest.mark.parametrize('target_username,kind_name', [
- ('devtable', 'over_private_usage')
-])
-def test_delete_notifications_by_kind(target_username, kind_name):
- with patch('endpoints.api.subscribe_models_pre_oci.get_user_or_org') as mock_get_user_or_org:
- mock_get_user_or_org.return_value = {'username': target_username}
- with patch('endpoints.api.subscribe_models_pre_oci.delete_notifications_by_kind') as mock_delete_notifications_by_kind:
- data_model.delete_notifications_by_kind(target_username, kind_name)
-
- mock_get_user_or_org.assert_called_once_with(target_username)
- mock_delete_notifications_by_kind.assert_called_once_with(mock_get_user_or_org.return_value, kind_name)
-
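
All three deleted tests above share one shape: patch the data-layer function, drive the model facade, and assert the call was forwarded once with the resolved argument. A self-contained sketch of that shape; DataLayer and SubscribeModel are hypothetical stand-ins, not Quay classes:

    from mock import patch

    class DataLayer(object):
        @staticmethod
        def get_private_repo_count(username):
            raise RuntimeError('would hit the database')  # never runs in the test

    class SubscribeModel(object):
        def get_private_repo_count(self, username):
            return DataLayer.get_private_repo_count(username)

    def test_forwarding():
        # Patch the data-layer call, drive the facade, then assert the
        # arguments were forwarded unchanged.
        with patch.object(DataLayer, 'get_private_repo_count') as mock_count:
            mock_count.return_value = 3
            assert SubscribeModel().get_private_repo_count('devtable') == 3
            mock_count.assert_called_once_with('devtable')
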
diff --git a/endpoints/api/test/test_superuser.py b/endpoints/api/test/test_superuser.py
deleted file mode 100644
index 46e4bacf3..000000000
--- a/endpoints/api/test/test_superuser.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import pytest
-
-from endpoints.api.superuser import SuperUserList, SuperUserManagement
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-@pytest.mark.parametrize('disabled', [
- (True),
- (False),
-])
-def test_list_all_users(disabled, client):
- with client_with_identity('devtable', client) as cl:
- params = {'disabled': disabled}
- result = conduct_api_call(cl, SuperUserList, 'GET', params, None, 200).json
- assert len(result['users'])
- for user in result['users']:
- if not disabled:
- assert user['enabled']
-
-
-def test_change_install_user(client):
- with client_with_identity('devtable', client) as cl:
- params = {'username': 'randomuser'}
- body = {'email': 'new_email123@test.com'}
- result = conduct_api_call(cl, SuperUserManagement, 'PUT', params, body, 200).json
-
- assert result['email'] == body['email']
diff --git a/endpoints/api/test/test_tag.py b/endpoints/api/test/test_tag.py
deleted file mode 100644
index 54f6df599..000000000
--- a/endpoints/api/test/test_tag.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import pytest
-
-from playhouse.test_utils import assert_query_count
-
-from data.registry_model import registry_model
-from data.database import Manifest
-
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags, RepositoryTagImages
-
-from test.fixtures import *
-
-@pytest.mark.parametrize('expiration_time, expected_status', [
- (None, 201),
- ('aksdjhasd', 400),
-])
-def test_change_tag_expiration_default(expiration_time, expected_status, client, app):
- with client_with_identity('devtable', client) as cl:
- params = {
- 'repository': 'devtable/simple',
- 'tag': 'latest',
- }
-
- request_body = {
- 'expiration': expiration_time,
- }
-
- conduct_api_call(cl, RepositoryTag, 'put', params, request_body, expected_status)
-
-
-def test_change_tag_expiration(client, app):
- with client_with_identity('devtable', client) as cl:
- params = {
- 'repository': 'devtable/simple',
- 'tag': 'latest',
- }
-
- tag = model.tag.get_active_tag('devtable', 'simple', 'latest')
- updated_expiration = tag.lifetime_start_ts + 60*60*24
-
- request_body = {
- 'expiration': updated_expiration,
- }
-
- conduct_api_call(cl, RepositoryTag, 'put', params, request_body, 201)
- tag = model.tag.get_active_tag('devtable', 'simple', 'latest')
- assert tag.lifetime_end_ts == updated_expiration
-
-
-@pytest.mark.parametrize('image_exists,test_tag,expected_status', [
- (True, '-INVALID-TAG-NAME', 400),
- (True, '.INVALID-TAG-NAME', 400),
- (True,
- 'INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG',
- 400),
- (False, 'newtag', 404),
- (True, 'generatemanifestfail', None),
- (True, 'latest', 201),
- (True, 'newtag', 201),
-])
-def test_move_tag(image_exists, test_tag, expected_status, client, app):
- with client_with_identity('devtable', client) as cl:
- test_image = 'unknown'
- if image_exists:
- repo_ref = registry_model.lookup_repository('devtable', 'simple')
- tag_ref = registry_model.get_repo_tag(repo_ref, 'latest', include_legacy_image=True)
- assert tag_ref
-
- test_image = tag_ref.legacy_image.docker_image_id
-
- params = {'repository': 'devtable/simple', 'tag': test_tag}
- request_body = {'image': test_image}
- if expected_status is None:
- with pytest.raises(Exception):
- conduct_api_call(cl, RepositoryTag, 'put', params, request_body, expected_status)
- else:
- conduct_api_call(cl, RepositoryTag, 'put', params, request_body, expected_status)
-
-
-@pytest.mark.parametrize('repo_namespace, repo_name, query_count', [
- ('devtable', 'simple', 5),
- ('devtable', 'history', 5),
- ('devtable', 'complex', 5),
- ('devtable', 'gargantuan', 5),
- ('buynlarge', 'orgrepo', 7), # +2 for permissions checks.
- ('buynlarge', 'anotherorgrepo', 7), # +2 for permissions checks.
-])
-def test_list_repo_tags(repo_namespace, repo_name, client, query_count, app):
- # Pre-cache media type loads to ensure consistent query count.
- Manifest.media_type.get_name(1)
-
- params = {'repository': repo_namespace + '/' + repo_name}
- with client_with_identity('devtable', client) as cl:
- with assert_query_count(query_count):
- tags = conduct_api_call(cl, ListRepositoryTags, 'get', params).json['tags']
-
- repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
- history, _ = registry_model.list_repository_tag_history(repo_ref)
- assert len(tags) == len(history)
-
-
-@pytest.mark.parametrize('repository, tag, owned, expect_images', [
- ('devtable/simple', 'prod', False, True),
- ('devtable/simple', 'prod', True, False),
- ('devtable/simple', 'latest', False, True),
- ('devtable/simple', 'latest', True, False),
-
- ('devtable/complex', 'prod', False, True),
- ('devtable/complex', 'prod', True, True),
-])
-def test_list_tag_images(repository, tag, owned, expect_images, client, app):
- with client_with_identity('devtable', client) as cl:
- params = {'repository': repository, 'tag': tag, 'owned': owned}
- result = conduct_api_call(cl, RepositoryTagImages, 'get', params, None, 200).json
- assert bool(result['images']) == expect_images
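
test_list_repo_tags above pins the number of SQL queries issued per listing, which is why it pre-caches the media-type lookup first. A framework-free sketch of what a query-count assertion like playhouse's assert_query_count provides; QUERY_LOG and run_query are hypothetical:

    from contextlib import contextmanager

    QUERY_LOG = []

    @contextmanager
    def assert_query_count(expected):
        # Count the queries issued inside the block and fail on regressions.
        before = len(QUERY_LOG)
        yield
        issued = len(QUERY_LOG) - before
        assert issued == expected, 'expected %d queries, saw %d' % (expected, issued)

    def run_query(sql):
        QUERY_LOG.append(sql)  # stand-in for an actual database execute

    with assert_query_count(2):
        run_query('SELECT 1')
        run_query('SELECT 2')
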
diff --git a/endpoints/api/test/test_team.py b/endpoints/api/test/test_team.py
deleted file mode 100644
index 9a17a36e4..000000000
--- a/endpoints/api/test/test_team.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import json
-
-from mock import patch
-
-from data import model
-from endpoints.api import api
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.team import OrganizationTeamSyncing, TeamMemberList
-from endpoints.api.organization import Organization
-from endpoints.test.shared import client_with_identity
-
-from test.test_ldap import mock_ldap
-
-from test.fixtures import *
-
-SYNCED_TEAM_PARAMS = {'orgname': 'sellnsmall', 'teamname': 'synced'}
-UNSYNCED_TEAM_PARAMS = {'orgname': 'sellnsmall', 'teamname': 'owners'}
-
-def test_team_syncing(client):
- with mock_ldap() as ldap:
- with patch('endpoints.api.team.authentication', ldap):
- with client_with_identity('devtable', client) as cl:
- config = {
- 'group_dn': 'cn=AwesomeFolk',
- }
-
- conduct_api_call(cl, OrganizationTeamSyncing, 'POST', UNSYNCED_TEAM_PARAMS, config)
-
- # Ensure the team is now synced.
- sync_info = model.team.get_team_sync_information(UNSYNCED_TEAM_PARAMS['orgname'],
- UNSYNCED_TEAM_PARAMS['teamname'])
- assert sync_info is not None
- assert json.loads(sync_info.config) == config
-
- # Remove the syncing.
- conduct_api_call(cl, OrganizationTeamSyncing, 'DELETE', UNSYNCED_TEAM_PARAMS, None)
-
- # Ensure the team is no longer synced.
- sync_info = model.team.get_team_sync_information(UNSYNCED_TEAM_PARAMS['orgname'],
- UNSYNCED_TEAM_PARAMS['teamname'])
- assert sync_info is None
-
-
-def test_team_member_sync_info(client):
- with mock_ldap() as ldap:
- with patch('endpoints.api.team.authentication', ldap):
- # Check for an unsynced team, with superuser.
- with client_with_identity('devtable', client) as cl:
- resp = conduct_api_call(cl, TeamMemberList, 'GET', UNSYNCED_TEAM_PARAMS)
- assert 'can_sync' in resp.json
- assert resp.json['can_sync']['service'] == 'ldap'
-
- assert 'synced' not in resp.json
-
- # Check for an unsynced team, with non-superuser.
- with client_with_identity('randomuser', client) as cl:
- resp = conduct_api_call(cl, TeamMemberList, 'GET', UNSYNCED_TEAM_PARAMS)
- assert 'can_sync' not in resp.json
- assert 'synced' not in resp.json
-
- # Check for a synced team, with superuser.
- with client_with_identity('devtable', client) as cl:
- resp = conduct_api_call(cl, TeamMemberList, 'GET', SYNCED_TEAM_PARAMS)
- assert 'can_sync' in resp.json
- assert resp.json['can_sync']['service'] == 'ldap'
-
- assert 'synced' in resp.json
- assert 'last_updated' in resp.json['synced']
- assert 'group_dn' in resp.json['synced']['config']
-
- # Check for a synced team, with non-superuser.
- with client_with_identity('randomuser', client) as cl:
- resp = conduct_api_call(cl, TeamMemberList, 'GET', SYNCED_TEAM_PARAMS)
- assert 'can_sync' not in resp.json
-
- assert 'synced' in resp.json
- assert 'last_updated' not in resp.json['synced']
- assert 'config' not in resp.json['synced']
-
-
-def test_organization_teams_sync_bool(client):
- with mock_ldap() as ldap:
- with patch('endpoints.api.organization.authentication', ldap):
- # Ensure synced teams are marked as such in the organization teams list.
- with client_with_identity('devtable', client) as cl:
- resp = conduct_api_call(cl, Organization, 'GET', {'orgname': 'sellnsmall'})
-
- assert not resp.json['teams']['owners']['is_synced']
-
- assert resp.json['teams']['synced']['is_synced']
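
The assertions in the deleted test_team.py encode a visibility rule: only superusers see 'can_sync', and only they see a synced team's 'last_updated' and 'config'. A hypothetical condensation of that rule; team_sync_view is illustrative, not Quay's serializer:

    def team_sync_view(sync_info, is_superuser):
        # Non-superusers learn only that the team is synced, never the config.
        view = {}
        if is_superuser:
            view['can_sync'] = {'service': 'ldap'}
        if sync_info is not None:
            view['synced'] = dict(sync_info) if is_superuser else {}
        return view

    full = team_sync_view({'last_updated': 'now',
                           'config': {'group_dn': 'cn=AwesomeFolk'}}, True)
    assert 'group_dn' in full['synced']['config']
    reduced = team_sync_view({'last_updated': 'now', 'config': {}}, False)
    assert 'can_sync' not in reduced and 'config' not in reduced['synced']
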
diff --git a/endpoints/api/test/test_trigger.py b/endpoints/api/test/test_trigger.py
deleted file mode 100644
index 946b34431..000000000
--- a/endpoints/api/test/test_trigger.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import pytest
-import json
-
-from data import model
-from endpoints.api.trigger_analyzer import is_parent
-from endpoints.api.trigger import BuildTrigger
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('context,dockerfile_path,expected', [
- ("/", "/a/b", True),
- ("/a", "/a/b", True),
- ("/a/b", "/a/b", False),
- ("/a//", "/a/b", True),
- ("/a", "/a//b/c", True),
- ("/a//", "a/b", True),
- ("/a/b", "a/bc/d", False),
- ("/d", "/a/b", False),
- ("/a/b", "/a/b.c", False),
- ("/a/b", "/a/b/b.c", True),
- ("", "/a/b.c", False),
- ("/a/b", "", False),
- ("", "", False),
-])
-def test_is_parent(context, dockerfile_path, expected):
- assert is_parent(context, dockerfile_path) == expected
-
-
-def test_enabled_disabled_trigger(app, client):
- trigger = model.build.list_build_triggers('devtable', 'building')[0]
- trigger.config = json.dumps({'hook_id': 'someid'})
- trigger.save()
-
- params = {
- 'repository': 'devtable/building',
- 'trigger_uuid': trigger.uuid,
- }
-
- body = {
- 'enabled': False,
- }
-
- with client_with_identity('devtable', client) as cl:
- result = conduct_api_call(cl, BuildTrigger, 'PUT', params, body, 200).json
- assert not result['enabled']
-
- body = {
- 'enabled': True,
- }
-
- with client_with_identity('devtable', client) as cl:
- result = conduct_api_call(cl, BuildTrigger, 'PUT', params, body, 200).json
- assert result['enabled']
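
test_enabled_disabled_trigger drives the updateBuildTrigger PUT, whose body is validated against the UpdateTrigger schema visible (as removed lines) in the trigger.py hunk further down. A standalone check of that one-field schema:

    from jsonschema import validate, ValidationError

    UPDATE_TRIGGER = {
        'type': 'object',
        'required': ['enabled'],
        'properties': {
            'enabled': {'type': 'boolean'},
        },
    }

    validate({'enabled': False}, UPDATE_TRIGGER)  # accepted: disables the trigger
    try:
        validate({'enabled': 'yes'}, UPDATE_TRIGGER)
        raise AssertionError('non-boolean body should have been rejected')
    except ValidationError:
        pass  # rejected: 'enabled' must be a boolean
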
diff --git a/endpoints/api/test/test_trigger_analyzer.py b/endpoints/api/test/test_trigger_analyzer.py
deleted file mode 100644
index 881bad8a3..000000000
--- a/endpoints/api/test/test_trigger_analyzer.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import pytest
-from mock import Mock
-
-from auth import permissions
-from data import model
-from endpoints.api.trigger_analyzer import TriggerAnalyzer
-from util import dockerfileparse
-
-BAD_PATH = "\"server_hostname/\" is not a valid Quay repository path"
-
-EMPTY_CONF = {}
-
-GOOD_CONF = {'context': '/', 'dockerfile_path': '/file'}
-
-BAD_CONF = {'context': 'context', 'dockerfile_path': 'dockerfile_path'}
-
-ONE_ROBOT = {'can_read': False, 'is_robot': True, 'kind': 'user', 'name': 'name'}
-
-DOCKERFILE_NOT_CHILD = 'Dockerfile, dockerfile_path, is not a child of the context, context.'
-
-THE_DOCKERFILE_SPECIFIED = 'Could not parse the Dockerfile specified'
-
-DOCKERFILE_PATH_NOT_FOUND = 'Specified Dockerfile path for the trigger was not found on the main branch. This trigger may fail.'
-
-NO_FROM_LINE = 'No FROM line found in the Dockerfile'
-
-REPO_NOT_FOUND = 'Repository "server_hostname/path/file" referenced by the Dockerfile was not found'
-
-
-@pytest.fixture
-def get_monkeypatch(monkeypatch):
- return monkeypatch
-
-
-def patch_permissions(monkeypatch, can_read=False):
- def can_read_fn(base_namespace, base_repository):
- return can_read
-
- monkeypatch.setattr(permissions, 'ReadRepositoryPermission', can_read_fn)
-
-
-def patch_list_namespace_robots(monkeypatch):
- my_mock = Mock()
- my_mock.configure_mock(**{'username': 'name'})
- return_value = [my_mock]
-
-  def return_list_mocks(namespace):
- return return_value
-
- monkeypatch.setattr(model.user, 'list_namespace_robots', return_list_mocks)
- return return_value
-
-
-def patch_get_all_repo_users_transitive(monkeypatch):
- my_mock = Mock()
- my_mock.configure_mock(**{'username': 'name'})
- return_value = [my_mock]
-
-  def return_get_mocks(namespace, image_repository):
- return return_value
-
- monkeypatch.setattr(model.user, 'get_all_repo_users_transitive', return_get_mocks)
- return return_value
-
-
-def patch_parse_dockerfile(monkeypatch, get_base_image):
- if get_base_image is not None:
- def return_return_value(content):
- parse_mock = Mock()
- parse_mock.configure_mock(**{'get_base_image': get_base_image})
- return parse_mock
-
- monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
- else:
- def return_return_value(content):
- return get_base_image
-
- monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
-
-
-def patch_model_repository_get_repository(monkeypatch, get_repository):
- if get_repository is not None:
-
- def mock_get_repository(base_namespace, base_repository):
- vis_mock = Mock()
- vis_mock.name = get_repository
- get_repo_mock = Mock(visibility=vis_mock)
-
-
- return get_repo_mock
-
- else:
- def mock_get_repository(base_namespace, base_repository):
- return None
-
- monkeypatch.setattr(model.repository, "get_repository", mock_get_repository)
-
-
-def return_none():
- return None
-
-
-def return_content():
- return Mock()
-
-
-def return_server_hostname():
- return "server_hostname/"
-
-
-def return_non_server_hostname():
- return "slime"
-
-
-def return_path():
- return "server_hostname/path/file"
-
-
-@pytest.mark.parametrize(
- 'handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots, server_hostname, get_repository, can_read, namespace, name', [
- (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [], None, None, False, "namespace", None),
- (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [ONE_ROBOT], None, None, False, "namespace", None),
- (return_content, BAD_CONF, False, "error", THE_DOCKERFILE_SPECIFIED, None, [], None, None, False, "namespace", None),
- (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [], None, None, False, "namespace", None),
- (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [ONE_ROBOT], None, None, False, "namespace", None),
- (return_content, BAD_CONF, False, "error", DOCKERFILE_NOT_CHILD, return_none, [], None, None, False, "namespace", None),
- (return_content, GOOD_CONF, False, "warning", NO_FROM_LINE, return_none, [], None, None, False, "namespace", None),
- (return_content, GOOD_CONF, False, "publicbase", None, return_non_server_hostname, [], "server_hostname", None, False, "namespace", None),
- (return_content, GOOD_CONF, False, "warning", BAD_PATH, return_server_hostname, [], "server_hostname", None, False, "namespace", None),
- (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", None, False, "namespace", None),
- (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", "nonpublic", False, "namespace", None),
- (return_content, GOOD_CONF, False, "requiresrobot", None, return_path, [], "server_hostname", "nonpublic", True, "path", "file"),
- (return_content, GOOD_CONF, False, "publicbase", None, return_path, [], "server_hostname", "public", True, "path", "file"),
-
- ])
-def test_trigger_analyzer(handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots,
- server_hostname, get_repository, can_read, namespace, name,
- get_monkeypatch):
- patch_list_namespace_robots(get_monkeypatch)
- patch_get_all_repo_users_transitive(get_monkeypatch)
- patch_parse_dockerfile(get_monkeypatch, get_base_image)
- patch_model_repository_get_repository(get_monkeypatch, get_repository)
- patch_permissions(get_monkeypatch, can_read)
- handler_mock = Mock()
- handler_mock.configure_mock(**{'load_dockerfile_contents': handler_fn})
- trigger_analyzer = TriggerAnalyzer(handler_mock, 'namespace', server_hostname, config_dict, admin_org_permission)
- assert trigger_analyzer.analyze_trigger() == {'namespace': namespace,
- 'name': name,
- 'robots': robots,
- 'status': status,
- 'message': message,
- 'is_admin': admin_org_permission}
diff --git a/endpoints/api/test/test_user.py b/endpoints/api/test/test_user.py
deleted file mode 100644
index bf31b0b6d..000000000
--- a/endpoints/api/test/test_user.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import pytest
-
-from mock import patch
-
-from endpoints.api.test.shared import conduct_api_call
-from endpoints.api.user import User
-from endpoints.test.shared import client_with_identity
-from features import FeatureNameValue
-
-from test.fixtures import *
-
-
-def test_user_metadata_update(client):
- with patch('features.USER_METADATA', FeatureNameValue('USER_METADATA', True)):
- with client_with_identity('devtable', client) as cl:
- metadata = {
- 'given_name': 'Quay',
- 'family_name': 'User',
- 'location': 'NYC',
- 'company': 'Red Hat',
- }
-
- # Update all user metadata fields.
- conduct_api_call(cl, User, 'PUT', None, body=metadata)
-
- # Test that they were successfully updated.
- user = conduct_api_call(cl, User, 'GET', None).json
- for field in metadata:
- assert user.get(field) == metadata.get(field)
-
- # Now nullify one of the fields, and remove another.
- metadata['company'] = None
- location = metadata.pop('location')
-
- conduct_api_call(cl, User, 'PUT', None, body=metadata)
-
- user = conduct_api_call(cl, User, 'GET', None).json
- for field in metadata:
- assert user.get(field) == metadata.get(field)
-
- # The location field should be unchanged.
- assert user.get('location') == location
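
The deleted test exercises the User PUT semantics: an explicit null clears a metadata field, while an omitted key is left unchanged. A small illustrative helper capturing those semantics; apply_metadata_update is hypothetical, not Quay's implementation:

    def apply_metadata_update(current, body):
        # A key set to None clears the field; an absent key is untouched.
        updated = dict(current)
        for field, value in body.items():
            updated[field] = value
        return updated

    user = {'given_name': 'Quay', 'company': 'Red Hat', 'location': 'NYC'}
    user = apply_metadata_update(user, {'company': None})
    assert user['company'] is None
    assert user['location'] == 'NYC'  # untouched, as the test expects
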
diff --git a/endpoints/api/trigger.py b/endpoints/api/trigger.py
index fb9f72a48..c03e9fbd9 100644
--- a/endpoints/api/trigger.py
+++ b/endpoints/api/trigger.py
@@ -1,52 +1,45 @@
""" Create, list and manage build triggers. """
+import json
import logging
+
+from urllib import quote
from urlparse import urlunparse
from flask import request, url_for
-from active_migration import ActiveDataMigration, ERTMigrationFlags
from app import app
-from auth.permissions import (UserAdminPermission, AdministerOrganizationPermission,
- AdministerRepositoryPermission)
from buildtrigger.basehandler import BuildTriggerHandler
-from buildtrigger.triggerutil import TriggerException, EmptyRepositoryException
-from data import model
-from data.fields import DecryptedValue
-from data.model.build import update_build_trigger
+from buildtrigger.triggerutil import (TriggerDeactivationException,
+ TriggerActivationException, EmptyRepositoryException,
+ RepositoryReadException, TriggerStartException)
from endpoints.api import (RepositoryParamResource, nickname, resource, require_repo_admin,
log_action, request_error, query_param, parse_args, internal_only,
- validate_json_request, api, path_param, abort,
- disallow_for_app_repositories, disallow_for_non_normal_repositories)
-from endpoints.api.build import build_status_view, trigger_view, RepositoryBuildStatus
-from endpoints.api.trigger_analyzer import TriggerAnalyzer
-from endpoints.building import (start_build, MaximumBuildsQueuedException,
- BuildTriggerDisabledException)
+ validate_json_request, api, path_param, abort)
from endpoints.exception import NotFound, Unauthorized, InvalidRequest
+from endpoints.api.build import build_status_view, trigger_view, RepositoryBuildStatus
+from endpoints.building import start_build, MaximumBuildsQueuedException
+from data import model
+from auth.permissions import (UserAdminPermission, AdministerOrganizationPermission,
+ ReadRepositoryPermission, AdministerRepositoryPermission)
from util.names import parse_robot_username
+from util.dockerfileparse import parse_dockerfile
+
logger = logging.getLogger(__name__)
def _prepare_webhook_url(scheme, username, password, hostname, path):
- auth_hostname = '%s:%s@%s' % (username, password, hostname)
+ auth_hostname = '%s:%s@%s' % (quote(username), quote(password), hostname)
return urlunparse((scheme, auth_hostname, path, '', '', ''))
-def get_trigger(trigger_uuid):
- try:
- trigger = model.build.get_build_trigger(trigger_uuid)
- except model.InvalidBuildTriggerException:
- raise NotFound()
- return trigger
-
@resource('/v1/repository//trigger/')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
class BuildTriggerList(RepositoryParamResource):
""" Resource for listing repository build triggers. """
@require_repo_admin
- @disallow_for_app_repositories
@nickname('listBuildTriggers')
def get(self, namespace_name, repo_name):
""" List the triggers for the specified repository. """
@@ -61,64 +54,32 @@ class BuildTriggerList(RepositoryParamResource):
@path_param('trigger_uuid', 'The UUID of the build trigger')
class BuildTrigger(RepositoryParamResource):
""" Resource for managing specific build triggers. """
- schemas = {
- 'UpdateTrigger': {
- 'type': 'object',
- 'description': 'Options for updating a build trigger',
- 'required': [
- 'enabled',
- ],
- 'properties': {
- 'enabled': {
- 'type': 'boolean',
- 'description': 'Whether the build trigger is enabled',
- },
- }
- },
- }
@require_repo_admin
- @disallow_for_app_repositories
@nickname('getBuildTrigger')
def get(self, namespace_name, repo_name, trigger_uuid):
""" Get information for the specified build trigger. """
- return trigger_view(get_trigger(trigger_uuid), can_admin=True)
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
+
+ return trigger_view(trigger, can_admin=True)
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
- @nickname('updateBuildTrigger')
- @validate_json_request('UpdateTrigger')
- def put(self, namespace_name, repo_name, trigger_uuid):
- """ Updates the specified build trigger. """
- trigger = get_trigger(trigger_uuid)
-
- handler = BuildTriggerHandler.get_handler(trigger)
- if not handler.is_active():
- raise InvalidRequest('Cannot update an unactivated trigger')
-
- enable = request.get_json()['enabled']
- model.build.toggle_build_trigger(trigger, enable)
- log_action('toggle_repo_trigger', namespace_name,
- {'repo': repo_name, 'trigger_id': trigger_uuid,
- 'service': trigger.service.name, 'enabled': enable},
- repo=model.repository.get_repository(namespace_name, repo_name))
-
- return trigger_view(trigger)
-
- @require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('deleteBuildTrigger')
def delete(self, namespace_name, repo_name, trigger_uuid):
""" Delete the specified build trigger. """
- trigger = get_trigger(trigger_uuid)
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
handler = BuildTriggerHandler.get_handler(trigger)
if handler.is_active():
try:
handler.deactivate()
- except TriggerException as ex:
+ except TriggerDeactivationException as ex:
# We are just going to eat this error
logger.warning('Trigger deactivation problem: %s', ex)
@@ -149,13 +110,14 @@ class BuildTriggerSubdirs(RepositoryParamResource):
}
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('listBuildTriggerSubdirs')
@validate_json_request('BuildTriggerSubdirRequest')
def post(self, namespace_name, repo_name, trigger_uuid):
""" List the subdirectories available for the specified build trigger and source. """
- trigger = get_trigger(trigger_uuid)
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
user_permission = UserAdminPermission(trigger.connected_user.username)
if user_permission.can():
@@ -164,25 +126,19 @@ class BuildTriggerSubdirs(RepositoryParamResource):
try:
subdirs = handler.list_build_subdirs()
- context_map = {}
- for file in subdirs:
- context_map = handler.get_parent_directory_mappings(file, context_map)
-
return {
- 'dockerfile_paths': ['/' + subdir for subdir in subdirs],
- 'contextMap': context_map,
- 'status': 'success',
+ 'subdir': subdirs,
+ 'status': 'success'
}
except EmptyRepositoryException as exc:
return {
'status': 'success',
- 'contextMap': {},
- 'dockerfile_paths': [],
+ 'subdir': []
}
- except TriggerException as exc:
+ except RepositoryReadException as exc:
return {
'status': 'error',
- 'message': exc.message,
+ 'message': exc.message
}
else:
raise Unauthorized()
@@ -214,13 +170,15 @@ class BuildTriggerActivate(RepositoryParamResource):
}
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('activateBuildTrigger')
@validate_json_request('BuildTriggerActivateRequest')
def post(self, namespace_name, repo_name, trigger_uuid):
""" Activate the specified build trigger. """
- trigger = get_trigger(trigger_uuid)
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
+
handler = BuildTriggerHandler.get_handler(trigger)
if handler.is_active():
raise InvalidRequest('Trigger config is not sufficient for activation.')
@@ -257,25 +215,23 @@ class BuildTriggerActivate(RepositoryParamResource):
try:
path = url_for('webhooks.build_trigger_webhook', trigger_uuid=trigger.uuid)
authed_url = _prepare_webhook_url(app.config['PREFERRED_URL_SCHEME'],
- '$token', write_token.get_code(),
+ '$token', write_token.code,
app.config['SERVER_HOSTNAME'], path)
handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
final_config, private_config = handler.activate(authed_url)
if 'private_key' in private_config:
- trigger.secure_private_key = DecryptedValue(private_config['private_key'])
+ trigger.private_key = private_config['private_key']
- # TODO(remove-unenc): Remove legacy field.
- if ActiveDataMigration.has_flag(ERTMigrationFlags.WRITE_OLD_FIELDS):
- trigger.private_key = private_config['private_key']
-
- except TriggerException as exc:
+ except TriggerActivationException as exc:
write_token.delete_instance()
raise request_error(message=exc.message)
# Save the updated config.
- update_build_trigger(trigger, final_config, write_token=write_token)
+ trigger.config = json.dumps(final_config)
+ trigger.write_token = write_token
+ trigger.save()
# Log the trigger setup.
repo = model.repository.get_repository(namespace_name, repo_name)
@@ -315,40 +271,120 @@ class BuildTriggerAnalyze(RepositoryParamResource):
}
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('analyzeBuildTrigger')
@validate_json_request('BuildTriggerAnalyzeRequest')
def post(self, namespace_name, repo_name, trigger_uuid):
""" Analyze the specified build trigger configuration. """
- trigger = get_trigger(trigger_uuid)
-
- if trigger.repository.namespace_user.username != namespace_name:
- raise NotFound()
-
- if trigger.repository.name != repo_name:
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
raise NotFound()
new_config_dict = request.get_json()['config']
handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
- server_hostname = app.config['SERVER_HOSTNAME']
+
try:
- trigger_analyzer = TriggerAnalyzer(handler,
- namespace_name,
- server_hostname,
- new_config_dict,
- AdministerOrganizationPermission(namespace_name).can())
- return trigger_analyzer.analyze_trigger()
- except TriggerException as rre:
+ # Load the contents of the Dockerfile.
+ contents = handler.load_dockerfile_contents()
+ if not contents:
+ return {
+ 'status': 'error',
+ 'message': 'Could not read the Dockerfile for the trigger'
+ }
+
+ # Parse the contents of the Dockerfile.
+ parsed = parse_dockerfile(contents)
+ if not parsed:
+ return {
+ 'status': 'error',
+ 'message': 'Could not parse the Dockerfile specified'
+ }
+
+ # Determine the base image (i.e. the FROM) for the Dockerfile.
+ base_image = parsed.get_base_image()
+ if not base_image:
+ return {
+ 'status': 'warning',
+ 'message': 'No FROM line found in the Dockerfile'
+ }
+
+ # Check to see if the base image lives in Quay.
+ quay_registry_prefix = '%s/' % (app.config['SERVER_HOSTNAME'])
+
+ if not base_image.startswith(quay_registry_prefix):
+ return {
+ 'status': 'publicbase'
+ }
+
+ # Lookup the repository in Quay.
+ result = base_image[len(quay_registry_prefix):].split('/', 2)
+ if len(result) != 2:
+ return {
+ 'status': 'warning',
+ 'message': '"%s" is not a valid Quay repository path' % (base_image)
+ }
+
+ (base_namespace, base_repository) = result
+ found_repository = model.repository.get_repository(base_namespace, base_repository)
+ if not found_repository:
+ return {
+ 'status': 'error',
+ 'message': 'Repository "%s" referenced by the Dockerfile was not found' % (base_image)
+ }
+
+ # If the repository is private and the user cannot see that repo, then
+ # mark it as not found.
+ can_read = ReadRepositoryPermission(base_namespace, base_repository)
+ if found_repository.visibility.name != 'public' and not can_read:
+ return {
+ 'status': 'error',
+ 'message': 'Repository "%s" referenced by the Dockerfile was not found' % (base_image)
+ }
+
+ # Check to see if the repository is public. If not, we suggest the
+ # usage of a robot account to conduct the pull.
+ read_robots = []
+
+ if AdministerOrganizationPermission(base_namespace).can():
+ def robot_view(robot):
+ return {
+ 'name': robot.username,
+ 'kind': 'user',
+ 'is_robot': True
+ }
+
+ def is_valid_robot(user):
+ # Make sure the user is a robot.
+ if not user.robot:
+ return False
+
+ # Make sure the current user can see/administer the robot.
+ (robot_namespace, shortname) = parse_robot_username(user.username)
+ return AdministerOrganizationPermission(robot_namespace).can()
+
+ repo_users = list(model.user.get_all_repo_users_transitive(base_namespace, base_repository))
+ read_robots = [robot_view(user) for user in repo_users if is_valid_robot(user)]
+
+ return {
+ 'namespace': base_namespace,
+ 'name': base_repository,
+ 'is_public': found_repository.visibility.name == 'public',
+ 'robots': read_robots,
+ 'status': 'analyzed'
+ }
+
+ except RepositoryReadException as rre:
return {
'status': 'error',
- 'message': 'Could not analyze the repository: %s' % rre.message,
+ 'message': rre.message
}
except NotImplementedError:
return {
'status': 'notimplemented',
}
+ raise NotFound()
+
@resource('/v1/repository//trigger//start')
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
@@ -369,7 +405,7 @@ class ActivateBuildTrigger(RepositoryParamResource):
'description': '(Custom Only) If specified, the ref/SHA1 used to checkout a git repository.'
},
'refs': {
- 'type': ['object', 'null'],
+ 'type': 'object',
'description': '(SCM Only) If specified, the ref to build.'
}
},
@@ -378,15 +414,14 @@ class ActivateBuildTrigger(RepositoryParamResource):
}
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('manuallyStartBuildTrigger')
@validate_json_request('RunParameters')
def post(self, namespace_name, repo_name, trigger_uuid):
""" Manually start a build from the specified trigger. """
- trigger = get_trigger(trigger_uuid)
- if not trigger.enabled:
- raise InvalidRequest('Trigger is not enabled.')
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
handler = BuildTriggerHandler.get_handler(trigger)
if not handler.is_active():
@@ -399,12 +434,10 @@ class ActivateBuildTrigger(RepositoryParamResource):
run_parameters = request.get_json()
prepared = handler.manual_start(run_parameters=run_parameters)
build_request = start_build(repo, prepared, pull_robot_name=pull_robot_name)
- except TriggerException as tse:
+ except TriggerStartException as tse:
raise InvalidRequest(tse.message)
except MaximumBuildsQueuedException:
abort(429, message='Maximum queued build rate exceeded.')
- except BuildTriggerDisabledException:
- abort(400, message='Build trigger is disabled')
resp = build_status_view(build_request)
repo_string = '%s/%s' % (namespace_name, repo_name)
@@ -420,9 +453,7 @@ class ActivateBuildTrigger(RepositoryParamResource):
@path_param('trigger_uuid', 'The UUID of the build trigger')
class TriggerBuildList(RepositoryParamResource):
""" Resource to represent builds that were activated from the specified trigger. """
-
@require_repo_admin
- @disallow_for_app_repositories
@parse_args()
@query_param('limit', 'The maximum number of builds to return', type=int, default=5)
@nickname('listTriggerRecentBuilds')
@@ -437,19 +468,18 @@ class TriggerBuildList(RepositoryParamResource):
FIELD_VALUE_LIMIT = 30
-
@resource('/v1/repository//trigger//fields/')
@internal_only
class BuildTriggerFieldValues(RepositoryParamResource):
""" Custom verb to fetch a values list for a particular field name. """
-
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('listTriggerFieldValues')
def post(self, namespace_name, repo_name, trigger_uuid, field_name):
""" List the field values for a custom run field. """
- trigger = get_trigger(trigger_uuid)
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
config = request.get_json() or None
if AdministerRepositoryPermission(namespace_name, repo_name).can():
@@ -472,57 +502,14 @@ class BuildTriggerFieldValues(RepositoryParamResource):
@internal_only
class BuildTriggerSources(RepositoryParamResource):
""" Custom verb to fetch the list of build sources for the trigger config. """
- schemas = {
- 'BuildTriggerSourcesRequest': {
- 'type': 'object',
- 'description': 'Specifies the namespace under which to fetch sources',
- 'properties': {
- 'namespace': {
- 'type': 'string',
- 'description': 'The namespace for which to fetch sources'
- },
- },
- }
- }
-
@require_repo_admin
- @disallow_for_app_repositories
- @disallow_for_non_normal_repositories
@nickname('listTriggerBuildSources')
- @validate_json_request('BuildTriggerSourcesRequest')
- def post(self, namespace_name, repo_name, trigger_uuid):
- """ List the build sources for the trigger configuration thus far. """
- namespace = request.get_json()['namespace']
-
- trigger = get_trigger(trigger_uuid)
-
- user_permission = UserAdminPermission(trigger.connected_user.username)
- if user_permission.can():
- handler = BuildTriggerHandler.get_handler(trigger)
-
- try:
- return {
- 'sources': handler.list_build_sources_for_namespace(namespace)
- }
- except TriggerException as rre:
- raise InvalidRequest(rre.message)
- else:
- raise Unauthorized()
-
-
-@resource('/v1/repository//trigger//namespaces')
-@path_param('repository', 'The full path of the repository. e.g. namespace/name')
-@path_param('trigger_uuid', 'The UUID of the build trigger')
-@internal_only
-class BuildTriggerSourceNamespaces(RepositoryParamResource):
- """ Custom verb to fetch the list of namespaces (orgs, projects, etc) for the trigger config. """
-
- @require_repo_admin
- @disallow_for_app_repositories
- @nickname('listTriggerBuildSourceNamespaces')
def get(self, namespace_name, repo_name, trigger_uuid):
""" List the build sources for the trigger configuration thus far. """
- trigger = get_trigger(trigger_uuid)
+ try:
+ trigger = model.build.get_build_trigger(trigger_uuid)
+ except model.InvalidBuildTriggerException:
+ raise NotFound()
user_permission = UserAdminPermission(trigger.connected_user.username)
if user_permission.can():
@@ -530,10 +517,9 @@ class BuildTriggerSourceNamespaces(RepositoryParamResource):
try:
return {
- 'namespaces': handler.list_build_source_namespaces()
+ 'sources': handler.list_build_sources()
}
- except TriggerException as rre:
+ except RepositoryReadException as rre:
raise InvalidRequest(rre.message)
else:
raise Unauthorized()
-
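
The change to _prepare_webhook_url at the top of this file's diff adds urllib.quote around the embedded credentials. A standalone sketch of why; quay.example is a placeholder hostname:

    from urllib import quote        # Python 2, matching the import hunk above
    from urlparse import urlunparse

    def prepare_webhook_url(scheme, username, password, hostname, path):
        # Percent-encoding the userinfo keeps reserved characters in the
        # token (':', '@') from corrupting the URL's authority component.
        auth_hostname = '%s:%s@%s' % (quote(username), quote(password), hostname)
        return urlunparse((scheme, auth_hostname, path, '', '', ''))

    print(prepare_webhook_url('https', '$token', 'a:b@c', 'quay.example', '/webhook'))
    # https://%24token:a%3Ab%40c@quay.example/webhook
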
diff --git a/endpoints/api/trigger_analyzer.py b/endpoints/api/trigger_analyzer.py
deleted file mode 100644
index 2a29e502e..000000000
--- a/endpoints/api/trigger_analyzer.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from os import path
-
-from auth import permissions
-from data import model
-from util import dockerfileparse
-
-
-def is_parent(context, dockerfile_path):
-  """ This checks whether the context is a parent of the dockerfile_path. """
- if context == "" or dockerfile_path == "":
- return False
-
- normalized_context = path.normpath(context)
- if normalized_context[len(normalized_context) - 1] != path.sep:
- normalized_context += path.sep
-
- if normalized_context[0] != path.sep:
- normalized_context = path.sep + normalized_context
-
- normalized_subdir = path.normpath(path.dirname(dockerfile_path))
- if normalized_subdir[0] != path.sep:
- normalized_subdir = path.sep + normalized_subdir
-
- if normalized_subdir[len(normalized_subdir) - 1] != path.sep:
- normalized_subdir += path.sep
-
- return normalized_subdir.startswith(normalized_context)
-
-
-class TriggerAnalyzer:
- """ This analyzes triggers and returns the appropriate trigger and robot view to the frontend. """
-
- def __init__(self, handler, namespace_name, server_hostname, new_config_dict, admin_org_permission):
- self.handler = handler
- self.namespace_name = namespace_name
- self.server_hostname = server_hostname
- self.new_config_dict = new_config_dict
- self.admin_org_permission = admin_org_permission
-
- def analyze_trigger(self):
- # Load the contents of the Dockerfile.
- contents = self.handler.load_dockerfile_contents()
- if not contents:
- return self.analyze_view(self.namespace_name, None, 'warning',
- message='Specified Dockerfile path for the trigger was not found on the main ' +
- 'branch. This trigger may fail.')
-
- # Parse the contents of the Dockerfile.
- parsed = dockerfileparse.parse_dockerfile(contents)
- if not parsed:
- return self.analyze_view(self.namespace_name, None, 'error', message='Could not parse the Dockerfile specified')
-
- # Check whether the dockerfile_path is correct
- if self.new_config_dict.get('context') and not is_parent(self.new_config_dict.get('context'),
- self.new_config_dict.get('dockerfile_path')):
- return self.analyze_view(self.namespace_name, None, 'error',
- message='Dockerfile, %s, is not a child of the context, %s.' %
-                               (self.new_config_dict.get('dockerfile_path'),
-                                self.new_config_dict.get('context')))
-
- # Determine the base image (i.e. the FROM) for the Dockerfile.
- base_image = parsed.get_base_image()
- if not base_image:
- return self.analyze_view(self.namespace_name, None, 'warning', message='No FROM line found in the Dockerfile')
-
- # Check to see if the base image lives in Quay.
- quay_registry_prefix = '%s/' % self.server_hostname
- if not base_image.startswith(quay_registry_prefix):
- return self.analyze_view(self.namespace_name, None, 'publicbase')
-
- # Lookup the repository in Quay.
- result = str(base_image)[len(quay_registry_prefix):].split('/', 2)
- if len(result) != 2:
- msg = '"%s" is not a valid Quay repository path' % base_image
- return self.analyze_view(self.namespace_name, None, 'warning', message=msg)
-
- (base_namespace, base_repository) = result
- found_repository = model.repository.get_repository(base_namespace, base_repository)
- if not found_repository:
- return self.analyze_view(self.namespace_name, None, 'error',
- message='Repository "%s" referenced by the Dockerfile was not found' % base_image)
-
- # If the repository is private and the user cannot see that repo, then
- # mark it as not found.
- can_read = permissions.ReadRepositoryPermission(base_namespace, base_repository)
- if found_repository.visibility.name != 'public' and not can_read.can():
- return self.analyze_view(self.namespace_name, None, 'error',
- message='Repository "%s" referenced by the Dockerfile was not found' % base_image)
-
- if found_repository.visibility.name == 'public':
- return self.analyze_view(base_namespace, base_repository, 'publicbase')
-
- return self.analyze_view(base_namespace, base_repository, 'requiresrobot')
-
- def analyze_view(self, image_namespace, image_repository, status, message=None):
- # Retrieve the list of robots and mark whether they have read access already.
- robots = []
- if self.admin_org_permission:
- if image_repository is not None:
- perm_query = model.user.get_all_repo_users_transitive(image_namespace, image_repository)
- user_ids_with_permission = set([user.id for user in perm_query])
- else:
- user_ids_with_permission = set()
-
- def robot_view(robot):
- return {
- 'name': robot.username,
- 'kind': 'user',
- 'is_robot': True,
- 'can_read': robot.id in user_ids_with_permission,
- }
-
- robots = [robot_view(robot) for robot in model.user.list_namespace_robots(image_namespace)]
-
- return {
- 'namespace': image_namespace,
- 'name': image_repository,
- 'robots': robots,
- 'status': status,
- 'message': message,
- 'is_admin': self.admin_org_permission,
- }
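# Hedged sketch (assumed hostname and image) of how the deleted analyzer
# split a Quay-hosted base image into (namespace, repository): after the
# registry prefix is stripped, split('/', 2) must yield exactly two parts.
server_hostname = 'quay.example.com'                 # assumption
base_image = 'quay.example.com/devtable/simple'      # assumption
prefix = '%s/' % server_hostname
result = base_image[len(prefix):].split('/', 2)
assert result == ['devtable', 'simple']              # valid: namespace/repo
assert len('a/b/c'.split('/', 2)) != 2               # too deep: flagged invalid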
diff --git a/endpoints/api/user.py b/endpoints/api/user.py
index 4eabe1088..c969926dd 100644
--- a/endpoints/api/user.py
+++ b/endpoints/api/user.py
@@ -11,33 +11,27 @@ from peewee import IntegrityError
import features
-from app import (app, billing as stripe, authentication, avatar, user_analytics, all_queues,
- oauth_login, namespace_gc_queue, ip_resolver, url_scheme_and_hostname)
-
+from app import app, billing as stripe, authentication, avatar, user_analytics, all_queues
from auth import scopes
from auth.auth_context import get_authenticated_user
from auth.permissions import (AdministerOrganizationPermission, CreateRepositoryPermission,
UserAdminPermission, UserReadPermission, SuperUserPermission)
from data import model
from data.billing import get_plan
-from data.database import Repository as RepositoryTable
-from data.users.shared import can_create_user
+from data.database import Repository as RepositoryTable, UserPromptTypes
from endpoints.api import (ApiResource, nickname, resource, validate_json_request, request_error,
log_action, internal_only, require_user_admin, parse_args,
query_param, require_scope, format_date, show_if,
require_fresh_login, path_param, define_json_response,
RepositoryParamResource, page_support)
-from endpoints.exception import NotFound, InvalidToken, InvalidRequest, DownstreamIssue
+from endpoints.exception import NotFound, InvalidToken
from endpoints.api.subscribe import subscribe
from endpoints.common import common_login
from endpoints.csrf import generate_csrf_token, OAUTH_CSRF_TOKEN_NAME
-from endpoints.decorators import anon_allowed, readonly_call_allowed
-from oauth.oidc import DiscoveryFailureException
+from endpoints.decorators import anon_allowed
from util.useremails import (send_confirmation_email, send_recovery_email, send_change_email,
send_password_changed, send_org_recovery_email)
from util.names import parse_single_urn
-from util.saas.useranalytics import build_error_callback
-from util.request import get_request_ip
REPOS_PER_PAGE = 100
@@ -81,7 +75,6 @@ def user_view(user, previous_username=None):
'name': o.username,
'avatar': avatar.get_data_for_org(o),
'can_create_repo': CreateRepositoryPermission(o.username).can(),
- 'public': o.username in app.config.get('PUBLIC_NAMESPACES', []),
}
if user_admin:
@@ -92,13 +85,7 @@ def user_view(user, previous_username=None):
return org_response
- # Retrieve the organizations for the user.
- organizations = {o.username: o for o in model.organization.get_user_organizations(user.username)}
-
- # Add any public namespaces.
- public_namespaces = app.config.get('PUBLIC_NAMESPACES', [])
- if public_namespaces:
- organizations.update({ns: model.user.get_namespace_user(ns) for ns in public_namespaces})
+ organizations = model.organization.get_user_organizations(user.username)
def login_view(login):
try:
@@ -131,14 +118,8 @@ def user_view(user, previous_username=None):
'invoice_email': user.invoice_email,
'invoice_email_address': user.invoice_email_address,
'preferred_namespace': not (user.stripe_id is None),
- 'tag_expiration_s': user.removed_tag_expiration_s,
+ 'tag_expiration': user.removed_tag_expiration_s,
'prompts': model.user.get_user_prompts(user),
- 'company': user.company,
- 'family_name': user.family_name,
- 'given_name': user.given_name,
- 'location': user.location,
- 'is_free_account': user.stripe_id is None,
- 'has_password_set': authentication.has_password_set(user.username),
})
analytics_metadata = user_analytics.get_user_analytics_metadata(user)
@@ -152,7 +133,7 @@ def user_view(user, previous_username=None):
user_view_perm = UserReadPermission(user.username)
if user_view_perm.can():
user_response.update({
- 'organizations': [org_view(o, user_admin=user_admin.can()) for o in organizations.values()],
+ 'organizations': [org_view(o, user_admin=user_admin.can()) for o in organizations],
})
@@ -225,10 +206,10 @@ class User(ApiResource):
'type': 'string',
'description': 'The user\'s email address',
},
- 'tag_expiration_s': {
+ 'tag_expiration': {
'type': 'integer',
+ 'maximum': 2592000,
'minimum': 0,
- 'description': 'The number of seconds for tag expiration',
},
'username': {
'type': 'string',
@@ -239,21 +220,17 @@ class User(ApiResource):
'description': 'Custom email address for receiving invoices',
},
'given_name': {
- 'type': ['string', 'null'],
+ 'type': 'string',
'description': 'The optional entered given name for the user',
},
'family_name': {
- 'type': ['string', 'null'],
+ 'type': 'string',
'description': 'The optional entered family name for the user',
},
'company': {
- 'type': ['string', 'null'],
+ 'type': 'string',
'description': 'The optional entered company for the user',
},
- 'location': {
- 'type': ['string', 'null'],
- 'description': 'The optional entered location for the user',
- },
},
},
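# Hedged sketch of the bound the 'UpdateUser' schema above places on
# 'tag_expiration' (0..2592000 seconds, i.e. up to 30 days), using a trimmed
# stand-in schema rather than the real one:
from jsonschema import ValidationError, validate

_schema = {'type': 'object',
           'properties': {'tag_expiration': {'type': 'integer',
                                             'minimum': 0,
                                             'maximum': 2592000}}}
validate({'tag_expiration': 86400}, _schema)          # one day: accepted
try:
  validate({'tag_expiration': 2592001}, _schema)      # just over 30 days
  assert False, 'expected rejection'
except ValidationError:
  pass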
'UserView': {
@@ -320,12 +297,12 @@ class User(ApiResource):
@nickname('changeUserDetails')
@internal_only
@validate_json_request('UpdateUser')
+ @define_json_response('UserView')
def put(self):
""" Update a users details such as password or email. """
user = get_authenticated_user()
user_data = request.get_json()
previous_username = None
- headers = None
try:
if 'password' in user_data:
@@ -336,9 +313,7 @@ class User(ApiResource):
model.user.change_password(user, user_data['password'])
# Login again to reset their session cookie.
- success, headers = common_login(user.uuid)
- if not success:
- raise request_error(message='Could not perform login action')
+ common_login(user)
if features.MAILING:
send_password_changed(user.username, user.email)
@@ -347,9 +322,9 @@ class User(ApiResource):
logger.debug('Changing invoice_email for user: %s', user.username)
model.user.change_send_invoice_email(user, user_data['invoice_email'])
- if features.CHANGE_TAG_EXPIRATION and 'tag_expiration_s' in user_data:
- logger.debug('Changing user tag expiration to: %ss', user_data['tag_expiration_s'])
- model.user.change_user_tag_expiration(user, user_data['tag_expiration_s'])
+ if 'tag_expiration' in user_data:
+ logger.debug('Changing user tag expiration to: %ss', user_data['tag_expiration'])
+ model.user.change_user_tag_expiration(user, user_data['tag_expiration'])
if ('invoice_email_address' in user_data and
user_data['invoice_email_address'] != user.invoice_email_address):
@@ -364,25 +339,17 @@ class User(ApiResource):
if features.MAILING:
logger.debug('Sending email to change email address for user: %s',
user.username)
- confirmation_code = model.user.create_confirm_email_code(user, new_email=new_email)
- send_change_email(user.username, user_data['email'], confirmation_code)
+ code = model.user.create_confirm_email_code(user, new_email=new_email)
+ send_change_email(user.username, user_data['email'], code.code)
else:
- ua_future = user_analytics.change_email(user.email, new_email)
- ua_future.add_done_callback(build_error_callback('Change email failed'))
+ user_analytics.change_email(user.email, new_email)
model.user.update_email(user, new_email, auto_verify=not features.MAILING)
- if features.USER_METADATA:
- metadata = {}
-
- for field in ('given_name', 'family_name', 'company', 'location'):
- if field in user_data:
- metadata[field] = user_data.get(field)
-
- if len(metadata) > 0:
- model.user.update_user_metadata(user, metadata)
-
- ua_mdata_future = user_analytics.change_metadata(user.email, **metadata)
- ua_mdata_future.add_done_callback(build_error_callback('Change metadata failed'))
+ if 'given_name' in user_data or 'family_name' in user_data or 'company' in user_data:
+ model.user.update_user_metadata(user, user_data.get('given_name'),
+ user_data.get('family_name'), user_data.get('company'))
+ user_analytics.change_metadata(user.email, user_data.get('given_name'),
+ user_data.get('family_name'), user_data.get('company'))
# Check for username rename. A username can be renamed if the feature is enabled OR the user
# currently has a confirm_username prompt.
@@ -391,8 +358,7 @@ class User(ApiResource):
new_username = user_data.get('username')
previous_username = user.username
- rename_allowed = (features.USER_RENAME or
- (confirm_username and features.USERNAME_CONFIRMATION))
+ rename_allowed = features.USER_RENAME or confirm_username
username_changing = new_username and new_username != previous_username
if rename_allowed and username_changing:
@@ -401,16 +367,14 @@ class User(ApiResource):
raise request_error(message='Username is already in use')
user = model.user.change_username(user.id, new_username)
- username_future = user_analytics.change_username(user.email, new_username)
- username_future.add_done_callback(build_error_callback('Change username failed'))
-
+ user_analytics.change_username(user.email, new_username)
elif confirm_username:
model.user.remove_user_prompt(user, 'confirm_username')
except model.user.InvalidPasswordException, ex:
raise request_error(exception=ex)
- return user_view(user, previous_username=previous_username), 200, headers
+ return user_view(user, previous_username=previous_username)
@show_if(features.USER_CREATION)
@show_if(features.DIRECT_LOGIN)
@@ -424,63 +388,44 @@ class User(ApiResource):
user_data = request.get_json()
- invite_code = user_data.get('invite_code', '')
- existing_user = model.user.get_nonrobot_user(user_data['username'])
- if existing_user:
- raise request_error(message='The username already exists')
-
- # Ensure an e-mail address was specified if required.
- if features.MAILING and not user_data.get('email'):
- raise request_error(message='Email address is required')
-
- # If invite-only user creation is turned on and no invite code was sent, return an error.
- # Technically, this is handled by the can_create_user call below as well, but it makes
- # a nicer error.
- if features.INVITE_ONLY_USER_CREATION and not invite_code:
- raise request_error(message='Cannot create non-invited user')
-
- # Ensure that this user can be created.
- blacklisted_domains = app.config.get('BLACKLISTED_EMAIL_DOMAINS', [])
- if not can_create_user(user_data.get('email'), blacklisted_domains=blacklisted_domains):
- raise request_error(message='Creation of a user account for this e-mail is disabled; please contact an administrator')
-
# If recaptcha is enabled, then verify the user is a human.
if features.RECAPTCHA:
recaptcha_response = user_data.get('recaptcha_response', '')
result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
recaptcha_response,
- get_request_ip())
+ request.remote_addr)
if not result['success']:
return {
'message': 'Are you a bot? If not, please revalidate the captcha.'
}, 400
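# Hedged restatement of the reCAPTCHA gate above as a helper; recaptcha2 is
# already in scope in this module (it is called above), and verify() returns
# a dict whose 'success' key decides whether the request proceeds. The secret
# and token values would come from config and the client, respectively.
import recaptcha2

def captcha_failure(secret_key, response_token, remote_ip):
  result = recaptcha2.verify(secret_key, response_token, remote_ip)
  if not result['success']:
    return {'message': 'Are you a bot? If not, please revalidate the captcha.'}, 400
  return None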
- is_possible_abuser = ip_resolver.is_ip_possible_threat(get_request_ip())
+ invite_code = user_data.get('invite_code', '')
+ existing_user = model.user.get_nonrobot_user(user_data['username'])
+ if existing_user:
+ raise request_error(message='The username already exists')
+
+ if features.MAILING and not user_data.get('email'):
+ raise request_error(message='Email address is required')
+
try:
prompts = model.user.get_default_user_prompts(features)
new_user = model.user.create_user(user_data['username'], user_data['password'],
user_data.get('email'),
auto_verify=not features.MAILING,
email_required=features.MAILING,
- is_possible_abuser=is_possible_abuser,
prompts=prompts)
email_address_confirmed = handle_invite_code(invite_code, new_user)
if features.MAILING and not email_address_confirmed:
- confirmation_code = model.user.create_confirm_email_code(new_user)
- send_confirmation_email(new_user.username, new_user.email, confirmation_code)
+ code = model.user.create_confirm_email_code(new_user)
+ send_confirmation_email(new_user.username, new_user.email, code.code)
return {
'awaiting_verification': True
}
else:
- success, headers = common_login(new_user.uuid)
- if not success:
- return {
- 'message': 'Could not login. Is your account inactive?'
- }, 403
-
- return user_view(new_user), 200, headers
+ common_login(new_user)
+ return user_view(new_user)
except model.user.DataModelException as ex:
raise request_error(exception=ex)
@@ -493,7 +438,7 @@ class User(ApiResource):
if app.config['AUTHENTICATION_TYPE'] != 'Database':
abort(404)
- model.user.mark_namespace_for_deletion(get_authenticated_user(), all_queues, namespace_gc_queue)
+ model.user.delete_user(get_authenticated_user(), all_queues)
return '', 204
@@ -549,9 +494,6 @@ class ClientKey(ApiResource):
@validate_json_request('GenerateClientKey')
def post(self):
""" Return's the user's private client key. """
- if not authentication.supports_encrypted_credentials:
- raise NotFound()
-
username = get_authenticated_user().username
password = request.get_json()['password']
(result, error_message) = authentication.confirm_existing_user(username, password)
@@ -574,9 +516,8 @@ def conduct_signin(username_or_email, password, invite_code=None):
if invite_code:
handle_invite_code(invite_code, found_user)
- success, headers = common_login(found_user.uuid)
- if success:
- return {'success': True}, 200, headers
+ if common_login(found_user):
+ return {'success': True}
else:
needs_email_verification = True
@@ -587,7 +528,7 @@ def conduct_signin(username_or_email, password, invite_code=None):
'needsEmailVerification': needs_email_verification,
'invalidCredentials': invalid_credentials,
'message': error_message
- }, 403, None
+ }, 403
@resource('/v1/user/convert')
@@ -686,7 +627,6 @@ class Signin(ApiResource):
@nickname('signinUser')
@validate_json_request('SigninUser')
@anon_allowed
- @readonly_call_allowed
def post(self):
""" Sign in the user with the specified credentials. """
signin_data = request.get_json()
@@ -723,7 +663,6 @@ class VerifyUser(ApiResource):
@require_user_admin
@nickname('verifyUser')
@validate_json_request('VerifyUser')
- @readonly_call_allowed
def post(self):
""" Verifies the signed in the user with the specified credentials. """
signin_data = request.get_json()
@@ -737,13 +676,8 @@ class VerifyUser(ApiResource):
'invalidCredentials': True,
}, 403
- success, headers = common_login(result.uuid)
- if not success:
- return {
- 'message': 'Could not verify user.',
- }, 403
-
- return {'success': True}, 200, headers
+ common_login(result)
+ return {'success': True}
@resource('/v1/signout')
@@ -753,75 +687,31 @@ class Signout(ApiResource):
@nickname('logout')
def post(self):
""" Request that the current user be signed out. """
- # Invalidate all sessions for the user.
- model.user.invalidate_all_sessions(get_authenticated_user())
-
- # Clear out the user's identity.
- identity_changed.send(app, identity=AnonymousIdentity())
-
- # Remove the user's session cookie.
logout_user()
-
+ identity_changed.send(app, identity=AnonymousIdentity())
return {'success': True}
-@resource('/v1/externallogin/<service_id>')
+@resource('/v1/externaltoken')
@internal_only
-class ExternalLoginInformation(ApiResource):
- """ Resource for both setting a token for external login and returning its authorization
- url.
- """
- schemas = {
- 'GetLogin': {
- 'type': 'object',
- 'description': 'Information required to retrieve an external login URL.',
- 'required': [
- 'kind',
- ],
- 'properties': {
- 'kind': {
- 'type': 'string',
- 'description': 'The kind of URL',
- 'enum': ['login', 'attach', 'cli'],
- },
- },
- },
- }
+class GenerateExternalToken(ApiResource):
+ """ Resource for generating a token for external login. """
+ @nickname('generateExternalLoginToken')
+ def post(self):
+ """ Generates a CSRF token explicitly for OIDC/OAuth-associated login. """
+ return {'token': generate_csrf_token(OAUTH_CSRF_TOKEN_NAME)}
- @nickname('retrieveExternalLoginAuthorizationUrl')
- @anon_allowed
- @readonly_call_allowed
- @validate_json_request('GetLogin')
- def post(self, service_id):
- """ Generates the auth URL and CSRF token explicitly for OIDC/OAuth-associated login. """
- login_service = oauth_login.get_service(service_id)
- if login_service is None:
- raise InvalidRequest()
-
- csrf_token = generate_csrf_token(OAUTH_CSRF_TOKEN_NAME)
- kind = request.get_json()['kind']
- redirect_suffix = '' if kind == 'login' else '/' + kind
-
- try:
- login_scopes = login_service.get_login_scopes()
- auth_url = login_service.get_auth_url(url_scheme_and_hostname, redirect_suffix, csrf_token, login_scopes)
- return {'auth_url': auth_url}
- except DiscoveryFailureException as dfe:
- logger.exception('Could not discover OAuth endpoint information')
- raise DownstreamIssue(dfe.message)
-
-
-@resource('/v1/detachexternal/<service_id>')
+@resource('/v1/detachexternal/<servicename>')
@show_if(features.DIRECT_LOGIN)
@internal_only
class DetachExternal(ApiResource):
""" Resource for detaching an external login. """
@require_user_admin
@nickname('detachExternalLogin')
- def post(self, service_id):
+ def post(self, servicename):
""" Request that the current user be detached from the external login service. """
- model.user.detach_external_login(get_authenticated_user(), service_id)
+ model.user.detach_external_login(get_authenticated_user(), servicename)
return {'success': True}
@@ -842,10 +732,6 @@ class Recovery(ApiResource):
'type': 'string',
'description': 'The user\'s email address',
},
- 'recaptcha_response': {
- 'type': 'string',
- 'description': 'The (possibly disabled) recaptcha response code for verification',
- },
},
},
}
@@ -866,26 +752,10 @@ class Recovery(ApiResource):
return v
- recovery_data = request.get_json()
-
- # If recaptcha is enabled, then verify the user is a human.
- if features.RECAPTCHA:
- recaptcha_response = recovery_data.get('recaptcha_response', '')
- result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
- recaptcha_response,
- get_request_ip())
-
- if not result['success']:
- return {
- 'message': 'Are you a bot? If not, please revalidate the captcha.'
- }, 400
-
- email = recovery_data['email']
+ email = request.get_json()['email']
user = model.user.find_user_by_email(email)
if not user:
- return {
- 'status': 'sent',
- }
+ raise model.InvalidEmailAddressException('Email address was not found.')
if user.organization:
send_org_recovery_email(user, model.organization.get_admin_users(user))
@@ -895,8 +765,8 @@ class Recovery(ApiResource):
'orgname': redact(user.username),
}
- confirmation_code = model.user.create_reset_password_email_code(email)
- send_recovery_email(email, confirmation_code)
+ code = model.user.create_reset_password_email_code(email)
+ send_recovery_email(email, code.code)
return {
'status': 'sent',
}
@@ -1118,3 +988,4 @@ class Users(ApiResource):
abort(404)
return user_view(user)
+
diff --git a/endpoints/appr/__init__.py b/endpoints/appr/__init__.py
deleted file mode 100644
index c998d8a95..000000000
--- a/endpoints/appr/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-
-from functools import wraps
-
-from cnr.exception import Forbidden
-from flask import Blueprint
-
-from app import metric_queue
-from auth.permissions import (AdministerRepositoryPermission, ReadRepositoryPermission,
- ModifyRepositoryPermission)
-from endpoints.appr.decorators import require_repo_permission
-from util.metrics.metricqueue import time_blueprint
-
-
-appr_bp = Blueprint('appr', __name__)
-time_blueprint(appr_bp, metric_queue)
-logger = logging.getLogger(__name__)
-
-
-def _raise_method(repository, scopes):
- raise Forbidden("Unauthorized access for: %s" % repository,
- {"package": repository, "scopes": scopes})
-
-
-def _get_reponame_kwargs(*args, **kwargs):
- return [kwargs['namespace'], kwargs['package_name']]
-
-
-require_app_repo_read = require_repo_permission(ReadRepositoryPermission,
- scopes=['pull'],
- allow_public=True,
- raise_method=_raise_method,
- get_reponame_method=_get_reponame_kwargs)
-
-require_app_repo_write = require_repo_permission(ModifyRepositoryPermission,
- scopes=['pull', 'push'],
- raise_method=_raise_method,
- get_reponame_method=_get_reponame_kwargs)
-
-require_app_repo_admin = require_repo_permission(AdministerRepositoryPermission,
- scopes=['pull', 'push'],
- raise_method=_raise_method,
- get_reponame_method=_get_reponame_kwargs)
diff --git a/endpoints/appr/cnr_backend.py b/endpoints/appr/cnr_backend.py
deleted file mode 100644
index a9e1b2539..000000000
--- a/endpoints/appr/cnr_backend.py
+++ /dev/null
@@ -1,177 +0,0 @@
-import base64
-
-from cnr.exception import raise_package_not_found
-from cnr.models.blob_base import BlobBase
-from cnr.models.channel_base import ChannelBase
-from cnr.models.db_base import CnrDB
-from cnr.models.package_base import PackageBase, manifest_media_type
-
-from flask import request
-from app import storage
-from endpoints.appr.models_cnr import model
-from util.request import get_request_ip
-
-
-class Blob(BlobBase):
- @classmethod
- def upload_url(cls, digest):
- return "cnr/blobs/sha256/%s/%s" % (digest[0:2], digest)
-
- def save(self, content_media_type):
- model.store_blob(self, content_media_type)
-
- @classmethod
- def delete(cls, package_name, digest):
- pass
-
- @classmethod
- def _fetch_b64blob(cls, package_name, digest):
- blobpath = cls.upload_url(digest)
- locations = model.get_blob_locations(digest)
- if not locations:
- raise_package_not_found(package_name, digest)
- return base64.b64encode(storage.get_content(locations, blobpath))
-
- @classmethod
- def download_url(cls, package_name, digest):
- blobpath = cls.upload_url(digest)
- locations = model.get_blob_locations(digest)
- if not locations:
- raise_package_not_found(package_name, digest)
- return storage.get_direct_download_url(locations, blobpath, get_request_ip())
-
-
-class Channel(ChannelBase):
- """ CNR Channel model implemented against the Quay data model. """
-
- def __init__(self, name, package, current=None):
- super(Channel, self).__init__(name, package, current=current)
- self._channel_data = None
-
- def _exists(self):
- """ Check if the channel is saved already """
- return model.channel_exists(self.package, self.name)
-
- @classmethod
- def get(cls, name, package):
- chanview = model.fetch_channel(package, name, with_releases=False)
- return cls(name, package, chanview.current)
-
- def save(self):
- model.update_channel(self.package, self.name, self.current)
-
- def delete(self):
- model.delete_channel(self.package, self.name)
-
- @classmethod
- def all(cls, package_name):
- return [
- Channel(c.name, package_name, c.current) for c in model.list_channels(package_name)
- ]
-
- @property
- def _channel(self):
- if self._channel_data is None:
- self._channel_data = model.fetch_channel(self.package, self.name)
- return self._channel_data
-
- def releases(self):
- """ Returns the list of versions """
- return self._channel.releases
-
- def _add_release(self, release):
- return model.update_channel(self.package, self.name, release)._asdict
-
- def _remove_release(self, release):
- model.delete_channel(self.package, self.name)
-
-
-class User(object):
- """ User in CNR models """
-
- @classmethod
- def get_user(cls, username, password):
- """ Returns True if user creds is valid """
- return model.get_user(username, password)
-
-
-class Package(PackageBase):
- """ CNR Package model implemented against the Quay data model. """
-
- @classmethod
- def _apptuple_to_dict(cls, apptuple):
- return {
- 'release': apptuple.release,
- 'created_at': apptuple.created_at,
- 'digest': apptuple.manifest.digest,
- 'mediaType': apptuple.manifest.mediaType,
- 'package': apptuple.name,
- 'content': apptuple.manifest.content._asdict()
- }
-
- @classmethod
- def create_repository(cls, package_name, visibility, owner):
- model.create_application(package_name, visibility, owner)
-
- @classmethod
- def exists(cls, package_name):
- return model.application_exists(package_name)
-
- @classmethod
- def all(cls, organization=None, media_type=None, search=None, username=None, **kwargs):
- return [
- dict(x._asdict())
- for x in model.list_applications(namespace=organization, media_type=media_type,
- search=search, username=username)
- ]
-
- @classmethod
- def _fetch(cls, package_name, release, media_type):
- data = model.fetch_release(package_name, release, manifest_media_type(media_type))
- return cls._apptuple_to_dict(data)
-
- @classmethod
- def all_releases(cls, package_name, media_type=None):
- return model.list_releases(package_name, media_type)
-
- @classmethod
- def search(cls, query, username=None):
- return model.basic_search(query, username=username)
-
- def _save(self, force=False, **kwargs):
- user = kwargs['user']
- visibility = kwargs['visibility']
- model.create_release(self, user, visibility, force)
-
- @classmethod
- def _delete(cls, package_name, release, media_type):
- model.delete_release(package_name, release, manifest_media_type(media_type))
-
- @classmethod
- def isdeleted_release(cls, package, release):
- return model.release_exists(package, release)
-
- def channels(self, channel_class, iscurrent=True):
- return [
- c.name
- for c in model.list_release_channels(self.package, self.release, active=iscurrent)
- ]
-
- @classmethod
- def manifests(cls, package, release=None):
- return model.list_manifests(package, release)
-
- @classmethod
- def dump_all(cls, blob_cls):
- raise NotImplementedError
-
-
-class QuayDB(CnrDB):
- """ Wrapper Class to embed all CNR Models """
- Channel = Channel
- Package = Package
- Blob = Blob
-
- @classmethod
- def reset_db(cls, force=False):
- pass
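# Hedged check (assumed digest value) of the blob-path sharding used by
# Blob.upload_url() above: the first two characters of the digest become a
# fan-out directory under cnr/blobs/sha256/.
digest = 'ab' + 'cd' * 31                            # assumed 64-char digest
blobpath = "cnr/blobs/sha256/%s/%s" % (digest[0:2], digest)
assert blobpath.startswith('cnr/blobs/sha256/ab/')
assert blobpath.endswith(digest)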
diff --git a/endpoints/appr/decorators.py b/endpoints/appr/decorators.py
deleted file mode 100644
index 8df6a46a9..000000000
--- a/endpoints/appr/decorators.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import logging
-
-from functools import wraps
-
-from data import model
-from util.http import abort
-
-
-logger = logging.getLogger(__name__)
-
-
-def _raise_unauthorized(repository, scopes):
- raise StandardError("Unauthorized acces to %s", repository)
-
-
-def _get_reponame_kwargs(*args, **kwargs):
- return [kwargs['namespace'], kwargs['package_name']]
-
-
-def disallow_for_image_repository(get_reponame_method=_get_reponame_kwargs):
- def wrapper(func):
- @wraps(func)
- def wrapped(*args, **kwargs):
- namespace_name, repo_name = get_reponame_method(*args, **kwargs)
- image_repo = model.repository.get_repository(namespace_name, repo_name, kind_filter='image')
- if image_repo is not None:
- logger.debug('Tried to invoke a CNR method on an image repository')
- abort(405, message='Cannot push an application to an image repository with the same name')
- return func(*args, **kwargs)
- return wrapped
- return wrapper
-
-
-def require_repo_permission(permission_class, scopes=None, allow_public=False,
- raise_method=_raise_unauthorized,
- get_reponame_method=_get_reponame_kwargs):
- def wrapper(func):
- @wraps(func)
- @disallow_for_image_repository(get_reponame_method=get_reponame_method)
- def wrapped(*args, **kwargs):
- namespace_name, repo_name = get_reponame_method(*args, **kwargs)
- logger.debug('Checking permission %s for repo: %s/%s', permission_class,
- namespace_name, repo_name)
- permission = permission_class(namespace_name, repo_name)
- if (permission.can() or
- (allow_public and
- model.repository.repository_is_public(namespace_name, repo_name))):
- return func(*args, **kwargs)
- repository = namespace_name + '/' + repo_name
- raise_method(repository, scopes)
- return wrapped
- return wrapper
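# Hedged, stubbed sketch of the factory pattern used by the deleted
# require_repo_permission() above: the factory binds a permission class, and
# the produced decorator instantiates it per call and gates the view on
# .can(). 'AllowAll', 'require_permission', and '_deny' are illustrative
# stand-ins, not Quay code.
from functools import wraps

class AllowAll(object):
  def __init__(self, namespace, repo):
    self.namespace, self.repo = namespace, repo
  def can(self):
    return True

def _deny(repository):
  raise Exception('Unauthorized access to %s' % repository)

def require_permission(permission_class, deny=_deny):
  def wrapper(func):
    @wraps(func)
    def wrapped(namespace, repo_name, **kwargs):
      if permission_class(namespace, repo_name).can():
        return func(namespace, repo_name, **kwargs)
      return deny(namespace + '/' + repo_name)
    return wrapped
  return wrapper

@require_permission(AllowAll)
def show(namespace, repo_name):
  return '%s/%s' % (namespace, repo_name)

assert show('devtable', 'simple') == 'devtable/simple'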
diff --git a/endpoints/appr/models_cnr.py b/endpoints/appr/models_cnr.py
deleted file mode 100644
index 89216127c..000000000
--- a/endpoints/appr/models_cnr.py
+++ /dev/null
@@ -1,316 +0,0 @@
-from datetime import datetime
-
-import cnr.semver
-
-from cnr.exception import raise_package_not_found, raise_channel_not_found, CnrException
-
-import features
-import data.model
-
-from app import storage, authentication
-from data import appr_model
-from data.database import Repository, MediaType, db_transaction
-from data.appr_model.models import NEW_MODELS
-from endpoints.appr.models_interface import (
- ApplicationManifest, ApplicationRelease, ApplicationSummaryView, AppRegistryDataInterface,
- BlobDescriptor, ChannelView, ChannelReleasesView)
-from util.audit import track_and_log
-from util.morecollections import AttrDict
-from util.names import parse_robot_username
-
-
-
-class ReadOnlyException(CnrException):
- status_code = 405
- errorcode = "read-only"
-
-
-def _strip_sha256_header(digest):
- if digest.startswith('sha256:'):
- return digest.split('sha256:')[1]
- return digest
-
-
-def _split_package_name(package):
- """ Returns the namespace and package-name """
- return package.split("/")
-
-
-def _join_package_name(ns, name):
- """ Returns a app-name in the 'namespace/name' format """
- return "%s/%s" % (ns, name)
-
-
-def _timestamp_to_iso(timestamp, in_ms=True):
- if in_ms:
- timestamp = timestamp / 1000
- return datetime.fromtimestamp(timestamp).isoformat()
-
-
-def _application(package):
- ns, name = _split_package_name(package)
- repo = data.model.repository.get_app_repository(ns, name)
- if repo is None:
- raise_package_not_found(package)
- return repo
-
-
-class CNRAppModel(AppRegistryDataInterface):
- def __init__(self, models_ref, is_readonly):
- self.models_ref = models_ref
- self.is_readonly = is_readonly
-
- def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None,
- analytics_sample=1, metadata=None):
- metadata = {} if metadata is None else metadata
-
- repo = None
- if repo_name is not None:
- db_repo = data.model.repository.get_repository(namespace_name, repo_name,
- kind_filter='application')
- repo = AttrDict({
- 'id': db_repo.id,
- 'name': db_repo.name,
- 'namespace_name': db_repo.namespace_user.username,
- 'is_free_namespace': db_repo.namespace_user.stripe_id is None,
- })
- track_and_log(event_name, repo, analytics_name=analytics_name,
- analytics_sample=analytics_sample, **metadata)
-
- def list_applications(self, namespace=None, media_type=None, search=None, username=None,
- with_channels=False):
- """ Lists all repositories that contain applications, with optional filtering to a specific
- namespace and/or to those visible to a specific user.
- """
-
- views = []
- for repo in appr_model.package.list_packages_query(self.models_ref, namespace, media_type,
- search, username=username):
- tag_set_prefetch = getattr(repo, self.models_ref.tag_set_prefetch_name)
- releases = [t.name for t in tag_set_prefetch]
- if not releases:
- continue
- available_releases = [
- str(x) for x in sorted(cnr.semver.versions(releases, False), reverse=True)]
- channels = None
- if with_channels:
- channels = [
- ChannelView(name=chan.name, current=chan.linked_tag.name)
- for chan in appr_model.channel.get_repo_channels(repo, self.models_ref)]
-
- app_name = _join_package_name(repo.namespace_user.username, repo.name)
- manifests = self.list_manifests(app_name, available_releases[0])
- view = ApplicationSummaryView(
- namespace=repo.namespace_user.username,
- name=app_name,
- visibility=repo.visibility.name,
- default=available_releases[0],
- channels=channels,
- manifests=manifests,
- releases=available_releases,
- updated_at=_timestamp_to_iso(tag_set_prefetch[-1].lifetime_start),
- created_at=_timestamp_to_iso(tag_set_prefetch[0].lifetime_start),)
- views.append(view)
- return views
-
- def application_is_public(self, package_name):
- """
- Returns:
- * True if the repository is public
- """
- namespace, name = _split_package_name(package_name)
- return data.model.repository.repository_is_public(namespace, name)
-
- def create_application(self, package_name, visibility, owner):
- """ Create a new app repository, owner is the user who creates it """
- if self.is_readonly:
- raise ReadOnlyException('Currently in read-only mode')
-
- ns, name = _split_package_name(package_name)
- data.model.repository.create_repository(ns, name, owner, visibility, 'application')
-
- def application_exists(self, package_name):
- """ Create a new app repository, owner is the user who creates it """
- ns, name = _split_package_name(package_name)
- return data.model.repository.get_repository(ns, name, kind_filter='application') is not None
-
- def basic_search(self, query, username=None):
- """ Returns an array of matching AppRepositories in the format: 'namespace/name'
- Note:
- * Only 'public' repositories are returned
-
- Todo:
- * Filter results to repositories readable by the user (including visibility)
- """
- return [
- _join_package_name(r.namespace_user.username, r.name)
- for r in data.model.repository.get_app_search(lookup=query, username=username, limit=50)]
-
- def list_releases(self, package_name, media_type=None):
- """ Return the list of all releases of an Application
- Example:
- >>> get_app_releases('ant31/rocketchat')
- ['1.7.1', '1.7.0', '1.7.2']
-
- Todo:
- * Paginate
- """
- return appr_model.release.get_releases(_application(package_name), self.models_ref, media_type)
-
- def list_manifests(self, package_name, release=None):
- """ Returns the list of all manifests of an Application.
-
- Todo:
- * Paginate
- """
- try:
- repo = _application(package_name)
- return list(appr_model.manifest.get_manifest_types(repo, self.models_ref, release))
- except (Repository.DoesNotExist, self.models_ref.Tag.DoesNotExist):
- raise_package_not_found(package_name, release)
-
- def fetch_release(self, package_name, release, media_type):
- """
- Retrieves an AppRelease from its repository-name and release-name
- """
- repo = _application(package_name)
- try:
- tag, manifest, blob = appr_model.release.get_app_release(repo, release, media_type,
- self.models_ref)
- created_at = _timestamp_to_iso(tag.lifetime_start)
-
- blob_descriptor = BlobDescriptor(digest=_strip_sha256_header(blob.digest),
- mediaType=blob.media_type.name, size=blob.size, urls=[])
-
- app_manifest = ApplicationManifest(
- digest=manifest.digest, mediaType=manifest.media_type.name, content=blob_descriptor)
-
- app_release = ApplicationRelease(release=tag.name, created_at=created_at, name=package_name,
- manifest=app_manifest)
- return app_release
- except (self.models_ref.Tag.DoesNotExist,
- self.models_ref.Manifest.DoesNotExist,
- self.models_ref.Blob.DoesNotExist,
- Repository.DoesNotExist,
- MediaType.DoesNotExist):
- raise_package_not_found(package_name, release, media_type)
-
- def store_blob(self, cnrblob, content_media_type):
- if self.is_readonly:
- raise ReadOnlyException('Currently in read-only mode')
-
- fp = cnrblob.packager.io_file
- path = cnrblob.upload_url(cnrblob.digest)
- locations = storage.preferred_locations
- storage.stream_write(locations, path, fp, 'application/x-gzip')
- db_blob = appr_model.blob.get_or_create_blob(cnrblob.digest, cnrblob.size, content_media_type,
- locations, self.models_ref)
- return BlobDescriptor(mediaType=content_media_type,
- digest=_strip_sha256_header(db_blob.digest), size=db_blob.size, urls=[])
-
- def create_release(self, package, user, visibility, force=False):
- """ Add an app-release to a repository
- package is an instance of data.cnr.package.Package
- """
- if self.is_readonly:
- raise ReadOnlyException('Currently in read-only mode')
-
- manifest = package.manifest()
- ns, name = package.namespace, package.name
- repo = data.model.repository.get_or_create_repository(ns, name, user, visibility=visibility,
- repo_kind='application')
- tag_name = package.release
- appr_model.release.create_app_release(repo, tag_name, package.manifest(),
- manifest['content']['digest'], self.models_ref, force)
-
- def delete_release(self, package_name, release, media_type):
- """ Remove/Delete an app-release from an app-repository.
- It does not delete the entire app-repository, only a single release
- """
- if self.is_readonly:
- raise ReadOnlyException('Currently in read-only mode')
-
- repo = _application(package_name)
- try:
- appr_model.release.delete_app_release(repo, release, media_type, self.models_ref)
- except (self.models_ref.Channel.DoesNotExist,
- self.models_ref.Tag.DoesNotExist,
- MediaType.DoesNotExist):
- raise_package_not_found(package_name, release, media_type)
-
- def release_exists(self, package, release):
- """ Return true if a release with that name already exist or
- have existed (include deleted ones) """
- # TODO: Figure out why this isn't implemented.
-
- def channel_exists(self, package_name, channel_name):
- """ Returns true if channel exists """
- repo = _application(package_name)
- return appr_model.tag.tag_exists(repo, channel_name, self.models_ref, "channel")
-
- def delete_channel(self, package_name, channel_name):
- """ Delete an AppChannel
- Note:
- It doesn't delete the AppReleases
- """
- if self.is_readonly:
- raise ReadOnlyException('Currently in read-only mode')
-
- repo = _application(package_name)
- try:
- appr_model.channel.delete_channel(repo, channel_name, self.models_ref)
- except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
- raise_channel_not_found(package_name, channel_name)
-
- def list_channels(self, package_name):
- """ Returns all AppChannel for a package """
- repo = _application(package_name)
- channels = appr_model.channel.get_repo_channels(repo, self.models_ref)
- return [ChannelView(name=chan.name, current=chan.linked_tag.name) for chan in channels]
-
- def fetch_channel(self, package_name, channel_name, with_releases=True):
- """ Returns an AppChannel """
- repo = _application(package_name)
-
- try:
- channel = appr_model.channel.get_channel(repo, channel_name, self.models_ref)
- except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
- raise_channel_not_found(package_name, channel_name)
-
- if with_releases:
- releases = appr_model.channel.get_channel_releases(repo, channel, self.models_ref)
- chanview = ChannelReleasesView(
- current=channel.linked_tag.name, name=channel.name,
- releases=[channel.linked_tag.name] + [c.name for c in releases])
- else:
- chanview = ChannelView(current=channel.linked_tag.name, name=channel.name)
-
- return chanview
-
- def list_release_channels(self, package_name, release, active=True):
- repo = _application(package_name)
- try:
- channels = appr_model.channel.get_tag_channels(repo, release, self.models_ref, active=active)
- return [ChannelView(name=c.name, current=c.linked_tag.name) for c in channels]
- except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
- raise_package_not_found(package_name, release)
-
- def update_channel(self, package_name, channel_name, release):
- """ Append a new release to the AppChannel
- Returns:
- A new AppChannel with the release
- """
- if self.is_readonly:
- raise ReadOnlyException('Currently in read-only mode')
-
- repo = _application(package_name)
- channel = appr_model.channel.create_or_update_channel(repo, channel_name, release,
- self.models_ref)
- return ChannelView(current=channel.linked_tag.name, name=channel.name)
-
- def get_blob_locations(self, digest):
- return appr_model.blob.get_blob_locations(digest, self.models_ref)
-
-
-# Phase 3: Read and write from new tables.
-model = CNRAppModel(NEW_MODELS, features.READONLY_APP_REGISTRY)
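# Hedged, stubbed sketch of the read-only guard the deleted CNRAppModel
# applies to every mutating method: a flag captured at construction time is
# checked before any write. The classes below are illustrative stand-ins.
class ReadOnlyError(Exception):
  status_code = 405

class GuardedModel(object):
  def __init__(self, is_readonly):
    self.is_readonly = is_readonly
  def create_application(self, package_name):
    if self.is_readonly:
      raise ReadOnlyError('Currently in read-only mode')
    return package_name

try:
  GuardedModel(is_readonly=True).create_application('ns/app')
  assert False, 'expected refusal'
except ReadOnlyError:
  pass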
diff --git a/endpoints/appr/models_interface.py b/endpoints/appr/models_interface.py
deleted file mode 100644
index 6ebf949ac..000000000
--- a/endpoints/appr/models_interface.py
+++ /dev/null
@@ -1,191 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-
-class BlobDescriptor(namedtuple('Blob', ['mediaType', 'size', 'digest', 'urls'])):
- """ BlobDescriptor describes a blob with its mediatype, size and digest.
- A BlobDescriptor is used to retrieve the actual blob.
- """
-
-
-class ChannelReleasesView(namedtuple('ChannelReleasesView', ['name', 'current', 'releases'])):
- """ A channel is a pointer to a Release (current).
- Releases are the previous tags pointed to by the channel (history).
- """
-
-
-class ChannelView(namedtuple('ChannelView', ['name', 'current'])):
- """ A channel is a pointer to a Release (current).
- """
-
-
-class ApplicationSummaryView(
- namedtuple('ApplicationSummaryView', [
- 'name', 'namespace', 'visibility', 'default', 'manifests', 'channels', 'releases',
- 'updated_at', 'created_at'
- ])):
- """ ApplicationSummaryView is an aggregated view of an application repository.
- """
-
-
-class ApplicationManifest(namedtuple('ApplicationManifest', ['mediaType', 'digest', 'content'])):
- """ ApplicationManifest embed the BlobDescriptor and some metadata around it.
- An ApplicationManifest is content-addressable.
- """
-
-
-class ApplicationRelease(
- namedtuple('ApplicationRelease', ['release', 'name', 'created_at', 'manifest'])):
- """ The ApplicationRelease associates an ApplicationManifest to a repository and release.
- """
-
-
-@add_metaclass(ABCMeta)
-class AppRegistryDataInterface(object):
- """ Interface that represents all data store interactions required by a App Registry.
- """
-
- @abstractmethod
- def list_applications(self, namespace=None, media_type=None, search=None, username=None,
- with_channels=False):
- """ Lists all repositories that contain applications, with optional filtering to a specific
- namespace and/or to those visible to a specific user.
-
- Returns: list of ApplicationSummaryView
- """
- pass
-
- @abstractmethod
- def application_is_public(self, package_name):
- """
- Returns true if the application is public
- """
- pass
-
- @abstractmethod
- def create_application(self, package_name, visibility, owner):
- """ Create a new app repository, owner is the user who creates it """
- pass
-
- @abstractmethod
- def application_exists(self, package_name):
- """ Returns true if the application exists """
- pass
-
- @abstractmethod
- def basic_search(self, query, username=None):
- """ Returns an array of matching application in the format: 'namespace/name'
- Note:
- * Only 'public' repositories are returned
- """
- pass
-
- # @TODO: Paginate
- @abstractmethod
- def list_releases(self, package_name, media_type=None):
- """ Returns the list of all releases(names) of an AppRepository
- Example:
- >>> get_app_releases('ant31/rocketchat')
- ['1.7.1', '1.7.0', '1.7.2']
- """
- pass
-
- # @TODO: Paginate
- @abstractmethod
- def list_manifests(self, package_name, release=None):
- """ Returns the list of all available manifests type of an Application across all releases or
- for a specific one.
-
- Example:
- >>> get_app_releases('ant31/rocketchat')
- ['1.7.1', '1.7.0', '1.7.2']
- """
- pass
-
- @abstractmethod
- def fetch_release(self, package_name, release, media_type):
- """
- Returns an ApplicationRelease
- """
- pass
-
- @abstractmethod
- def store_blob(self, cnrblob, content_media_type):
- """
- Upload the blob content to a storage location and creates a Blob entry in the DB.
-
- Returns a BlobDescriptor
- """
- pass
-
- @abstractmethod
- def create_release(self, package, user, visibility, force=False):
- """ Creates and returns an ApplicationRelease
- - package is a data.model.Package object
- - user is the owner of the package
- - visibility is a string: 'public' or 'private'
- """
- pass
-
- @abstractmethod
- def release_exists(self, package, release):
- """ Return true if a release with that name already exist or
- has existed (including deleted ones)
- """
- pass
-
- @abstractmethod
- def delete_release(self, package_name, release, media_type):
- """ Remove/Delete an app-release from an app-repository.
- It does not delete the entire app-repository, only a single release
- """
- pass
-
- @abstractmethod
- def list_release_channels(self, package_name, release, active=True):
- """ Returns a list of Channel that are/was pointing to a release.
- If active is True, returns only active Channel (lifetime_end not null)
- """
- pass
-
- @abstractmethod
- def channel_exists(self, package_name, channel_name):
- """ Returns true if the channel with the given name exists under the matching package """
- pass
-
- @abstractmethod
- def update_channel(self, package_name, channel_name, release):
- """ Append a new release to the Channel
- Returns a new Channel with the release as current
- """
- pass
-
- @abstractmethod
- def delete_channel(self, package_name, channel_name):
- """ Delete a Channel, it doesn't delete/touch the ApplicationRelease pointed by the channel """
-
- # @TODO: Paginate
- @abstractmethod
- def list_channels(self, package_name):
- """ Returns all AppChannel for a package """
- pass
-
- @abstractmethod
- def fetch_channel(self, package_name, channel_name, with_releases=True):
- """ Returns an Channel
- Raises: ChannelNotFound, PackageNotFound
- """
- pass
-
- @abstractmethod
- def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None,
- analytics_sample=1, **kwargs):
- """ Logs an action to the audit log. """
- pass
-
- @abstractmethod
- def get_blob_locations(self, digest):
- """ Returns a list of strings for the locations in which a Blob is present. """
- pass
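# Hedged sketch of the namedtuple "view" idiom this interface file builds on:
# a docstring-only subclass of a namedtuple gives an immutable, documented
# record. Field values below are illustrative.
from collections import namedtuple

class ExampleChannelView(namedtuple('ExampleChannelView', ['name', 'current'])):
  """ A channel is a pointer to a release (current). """

view = ExampleChannelView(name='stable', current='1.7.2')
assert view.current == '1.7.2'
assert view._replace(current='1.7.3').current == '1.7.3'   # copies, never mutates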
diff --git a/endpoints/appr/registry.py b/endpoints/appr/registry.py
deleted file mode 100644
index 0b470f878..000000000
--- a/endpoints/appr/registry.py
+++ /dev/null
@@ -1,318 +0,0 @@
-import logging
-from base64 import b64encode
-
-import cnr
-from cnr.api.impl import registry as cnr_registry
-from cnr.api.registry import _pull, repo_name
-from cnr.exception import (
- ChannelNotFound, CnrException, Forbidden, InvalidParams, InvalidRelease, InvalidUsage,
- PackageAlreadyExists, PackageNotFound, PackageReleaseNotFound, UnableToLockResource,
- UnauthorizedAccess, Unsupported)
-from flask import jsonify, request
-
-from auth.auth_context import get_authenticated_user
-from auth.credentials import validate_credentials
-from auth.decorators import process_auth
-from auth.permissions import CreateRepositoryPermission, ModifyRepositoryPermission
-from data.logs_model import logs_model
-from endpoints.appr import appr_bp, require_app_repo_read, require_app_repo_write
-from endpoints.appr.cnr_backend import Blob, Channel, Package, User
-from endpoints.appr.decorators import disallow_for_image_repository
-from endpoints.appr.models_cnr import model
-from endpoints.decorators import anon_allowed, anon_protect, check_region_blacklisted
-from util.names import REPOSITORY_NAME_REGEX, TAG_REGEX
-
-logger = logging.getLogger(__name__)
-
-
-@appr_bp.errorhandler(Unsupported)
-@appr_bp.errorhandler(PackageAlreadyExists)
-@appr_bp.errorhandler(InvalidRelease)
-@appr_bp.errorhandler(Forbidden)
-@appr_bp.errorhandler(UnableToLockResource)
-@appr_bp.errorhandler(UnauthorizedAccess)
-@appr_bp.errorhandler(PackageNotFound)
-@appr_bp.errorhandler(PackageReleaseNotFound)
-@appr_bp.errorhandler(CnrException)
-@appr_bp.errorhandler(InvalidUsage)
-@appr_bp.errorhandler(InvalidParams)
-@appr_bp.errorhandler(ChannelNotFound)
-def render_error(error):
- response = jsonify({"error": error.to_dict()})
- response.status_code = error.status_code
- return response
-
-
-@appr_bp.route("/version")
-@anon_allowed
-def version():
- return jsonify({"cnr-api": cnr.__version__})
-
-
-@appr_bp.route("/api/v1/users/login", methods=['POST'])
-@anon_allowed
-def login():
- values = request.get_json(force=True, silent=True) or {}
- username = values.get('user', {}).get('username')
- password = values.get('user', {}).get('password')
- if not username or not password:
- raise InvalidUsage('Missing username or password')
-
- result, _ = validate_credentials(username, password)
- if not result.auth_valid:
- raise UnauthorizedAccess(result.error_message)
-
- return jsonify({'token': "basic " + b64encode("%s:%s" % (username, password))})
-
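# Hedged illustration (assumed credentials) of the token shape returned by
# login() above: the string "basic " plus the base64 of "username:password",
# i.e. a ready-made HTTP Basic authorization payload.
from base64 import b64encode

token = "basic " + b64encode("%s:%s" % ('devtable', 'password'))
assert token == "basic " + b64encode("devtable:password")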
-
-# @TODO: Redirect to S3 url
-@appr_bp.route(
- "/api/v1/packages///blobs/sha256/",
- methods=['GET'],
- strict_slashes=False,)
-@process_auth
-@require_app_repo_read
-@check_region_blacklisted(namespace_name_kwarg='namespace')
-@anon_protect
-def blobs(namespace, package_name, digest):
- reponame = repo_name(namespace, package_name)
- data = cnr_registry.pull_blob(reponame, digest, blob_class=Blob)
- json_format = request.args.get('format', None) == 'json'
- return _pull(data, json_format=json_format)
-
-
-@appr_bp.route("/api/v1/packages", methods=['GET'], strict_slashes=False)
-@process_auth
-@anon_protect
-def list_packages():
- namespace = request.args.get('namespace', None)
- media_type = request.args.get('media_type', None)
- query = request.args.get('query', None)
- user = get_authenticated_user()
- username = None
- if user:
- username = user.username
- result_data = cnr_registry.list_packages(namespace, package_class=Package, search=query,
- media_type=media_type, username=username)
- return jsonify(result_data)
-
-
-@appr_bp.route(
- "/api/v1/packages////",
- methods=['DELETE'], strict_slashes=False)
-@process_auth
-@require_app_repo_write
-@anon_protect
-def delete_package(namespace, package_name, release, media_type):
- reponame = repo_name(namespace, package_name)
- result = cnr_registry.delete_package(reponame, release, media_type, package_class=Package)
- logs_model.log_action('delete_tag', namespace, repository_name=package_name,
- metadata={'release': release, 'mediatype': media_type})
- return jsonify(result)
-
-
-@appr_bp.route(
- "/api/v1/packages////",
- methods=['GET'], strict_slashes=False)
-@process_auth
-@require_app_repo_read
-@check_region_blacklisted(namespace_name_kwarg='namespace')
-@anon_protect
-def show_package(namespace, package_name, release, media_type):
- reponame = repo_name(namespace, package_name)
- result = cnr_registry.show_package(reponame, release, media_type, channel_class=Channel,
- package_class=Package)
- return jsonify(result)
-
-
-@appr_bp.route("/api/v1/packages//", methods=['GET'],
- strict_slashes=False)
-@process_auth
-@require_app_repo_read
-@anon_protect
-def show_package_releases(namespace, package_name):
- reponame = repo_name(namespace, package_name)
- media_type = request.args.get('media_type', None)
- result = cnr_registry.show_package_releases(reponame, media_type=media_type,
- package_class=Package)
- return jsonify(result)
-
-
-@appr_bp.route("/api/v1/packages///",
- methods=['GET'], strict_slashes=False)
-@process_auth
-@require_app_repo_read
-@anon_protect
-def show_package_release_manifests(namespace, package_name, release):
- reponame = repo_name(namespace, package_name)
- result = cnr_registry.show_package_manifests(reponame, release, package_class=Package)
- return jsonify(result)
-
-
-@appr_bp.route(
- "/api/v1/packages/////pull",
- methods=['GET'],
- strict_slashes=False,)
-@process_auth
-@require_app_repo_read
-@check_region_blacklisted(namespace_name_kwarg='namespace')
-@anon_protect
-def pull(namespace, package_name, release, media_type):
- logger.debug('Pull of release %s of app repository %s/%s', release, namespace, package_name)
- reponame = repo_name(namespace, package_name)
- data = cnr_registry.pull(reponame, release, media_type, Package, blob_class=Blob)
- logs_model.log_action('pull_repo', namespace, repository_name=package_name,
- metadata={'release': release, 'mediatype': media_type})
- json_format = request.args.get('format', None) == 'json'
- return _pull(data, json_format)
-
-
-@appr_bp.route("/api/v1/packages//", methods=['POST'],
- strict_slashes=False)
-@disallow_for_image_repository()
-@process_auth
-@anon_protect
-def push(namespace, package_name):
- reponame = repo_name(namespace, package_name)
-
- if not REPOSITORY_NAME_REGEX.match(package_name):
- logger.debug('Found invalid repository name in CNR push: %s', reponame)
- raise InvalidUsage('invalid repository name: %s' % reponame)
-
- values = request.get_json(force=True, silent=True) or {}
- private = values.get('visibility', 'private')
-
- owner = get_authenticated_user()
- if not Package.exists(reponame):
- if not CreateRepositoryPermission(namespace).can():
- raise Forbidden("Unauthorized access for: %s" % reponame,
- {"package": reponame,
- "scopes": ['create']})
- Package.create_repository(reponame, private, owner)
- logs_model.log_action('create_repo', namespace, repository_name=package_name)
-
- if not ModifyRepositoryPermission(namespace, package_name).can():
- raise Forbidden("Unauthorized access for: %s" % reponame,
- {"package": reponame,
- "scopes": ['push']})
-
-  if 'release' not in values:
-    raise InvalidUsage('Missing release')
-
-  if 'media_type' not in values:
-    raise InvalidUsage('Missing media_type')
-
-  if 'blob' not in values:
-    raise InvalidUsage('Missing blob')
-
- release_version = str(values['release'])
- media_type = values['media_type']
- force = request.args.get('force', 'false') == 'true'
-
- blob = Blob(reponame, values['blob'])
- app_release = cnr_registry.push(reponame, release_version, media_type, blob, force,
- package_class=Package, user=owner, visibility=private)
- logs_model.log_action('push_repo', namespace, repository_name=package_name,
- metadata={'release': release_version})
- return jsonify(app_release)
-
-
-@appr_bp.route("/api/v1/packages/search", methods=['GET'], strict_slashes=False)
-@process_auth
-@anon_protect
-def search_packages():
- query = request.args.get("q")
- user = get_authenticated_user()
- username = None
- if user:
- username = user.username
-
- search_results = cnr_registry.search(query, Package, username=username)
- return jsonify(search_results)
-
-
-# CHANNELS
-@appr_bp.route("/api/v1/packages///channels",
- methods=['GET'], strict_slashes=False)
-@process_auth
-@require_app_repo_read
-@anon_protect
-def list_channels(namespace, package_name):
- reponame = repo_name(namespace, package_name)
- return jsonify(cnr_registry.list_channels(reponame, channel_class=Channel))
-
-
-@appr_bp.route(
- "/api/v1/packages///channels/",
- methods=['GET'], strict_slashes=False)
-@process_auth
-@require_app_repo_read
-@anon_protect
-def show_channel(namespace, package_name, channel_name):
- reponame = repo_name(namespace, package_name)
- channel = cnr_registry.show_channel(reponame, channel_name, channel_class=Channel)
- return jsonify(channel)
-
-
-@appr_bp.route(
- "/api/v1/packages///channels//",
- methods=['POST'],
- strict_slashes=False,)
-@process_auth
-@require_app_repo_write
-@anon_protect
-def add_channel_release(namespace, package_name, channel_name, release):
- _check_channel_name(channel_name, release)
- reponame = repo_name(namespace, package_name)
- result = cnr_registry.add_channel_release(reponame, channel_name, release, channel_class=Channel,
- package_class=Package)
- logs_model.log_action('create_tag', namespace, repository_name=package_name,
- metadata={'channel': channel_name, 'release': release})
- return jsonify(result)
-
-
-def _check_channel_name(channel_name, release=None):
- if not TAG_REGEX.match(channel_name):
- logger.debug('Found invalid channel name in CNR add channel release: %s', channel_name)
- raise InvalidUsage("Found invalid channel name %s" % channel_name,
- {'name': channel_name,
- 'release': release})
-
- if release is not None and not TAG_REGEX.match(release):
- logger.debug('Found invalid release name in CNR add channel release: %s', release)
- raise InvalidUsage('Found invalid channel release name %s' % release,
- {'name': channel_name,
- 'release': release})
-
-
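# Hedged sketch of the validation in _check_channel_name() above, with an
# assumed stand-in pattern; the real TAG_REGEX comes from util.names and is
# not reproduced here.
import re

EXAMPLE_TAG_REGEX = re.compile(r'^[\w][\w.-]{0,127}$')    # assumption only

assert EXAMPLE_TAG_REGEX.match('stable')
assert EXAMPLE_TAG_REGEX.match('1.7.2')
assert not EXAMPLE_TAG_REGEX.match('')                    # empty name rejected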
-@appr_bp.route(
- "/api/v1/packages///channels//",
- methods=['DELETE'],
- strict_slashes=False,)
-@process_auth
-@require_app_repo_write
-@anon_protect
-def delete_channel_release(namespace, package_name, channel_name, release):
- _check_channel_name(channel_name, release)
- reponame = repo_name(namespace, package_name)
- result = cnr_registry.delete_channel_release(reponame, channel_name, release,
- channel_class=Channel, package_class=Package)
- logs_model.log_action('delete_tag', namespace, repository_name=package_name,
- metadata={'channel': channel_name, 'release': release})
- return jsonify(result)
-
-
-@appr_bp.route(
- "/api/v1/packages///channels/",
- methods=['DELETE'],
- strict_slashes=False,)
-@process_auth
-@require_app_repo_write
-@anon_protect
-def delete_channel(namespace, package_name, channel_name):
- _check_channel_name(channel_name)
- reponame = repo_name(namespace, package_name)
- result = cnr_registry.delete_channel(reponame, channel_name, channel_class=Channel)
- logs_model.log_action('delete_tag', namespace, repository_name=package_name,
- metadata={'channel': channel_name})
- return jsonify(result)
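Reviewer note: the deleted `_check_channel_name` helper above gates both channel and release names on `TAG_REGEX` before any registry call. A minimal, self-contained sketch of that validation pattern; the regex below is an assumption modeled on Docker tag rules, not the exact `TAG_REGEX` from this codebase, and `InvalidUsage` is a local stand-in:

```python
import re

# Assumed stand-in for TAG_REGEX: up to 128 chars, starting with a
# word character, then word characters, dots, or dashes.
TAG_REGEX = re.compile(r'^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$')


class InvalidUsage(Exception):
    """ Hypothetical stand-in for the endpoint's InvalidUsage error. """
    def __init__(self, message, payload=None):
        super(InvalidUsage, self).__init__(message)
        self.payload = payload or {}


def check_channel_name(channel_name, release=None):
    # Reject channel names that do not look like valid tags.
    if not TAG_REGEX.match(channel_name):
        raise InvalidUsage('Found invalid channel name %s' % channel_name,
                           {'name': channel_name, 'release': release})

    # The release, when given, must satisfy the same tag grammar.
    if release is not None and not TAG_REGEX.match(release):
        raise InvalidUsage('Found invalid channel release name %s' % release,
                           {'name': channel_name, 'release': release})


check_channel_name('stable', release='2.0.1')  # passes silently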
diff --git a/endpoints/appr/test/test_api.py b/endpoints/appr/test/test_api.py
deleted file mode 100644
index 99af88c2c..000000000
--- a/endpoints/appr/test/test_api.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import uuid
-
-import pytest
-
-from cnr.tests.conftest import *
-from cnr.tests.test_apiserver import BaseTestServer
-from cnr.tests.test_models import CnrTestModels
-
-import data.appr_model.blob as appr_blob
-
-from data.database import User
-from data.model import organization, user
-from endpoints.appr import registry # Needed to register the endpoint
-from endpoints.appr.cnr_backend import Channel, Package, QuayDB
-from endpoints.appr.models_cnr import model as appr_app_model
-
-from test.fixtures import *
-
-
-def create_org(namespace, owner):
- try:
- User.get(username=namespace)
- except User.DoesNotExist:
- organization.create_organization(namespace, "%s@test.com" % str(uuid.uuid1()), owner)
-
-
-class ChannelTest(Channel):
- @classmethod
- def dump_all(cls, package_class=None):
- result = []
- for repo in appr_app_model.list_applications(with_channels=True):
- for chan in repo.channels:
- result.append({'name': chan.name, 'current': chan.current, 'package': repo.name})
- return result
-
-
-class PackageTest(Package):
- def _save(self, force, **kwargs):
- owner = user.get_user('devtable')
- create_org(self.namespace, owner)
- super(PackageTest, self)._save(force, user=owner, visibility="public")
-
- @classmethod
- def create_repository(cls, package_name, visibility, owner):
- ns, _ = package_name.split("/")
- owner = user.get_user('devtable')
- visibility = "public"
- create_org(ns, owner)
- return super(PackageTest, cls).create_repository(package_name, visibility, owner)
-
- @classmethod
- def dump_all(cls, blob_cls):
- result = []
- for repo in appr_app_model.list_applications(with_channels=True):
- package_name = repo.name
- for release in repo.releases:
- for mtype in cls.manifests(package_name, release):
- package = appr_app_model.fetch_release(package_name, release, mtype)
- blob = blob_cls.get(package_name, package.manifest.content.digest)
- app_data = cls._apptuple_to_dict(package)
- app_data.pop('digest')
- app_data['channels'] = [
- x.name
- for x in appr_app_model.list_release_channels(package_name, package.release, False)
- ]
- app_data['blob'] = blob.b64blob
- result.append(app_data)
- return result
-
-
-@pytest.fixture(autouse=True)
-def quaydb(monkeypatch, app):
- monkeypatch.setattr('endpoints.appr.cnr_backend.QuayDB.Package', PackageTest)
- monkeypatch.setattr('endpoints.appr.cnr_backend.Package', PackageTest)
- monkeypatch.setattr('endpoints.appr.registry.Package', PackageTest)
- monkeypatch.setattr('cnr.models.Package', PackageTest)
-
- monkeypatch.setattr('endpoints.appr.cnr_backend.QuayDB.Channel', ChannelTest)
- monkeypatch.setattr('endpoints.appr.registry.Channel', ChannelTest)
- monkeypatch.setattr('cnr.models.Channel', ChannelTest)
-
-
-class TestServerQuayDB(BaseTestServer):
- DB_CLASS = QuayDB
-
- @property
- def token(self):
- return "basic ZGV2dGFibGU6cGFzc3dvcmQ="
-
- def test_search_package_match(self, db_with_data1, client):
- """ TODO: search cross namespace and package name """
- BaseTestServer.test_search_package_match(self, db_with_data1, client)
-
- def test_list_search_package_match(self, db_with_data1, client):
- url = self._url_for("api/v1/packages")
- res = self.Client(client, self.headers()).get(url, params={'query': 'rocketchat'})
- assert res.status_code == 200
- assert len(self.json(res)) == 1
-
- def test_list_search_package_no_match(self, db_with_data1, client):
- url = self._url_for("api/v1/packages")
- res = self.Client(client, self.headers()).get(url, params={'query': 'toto'})
- assert res.status_code == 200
- assert len(self.json(res)) == 0
-
- @pytest.mark.xfail
- def test_push_package_already_exists_force(self, db_with_data1, package_b64blob, client):
- """ No force push implemented """
- BaseTestServer.test_push_package_already_exists_force(self, db_with_data1, package_b64blob,
- client)
-
- @pytest.mark.xfail
- def test_delete_channel_release_absent_release(self, db_with_data1, client):
- BaseTestServer.test_delete_channel_release_absent_release(self, db_with_data1, client)
-
- @pytest.mark.xfail
- def test_get_absent_blob(self, newdb, client):
- pass
-
-
-class TestQuayModels(CnrTestModels):
- DB_CLASS = QuayDB
-
- @pytest.mark.xfail
- def test_channel_delete_releases(self, db_with_data1):
- """ Can't remove a release from the channel, only delete the channel entirely """
- CnrTestModels.test_channel_delete_releases(self, db_with_data1)
-
- @pytest.mark.xfail
- def test_forbiddeb_db_reset(self, db_class):  # name (with typo) shadows the upstream CNR test
- pass
-
- @pytest.mark.xfail
- def test_db_restore(self, newdb, dbdata1):
- # This will fail as long as CNR tests use a mediatype with v1.
- pass
-
- def test_push_same_blob(self, db_with_data1):
- p = db_with_data1.Package.get("titi/rocketchat", ">1.2", 'kpm')
- assert p.package == "titi/rocketchat"
- assert p.release == "2.0.1"
- assert p.digest == "d3b54b7912fe770a61b59ab612a442eac52a8a5d8d05dbe92bf8f212d68aaa80"
- blob = db_with_data1.Blob.get("titi/rocketchat", p.digest)
- bdb = appr_blob.get_blob(p.digest, appr_app_model.models_ref)
- newblob = db_with_data1.Blob("titi/app2", blob.b64blob)
- p2 = db_with_data1.Package("titi/app2", "1.0.0", "helm", newblob)
- p2.save()
- b2db = appr_blob.get_blob(p2.digest, appr_app_model.models_ref)
- assert b2db.id == bdb.id
-
- def test_force_push_different_blob(self, db_with_data1):
- p = db_with_data1.Package.get("titi/rocketchat", "2.0.1", 'kpm')
- assert p.package == "titi/rocketchat"
- assert p.release == "2.0.1"
- assert p.digest == "d3b54b7912fe770a61b59ab612a442eac52a8a5d8d05dbe92bf8f212d68aaa80"
- blob = db_with_data1.Blob.get(
- "titi/rocketchat", "72ed15c9a65961ecd034cca098ec18eb99002cd402824aae8a674a8ae41bd0ef")
- p2 = db_with_data1.Package("titi/rocketchat", "2.0.1", "kpm", blob)
- p2.save(force=True)
- pnew = db_with_data1.Package.get("titi/rocketchat", "2.0.1", 'kpm')
- assert pnew.package == "titi/rocketchat"
- assert pnew.release == "2.0.1"
- assert pnew.digest == "72ed15c9a65961ecd034cca098ec18eb99002cd402824aae8a674a8ae41bd0ef"
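Reviewer note: the autouse `quaydb` fixture in the deleted test module swaps the CNR model classes for test doubles via `monkeypatch.setattr`, so every test in the module transparently hits the fakes. A reduced sketch of that substitution pattern, using hypothetical class names in place of the CNR backends:

```python
import pytest


class RealBackend(object):
    def fetch(self):
        return 'real'


class FakeBackend(RealBackend):
    def fetch(self):
        return 'fake'


# Hypothetical module-level reference, as endpoints.appr.registry holds one.
backend = RealBackend()


@pytest.fixture(autouse=True)
def fake_backend(monkeypatch):
    # Every test in this module sees the fake; the original binding is
    # restored automatically when the fixture is torn down.
    monkeypatch.setattr('%s.backend' % __name__, FakeBackend())


def test_uses_fake():
    assert backend.fetch() == 'fake'
```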
diff --git a/endpoints/appr/test/test_api_security.py b/endpoints/appr/test/test_api_security.py
deleted file mode 100644
index c3e52b30c..000000000
--- a/endpoints/appr/test/test_api_security.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import base64
-import pytest
-
-from flask import url_for
-
-from data import model
-from endpoints.appr.registry import appr_bp, blobs
-from endpoints.test.shared import client_with_identity
-from test.fixtures import *
-
-BLOB_ARGS = {'digest': 'abcd1235'}
-PACKAGE_ARGS = {'release': 'r', 'media_type': 'foo'}
-RELEASE_ARGS = {'release': 'r'}
-CHANNEL_ARGS = {'channel_name': 'c'}
-CHANNEL_RELEASE_ARGS = {'channel_name': 'c', 'release': 'r'}
-
-@pytest.mark.parametrize('resource,method,params,owned_by,is_public,identity,expected', [
- ('appr.blobs', 'GET', BLOB_ARGS, 'devtable', False, 'public', 403),
- ('appr.blobs', 'GET', BLOB_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.blobs', 'GET', BLOB_ARGS, 'devtable', True, 'public', 404),
- ('appr.blobs', 'GET', BLOB_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.delete_package', 'DELETE', PACKAGE_ARGS, 'devtable', False, 'public', 403),
- ('appr.delete_package', 'DELETE', PACKAGE_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.delete_package', 'DELETE', PACKAGE_ARGS, 'devtable', True, 'public', 403),
- ('appr.delete_package', 'DELETE', PACKAGE_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.show_package', 'GET', PACKAGE_ARGS, 'devtable', False, 'public', 403),
- ('appr.show_package', 'GET', PACKAGE_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.show_package', 'GET', PACKAGE_ARGS, 'devtable', True, 'public', 404),
- ('appr.show_package', 'GET', PACKAGE_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.show_package_releases', 'GET', {}, 'devtable', False, 'public', 403),
- ('appr.show_package_releases', 'GET', {}, 'devtable', False, 'devtable', 200),
- ('appr.show_package_releases', 'GET', {}, 'devtable', True, 'public', 200),
- ('appr.show_package_releases', 'GET', {}, 'devtable', True, 'devtable', 200),
-
- ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', False, 'public', 403),
- ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', False, 'devtable', 200),
- ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', True, 'public', 200),
- ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', True, 'devtable', 200),
-
- ('appr.pull', 'GET', PACKAGE_ARGS, 'devtable', False, 'public', 403),
- ('appr.pull', 'GET', PACKAGE_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.pull', 'GET', PACKAGE_ARGS, 'devtable', True, 'public', 404),
- ('appr.pull', 'GET', PACKAGE_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.push', 'POST', {}, 'devtable', False, 'public', 403),
- ('appr.push', 'POST', {}, 'devtable', False, 'devtable', 400),
- ('appr.push', 'POST', {}, 'devtable', True, 'public', 403),
- ('appr.push', 'POST', {}, 'devtable', True, 'devtable', 400),
-
- ('appr.list_channels', 'GET', {}, 'devtable', False, 'public', 403),
- ('appr.list_channels', 'GET', {}, 'devtable', False, 'devtable', 200),
- ('appr.list_channels', 'GET', {}, 'devtable', True, 'public', 200),
- ('appr.list_channels', 'GET', {}, 'devtable', True, 'devtable', 200),
-
- ('appr.show_channel', 'GET', CHANNEL_ARGS, 'devtable', False, 'public', 403),
- ('appr.show_channel', 'GET', CHANNEL_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.show_channel', 'GET', CHANNEL_ARGS, 'devtable', True, 'public', 404),
- ('appr.show_channel', 'GET', CHANNEL_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.delete_channel', 'DELETE', CHANNEL_ARGS, 'devtable', False, 'public', 403),
- ('appr.delete_channel', 'DELETE', CHANNEL_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.delete_channel', 'DELETE', CHANNEL_ARGS, 'devtable', True, 'public', 403),
- ('appr.delete_channel', 'DELETE', CHANNEL_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.add_channel_release', 'POST', CHANNEL_RELEASE_ARGS, 'devtable', False, 'public', 403),
- ('appr.add_channel_release', 'POST', CHANNEL_RELEASE_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.add_channel_release', 'POST', CHANNEL_RELEASE_ARGS, 'devtable', True, 'public', 403),
- ('appr.add_channel_release', 'POST', CHANNEL_RELEASE_ARGS, 'devtable', True, 'devtable', 404),
-
- ('appr.delete_channel_release', 'DELETE', CHANNEL_RELEASE_ARGS, 'devtable', False, 'public', 403),
- ('appr.delete_channel_release', 'DELETE', CHANNEL_RELEASE_ARGS, 'devtable', False, 'devtable', 404),
- ('appr.delete_channel_release', 'DELETE', CHANNEL_RELEASE_ARGS, 'devtable', True, 'public', 403),
- ('appr.delete_channel_release', 'DELETE', CHANNEL_RELEASE_ARGS, 'devtable', True, 'devtable', 404),
-])
-def test_api_security(resource, method, params, owned_by, is_public, identity, expected, app, client):
- app.register_blueprint(appr_bp, url_prefix='/cnr')
-
- with client_with_identity(identity, client) as cl:
- owner = model.user.get_user(owned_by)
- visibility = 'public' if is_public else 'private'
- model.repository.create_repository(owned_by, 'someapprepo', owner, visibility=visibility,
- repo_kind='application')
-
- params['namespace'] = owned_by
- params['package_name'] = 'someapprepo'
- params['_csrf_token'] = '123csrfforme'
-
- url = url_for(resource, **params)
- headers = {}
- if identity is not None:
- headers['authorization'] = 'basic ' + base64.b64encode('%s:password' % identity)
-
- rv = cl.open(url, headers=headers, method=method)
- assert rv.status_code == expected
diff --git a/endpoints/appr/test/test_appr_decorators.py b/endpoints/appr/test/test_appr_decorators.py
deleted file mode 100644
index 77519d6bd..000000000
--- a/endpoints/appr/test/test_appr_decorators.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pytest
-
-from werkzeug.exceptions import HTTPException
-
-from data import model
-from endpoints.appr import require_app_repo_read
-
-from test.fixtures import *
-
-def test_require_app_repo_read(app):
- called = [False]
-
- # Ensure that trying to read an *image* repository fails.
- @require_app_repo_read
- def empty(**kwargs):
- called[0] = True
-
- with pytest.raises(HTTPException):
- empty(namespace='devtable', package_name='simple')
- assert not called[0]
diff --git a/endpoints/appr/test/test_digest_prefix.py b/endpoints/appr/test/test_digest_prefix.py
deleted file mode 100644
index 089becd43..000000000
--- a/endpoints/appr/test/test_digest_prefix.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import pytest
-from endpoints.appr.models_cnr import _strip_sha256_header
-
-
-@pytest.mark.parametrize('digest,expected', [
- ('sha256:251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a',
- '251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a'),
- ('251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a',
- '251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a'),])
-def test_strip_sha256(digest, expected):
- assert _strip_sha256_header(digest) == expected
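Reviewer note: the deleted test exercises `_strip_sha256_header`, which normalizes digests that may or may not carry a `sha256:` prefix. A plausible one-line implementation consistent with the parametrized cases above (the real function lives in `endpoints.appr.models_cnr`):

```python
def _strip_sha256_header(digest):
    # Drop a leading 'sha256:' algorithm prefix, if present, so callers
    # always work with the bare hex digest.
    if digest.startswith('sha256:'):
        return digest[len('sha256:'):]
    return digest


assert _strip_sha256_header('sha256:abc123') == 'abc123'
assert _strip_sha256_header('abc123') == 'abc123'
```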
diff --git a/endpoints/appr/test/test_registry.py b/endpoints/appr/test/test_registry.py
deleted file mode 100644
index bd6602675..000000000
--- a/endpoints/appr/test/test_registry.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import base64
-import json
-
-from mock import patch
-
-import pytest
-
-from flask import url_for
-
-from data import model
-from endpoints.appr.registry import appr_bp
-
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('login_data, expected_code', [
- ({
- "username": "devtable",
- "password": "password"
- }, 200),
- ({
- "username": "devtable",
- "password": "badpass"
- }, 401),
- ({
- "username": "devtable+dtrobot",
- "password": "badpass"
- }, 401),
- ({
- "username": "devtable+dtrobot2",
- "password": "badpass"
- }, 401),
-])
-def test_login(login_data, expected_code, app, client):
- if "+" in login_data['username'] and login_data['password'] is None:
- username, robotname = login_data['username'].split("+")
- _, login_data['password'] = model.user.create_robot(robotname, model.user.get_user(username))
-
- url = url_for('appr.login')
- headers = {'Content-Type': 'application/json'}
- data = {'user': login_data}
-
- rv = client.open(url, method='POST', data=json.dumps(data), headers=headers)
- assert rv.status_code == expected_code
-
-
-@pytest.mark.parametrize('release_name', [
- '1.0',
- '1',
- 1,
-])
-def test_invalid_release_name(release_name, app, client):
- params = {
- 'namespace': 'devtable',
- 'package_name': 'someapprepo',
- }
-
- url = url_for('appr.push', **params)
- auth = base64.b64encode('devtable:password')
- headers = {'Content-Type': 'application/json', 'Authorization': 'Basic ' + auth}
- data = {
- 'release': release_name,
- 'media_type': 'application/vnd.cnr.manifest.v1+json',
- 'blob': 'H4sIAFQwWVoAA+3PMQrCQBAF0Bxlb+Bk143nETGIIEoSC29vMMFOu3TvNb/5DH/Ot8f02jWbiohDremT3ZKR90uuUlty7nKJNmqKtkQuTarbzlo8x+k4zFOu4+lyH4afvbnW93/urH98EwAAAAAAAAAAADb0BsdwExIAKAAA',
- }
-
- rv = client.open(url, method='POST', data=json.dumps(data), headers=headers)
- assert rv.status_code == 422
-
-
-@pytest.mark.parametrize('readonly, expected_status', [
- (True, 405),
- (False, 422),
-])
-def test_readonly(readonly, expected_status, app, client):
- params = {
- 'namespace': 'devtable',
- 'package_name': 'someapprepo',
- }
-
- url = url_for('appr.push', **params)
- auth = base64.b64encode('devtable:password')
- headers = {'Content-Type': 'application/json', 'Authorization': 'Basic ' + auth}
- data = {
- 'release': '1.0',
- 'media_type': 'application/vnd.cnr.manifest.v0+json',
- 'blob': 'H4sIAFQwWVoAA+3PMQrCQBAF0Bxlb+Bk143nETGIIEoSC29vMMFOu3TvNb/5DH/Ot8f02jWbiohDremT3ZKR90uuUlty7nKJNmqKtkQuTarbzlo8x+k4zFOu4+lyH4afvbnW93/urH98EwAAAAAAAAAAADb0BsdwExIAKAAA',
- }
-
- with patch('endpoints.appr.models_cnr.model.is_readonly', readonly):
- rv = client.open(url, method='POST', data=json.dumps(data), headers=headers)
- assert rv.status_code == expected_status
diff --git a/endpoints/bitbuckettrigger.py b/endpoints/bitbuckettrigger.py
index 7e521c10d..8c8052235 100644
--- a/endpoints/bitbuckettrigger.py
+++ b/endpoints/bitbuckettrigger.py
@@ -4,11 +4,11 @@ from flask import request, redirect, url_for, Blueprint
from flask_login import current_user
from app import app
-from auth.decorators import require_session_login
+from auth.process import require_session_login
from buildtrigger.basehandler import BuildTriggerHandler
from buildtrigger.bitbuckethandler import BitbucketBuildTrigger
from data import model
-from endpoints.decorators import route_show_if
+from endpoints.common import route_show_if
from util.http import abort
import features
@@ -40,7 +40,8 @@ def attach_bitbucket_build_trigger(trigger_uuid):
repository = trigger.repository.name
repo_path = '%s/%s' % (namespace, repository)
- full_url = url_for('web.buildtrigger', path=repo_path, trigger=trigger.uuid)
+ full_url = '%s%s%s' % (url_for('web.repository', path=repo_path), '?tab=builds&newtrigger=',
+ trigger.uuid)
logger.debug('Redirecting to full url: %s', full_url)
return redirect(full_url)
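A side note on the redirect construction above: Flask's `url_for` appends unrecognized keyword arguments as query parameters, so the manual `'?tab=builds&newtrigger='` concatenation could be expressed directly. A minimal sketch with a stand-in route, not the app's real `web.repository` view:

```python
from flask import Flask, url_for

app = Flask(__name__)


@app.route('/repository/<path:path>')
def repository(path):
    return 'repo page'


with app.test_request_context():
    # Unknown keyword arguments become query-string parameters.
    url = url_for('repository', path='ns/repo',
                  tab='builds', newtrigger='some-uuid')
    print(url)  # /repository/ns/repo?tab=builds&newtrigger=some-uuid
```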
diff --git a/endpoints/building.py b/endpoints/building.py
index 247d0a932..9ad61f8a1 100644
--- a/endpoints/building.py
+++ b/endpoints/building.py
@@ -5,19 +5,20 @@ from datetime import datetime, timedelta
from flask import request
-from app import app, dockerfile_build_queue, metric_queue
+from app import app, dockerfile_build_queue
from data import model
-from data.logs_model import logs_model
-from data.database import db, RepositoryState
+from data.database import db
from auth.auth_context import get_authenticated_user
-from notifications import spawn_notification
+from endpoints.notificationhelper import spawn_notification
from util.names import escape_tag
from util.morecollections import AttrDict
-from util.request import get_request_ip
logger = logging.getLogger(__name__)
+MAX_BUILD_QUEUE_RATE_ITEMS = app.config.get('MAX_BUILD_QUEUE_RATE_ITEMS', -1)
+MAX_BUILD_QUEUE_RATE_SECS = app.config.get('MAX_BUILD_QUEUE_RATE_SECS', -1)
+
class MaximumBuildsQueuedException(Exception):
"""
@@ -27,35 +28,16 @@ class MaximumBuildsQueuedException(Exception):
pass
-class BuildTriggerDisabledException(Exception):
- """
- This exception is raised when a build is required, but the build trigger has been disabled.
- """
- pass
-
-
def start_build(repository, prepared_build, pull_robot_name=None):
- # Ensure that builds are only run in image repositories.
- if repository.kind.name != 'image':
- raise Exception('Attempt to start a build for application repository %s' % repository.id)
-
- # Ensure the repository isn't in mirror or read-only mode.
- if repository.state != RepositoryState.NORMAL:
- raise Exception(('Attempt to start a build for a non-normal repository: %s %s' %
- (repository.id, repository.state)))
-
- # Ensure that disabled triggers are not run.
- if prepared_build.trigger is not None and not prepared_build.trigger.enabled:
- raise BuildTriggerDisabledException
-
- if repository.namespace_user.maximum_queued_builds_count is not None:
- queue_item_canonical_name = [repository.namespace_user.username]
- alive_builds = dockerfile_build_queue.num_alive_jobs(queue_item_canonical_name)
- if alive_builds >= repository.namespace_user.maximum_queued_builds_count:
- logger.debug('Prevented queueing of build under namespace %s due to reaching max: %s',
- repository.namespace_user.username,
- repository.namespace_user.maximum_queued_builds_count)
- raise MaximumBuildsQueuedException()
+ if MAX_BUILD_QUEUE_RATE_ITEMS > 0 and MAX_BUILD_QUEUE_RATE_SECS > 0:
+ queue_item_canonical_name = [repository.namespace_user.username, repository.name]
+ now = datetime.utcnow()
+ available_min = now - timedelta(seconds=MAX_BUILD_QUEUE_RATE_SECS)
+ available_builds = dockerfile_build_queue.num_available_jobs_between(available_min,
+ now,
+ queue_item_canonical_name)
+ if available_builds >= MAX_BUILD_QUEUE_RATE_ITEMS:
+ raise MaximumBuildsQueuedException()
host = app.config['SERVER_HOSTNAME']
repo_path = '%s/%s/%s' % (host, repository.namespace_user.username, repository.name)
@@ -69,7 +51,6 @@ def start_build(repository, prepared_build, pull_robot_name=None):
'docker_tags': prepared_build.tags,
'registry': host,
'build_subdir': prepared_build.subdirectory,
- 'context': prepared_build.context,
'trigger_metadata': prepared_build.metadata or {},
'is_manual': prepared_build.is_manual,
'manual_user': get_authenticated_user().username if get_authenticated_user() else None,
@@ -97,10 +78,6 @@ def start_build(repository, prepared_build, pull_robot_name=None):
build_request.queue_id = queue_id
build_request.save()
- # Add the queueing of the build to the metrics queue.
- metric_queue.repository_build_queued.Inc(labelvalues=[repository.namespace_user.username,
- repository.name])
-
# Add the build to the repo's log and spawn the build_queued notification.
event_log_metadata = {
'build_id': build_request.uuid,
@@ -116,10 +93,10 @@ def start_build(repository, prepared_build, pull_robot_name=None):
event_log_metadata['trigger_kind'] = prepared_build.trigger.service.name
event_log_metadata['trigger_metadata'] = prepared_build.metadata or {}
- logs_model.log_action('build_dockerfile', repository.namespace_user.username,
- ip=get_request_ip(), metadata=event_log_metadata, repository=repository)
+ model.log.log_action('build_dockerfile', repository.namespace_user.username,
+ ip=request.remote_addr, metadata=event_log_metadata, repository=repository)
- # TODO: remove when more endpoints have been converted to using interfaces
+ # TODO(jzelinskie): remove when more endpoints have been converted to using interfaces
repo = AttrDict({
'namespace_name': repository.namespace_user.username,
'name': repository.name,
@@ -142,7 +119,6 @@ class PreparedBuild(object):
self._tags = None
self._build_name = None
self._subdirectory = None
- self._context = None
self._metadata = None
self._trigger = trigger
self._is_manual = None
@@ -245,20 +221,6 @@ class PreparedBuild(object):
self._subdirectory = value
- @property
- def context(self):
- if self._context is None:
- raise Exception('Missing property context')
-
- return self._context
-
- @context.setter
- def context(self, value):
- if self._context:
- raise Exception('Property context already set')
-
- self._context = value
-
@property
def metadata(self):
if self._metadata is None:
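Reviewer note: the new queue-rate check in `start_build` counts jobs queued for a repository inside a sliding time window and refuses to enqueue past a configured ceiling. A self-contained sketch of that sliding-window idea; this in-memory version is purely illustrative, since the real check delegates to `dockerfile_build_queue.num_available_jobs_between`:

```python
from collections import deque
from datetime import datetime, timedelta


class SlidingWindowLimiter(object):
    def __init__(self, max_items, window_secs):
        self.max_items = max_items
        self.window = timedelta(seconds=window_secs)
        self.events = deque()

    def allow(self, now=None):
        now = now or datetime.utcnow()
        # Evict events that have fallen out of the window.
        while self.events and self.events[0] < now - self.window:
            self.events.popleft()
        if len(self.events) >= self.max_items:
            return False
        self.events.append(now)
        return True


limiter = SlidingWindowLimiter(max_items=10, window_secs=60)
assert limiter.allow()  # the first build in the window is accepted
```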
diff --git a/endpoints/common.py b/endpoints/common.py
index 6ce1e745d..6fd7ff348 100644
--- a/endpoints/common.py
+++ b/endpoints/common.py
@@ -1,45 +1,118 @@
import logging
+import json
+import string
import datetime
import os
+import re
-from flask import make_response, render_template, request, session
+from random import SystemRandom
+from functools import wraps
+
+from cachetools import lru_cache
+from flask import make_response, render_template, request, abort, session
from flask_login import login_user
from flask_principal import identity_changed
-import endpoints.decorated # Register the various exceptions via decorators.
+import endpoints.decorated # Register the various exceptions via decorators.
import features
-from app import app, oauth_apps, oauth_login, LoginWrappedDBUser, user_analytics, IS_KUBERNETES
+from app import app, oauth_apps, LoginWrappedDBUser, user_analytics, license_validator
from auth import scopes
from auth.permissions import QuayDeferredPermissionUser
from config import frontend_visible_config
from external_libraries import get_external_javascript, get_external_css
-from endpoints.common_models_pre_oci import pre_oci_model as model
-from endpoints.csrf import generate_csrf_token, QUAY_CSRF_UPDATED_HEADER_NAME
-from util.config.provider.k8sprovider import QE_NAMESPACE
+from util.names import parse_namespace_repository
from util.secscan import PRIORITY_LEVELS
-from util.saas.useranalytics import build_error_callback
from util.timedeltastring import convert_to_timedelta
-from _init import __version__
logger = logging.getLogger(__name__)
+route_data = None
-JS_BUNDLE_NAME = 'bundle'
+CACHE_BUSTERS_JSON = 'static/dist/cachebusters.json'
+CACHE_BUSTERS = None
-def common_login(user_uuid, permanent_session=True):
- """ Performs login of the given user, with optional non-permanence on the session.
- Returns a tuple with (success, headers to set on success).
- """
- user = model.get_user(user_uuid)
- if user is None:
- return (False, None)
+def get_cache_busters():
+ """ Retrieves the cache busters hashes. """
+ global CACHE_BUSTERS
+ if CACHE_BUSTERS is not None:
+ return CACHE_BUSTERS
- if login_user(LoginWrappedDBUser(user_uuid)):
- logger.debug('Successfully signed in as user %s with uuid %s', user.username, user_uuid)
- new_identity = QuayDeferredPermissionUser.for_id(user_uuid)
+ if not os.path.exists(CACHE_BUSTERS_JSON):
+ return {}
+
+ with open(CACHE_BUSTERS_JSON, 'r') as f:
+ CACHE_BUSTERS = json.loads(f.read())
+ return CACHE_BUSTERS
+
+
+def parse_repository_name(include_tag=False,
+ ns_kwarg_name='namespace_name',
+ repo_kwarg_name='repo_name',
+ tag_kwarg_name='tag_name',
+ incoming_repo_kwarg='repository'):
+ def inner(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ repo_name_components = parse_namespace_repository(kwargs[incoming_repo_kwarg],
+ app.config['LIBRARY_NAMESPACE'],
+ include_tag=include_tag)
+ del kwargs[incoming_repo_kwarg]
+ kwargs[ns_kwarg_name] = repo_name_components[0]
+ kwargs[repo_kwarg_name] = repo_name_components[1]
+ if include_tag:
+ kwargs[tag_kwarg_name] = repo_name_components[2]
+ return func(*args, **kwargs)
+ return wrapper
+ return inner
+
+
+def route_show_if(value):
+ def decorator(f):
+ @wraps(f)
+ def decorated_function(*args, **kwargs):
+ if not value:
+ abort(404)
+
+ return f(*args, **kwargs)
+ return decorated_function
+ return decorator
+
+
+def route_hide_if(value):
+ def decorator(f):
+ @wraps(f)
+ def decorated_function(*args, **kwargs):
+ if value:
+ abort(404)
+
+ return f(*args, **kwargs)
+ return decorated_function
+ return decorator
+
+
+def truthy_param(param):
+ return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'}
+
+
+def param_required(param_name, allow_body=False):
+ def wrapper(wrapped):
+ @wraps(wrapped)
+ def decorated(*args, **kwargs):
+ if param_name not in request.args:
+ if not allow_body or param_name not in request.values:
+ abort(make_response('Required param: %s' % param_name, 400))
+ return wrapped(*args, **kwargs)
+ return decorated
+ return wrapper
+
+
+def common_login(db_user, permanent_session=True):
+ if login_user(LoginWrappedDBUser(db_user.uuid, db_user)):
+ logger.debug('Successfully signed in as: %s (%s)' % (db_user.username, db_user.uuid))
+ new_identity = QuayDeferredPermissionUser.for_user(db_user)
identity_changed.send(app, identity=new_identity)
session['login_time'] = datetime.datetime.now()
@@ -49,68 +122,72 @@ def common_login(user_uuid, permanent_session=True):
session.permanent_session_lifetime = convert_to_timedelta(session_timeout_str)
# Inform our user analytics that we have a new "lead"
- create_lead_future = user_analytics.create_lead(
- user.email,
- user.username,
- user.given_name,
- user.family_name,
- user.company,
- user.location,
- )
+ user_analytics.create_lead(db_user.email, db_user.username, db_user.given_name,
+ db_user.family_name, db_user.company)
+ return True
+ else:
+ logger.debug('User could not be logged in, inactive?')
+ return False
- create_lead_future.add_done_callback(build_error_callback('Create lead failed'))
+def random_string():
+ random = SystemRandom()
+ return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(8)])
- # Force a new CSRF token.
- headers = {}
- headers[QUAY_CSRF_UPDATED_HEADER_NAME] = generate_csrf_token(force=True)
- return (True, headers)
-
- logger.debug('User could not be logged in, inactive?')
- return (False, None)
-
-
-def _list_files(path, extension, contains=""):
- """ Returns a list of all the files with the given extension found under the given path. """
+def list_files(path, extension):
+ import os
def matches(f):
- return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0]
+ return os.path.splitext(f)[1] == '.' + extension and f.split(os.path.extsep)[1] != 'spec'
def join_path(dp, f):
# Remove the static/ prefix. It is added in the template.
return os.path.join(dp, f)[len('static/'):]
- filepath = os.path.join('static/', path)
- return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)]
-
-
-FONT_AWESOME_5 = 'use.fontawesome.com/releases/v5.0.4/css/all.css'
+ filepath = 'static/' + path
+ return [join_path(dp, f) for dp, dn, files in os.walk(filepath) for f in files if matches(f)]
+@lru_cache(maxsize=1)
+def _get_version_number():
+ try:
+ with open('CHANGELOG.md') as f:
+ return re.search('(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0)
+ except IOError:
+ return ''
def render_page_template(name, route_data=None, **kwargs):
- """ Renders the page template with the given name as the response and returns its contents. """
- main_scripts = _list_files('build', 'js', JS_BUNDLE_NAME)
+ debugging = app.config.get('DEBUGGING', False)
+ if debugging:
+ # If DEBUGGING is enabled, then we load the full set of individual JS and CSS files
+ # from the file system.
+ library_styles = list_files('lib', 'css')
+ main_styles = list_files('css', 'css')
+ library_scripts = list_files('lib', 'js')
+ main_scripts = list_files('js', 'js')
+
+ file_lists = [library_styles, main_styles, library_scripts, main_scripts]
+ for file_list in file_lists:
+ file_list.sort()
+ else:
+ library_styles = []
+ main_styles = ['dist/quay-frontend.css']
+ library_scripts = []
+ main_scripts = ['dist/quay-frontend.min.js']
use_cdn = app.config.get('USE_CDN', True)
if request.args.get('use_cdn') is not None:
use_cdn = request.args.get('use_cdn') == 'true'
- external_styles = get_external_css(local=not use_cdn, exclude=FONT_AWESOME_5)
+ external_styles = get_external_css(local=not use_cdn)
external_scripts = get_external_javascript(local=not use_cdn)
# Add Stripe checkout if billing is enabled.
if features.BILLING:
external_scripts.append('//checkout.stripe.com/checkout.js')
- def get_external_login_config():
- login_config = []
- for login_service in oauth_login.services:
- login_config.append({
- 'id': login_service.service_id(),
- 'title': login_service.service_name(),
- 'config': login_service.get_public_config(),
- 'icon': login_service.get_icon(),
- })
-
- return login_config
+ def add_cachebusters(filenames):
+ cachebusters = get_cache_busters()
+ for filename in filenames:
+ cache_buster = cachebusters.get(filename, random_string()) if not debugging else 'debugging'
+ yield (filename, cache_buster)
def get_oauth_config():
oauth_config = {}
@@ -119,48 +196,46 @@ def render_page_template(name, route_data=None, **kwargs):
return oauth_config
- has_contact = len(app.config.get('CONTACT_INFO', [])) > 0
contact_href = None
if len(app.config.get('CONTACT_INFO', [])) == 1:
contact_href = app.config['CONTACT_INFO'][0]
version_number = ''
if not features.BILLING:
- version_number = 'Quay %s' % __version__
+ version_number = ' - ' + _get_version_number()
- scopes_set = {scope.scope: scope._asdict() for scope in scopes.app_scopes(app.config).values()}
+ resp = make_response(render_template(name,
+ route_data=route_data,
+ external_styles=external_styles,
+ external_scripts=external_scripts,
+ main_styles=add_cachebusters(main_styles),
+ library_styles=add_cachebusters(library_styles),
+ main_scripts=add_cachebusters(main_scripts),
+ library_scripts=add_cachebusters(library_scripts),
+ feature_set=features.get_features(),
+ config_set=frontend_visible_config(app.config),
+ oauth_set=get_oauth_config(),
+ scope_set=scopes.app_scopes(app.config),
+ vuln_priority_set=PRIORITY_LEVELS,
+ enterprise_logo=app.config.get('ENTERPRISE_LOGO_URL', ''),
+ mixpanel_key=app.config.get('MIXPANEL_KEY', ''),
+ munchkin_key=app.config.get('MARKETO_MUNCHKIN_ID', ''),
+ recaptcha_key=app.config.get('RECAPTCHA_SITE_KEY', ''),
+ google_tagmanager_key=app.config.get('GOOGLE_TAGMANAGER_KEY', ''),
+ google_anaytics_key=app.config.get('GOOGLE_ANALYTICS_KEY', ''),
+ sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
+ is_debug=str(app.config.get('DEBUGGING', False)).lower(),
+ show_chat=features.SUPPORT_CHAT,
+ aci_conversion=features.ACI_CONVERSION,
+ has_billing=features.BILLING,
+ contact_href=contact_href,
+ hostname=app.config['SERVER_HOSTNAME'],
+ preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
+ version_number=version_number,
+ license_insufficient=license_validator.insufficient,
+ license_expiring=license_validator.expiring_soon,
+ **kwargs))
- contents = render_template(name,
- registry_state=app.config.get('REGISTRY_STATE', 'normal'),
- route_data=route_data,
- external_styles=external_styles,
- external_scripts=external_scripts,
- main_scripts=main_scripts,
- feature_set=features.get_features(),
- config_set=frontend_visible_config(app.config),
- oauth_set=get_oauth_config(),
- external_login_set=get_external_login_config(),
- scope_set=scopes_set,
- vuln_priority_set=PRIORITY_LEVELS,
- mixpanel_key=app.config.get('MIXPANEL_KEY', ''),
- munchkin_key=app.config.get('MARKETO_MUNCHKIN_ID', ''),
- recaptcha_key=app.config.get('RECAPTCHA_SITE_KEY', ''),
- google_tagmanager_key=app.config.get('GOOGLE_TAGMANAGER_KEY', ''),
- google_anaytics_key=app.config.get('GOOGLE_ANALYTICS_KEY', ''),
- sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
- is_debug=str(app.config.get('DEBUGGING', False)).lower(),
- aci_conversion=features.ACI_CONVERSION,
- has_billing=features.BILLING,
- onprem=not app.config.get('FEATURE_BILLING', False),
- contact_href=contact_href,
- has_contact=has_contact,
- hostname=app.config['SERVER_HOSTNAME'],
- preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
- version_number=version_number,
- current_year=datetime.datetime.now().year,
- kubernetes_namespace=IS_KUBERNETES and QE_NAMESPACE,
- **kwargs)
-
- resp = make_response(contents)
resp.headers['X-FRAME-OPTIONS'] = 'DENY'
return resp
+
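Reviewer note: the cache-buster machinery above maps each static filename to a hash (falling back to a random string) so templates can emit versioned URLs that bust stale browser caches. A compact sketch of generating such a manifest from file contents; the `static/dist/cachebusters.json` path and dict format are taken from the code above, while the hashing scheme is an assumption:

```python
import hashlib
import json
import os


def build_cachebusters(static_root):
    # Map each file under static_root to a short content hash; templates
    # append it as a query string so changed files get fresh URLs.
    manifest = {}
    for dirpath, _, files in os.walk(static_root):
        for name in files:
            path = os.path.join(dirpath, name)
            with open(path, 'rb') as f:
                digest = hashlib.sha256(f.read()).hexdigest()[:12]
            manifest[os.path.relpath(path, static_root)] = digest
    return manifest


if __name__ == '__main__':
    print(json.dumps(build_cachebusters('static'), indent=2))
```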
diff --git a/endpoints/common_models_interface.py b/endpoints/common_models_interface.py
deleted file mode 100644
index 95ccf5685..000000000
--- a/endpoints/common_models_interface.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-
-USER_FIELDS = ['uuid', 'username', 'email', 'given_name',
- 'family_name', 'company', 'location']
-
-
-class User(namedtuple('User', USER_FIELDS)):
- """
- User represents a user.
- """
-
-
-@add_metaclass(ABCMeta)
-class EndpointsCommonDataInterface(object):
- """
- Interface that represents all data store interactions required by the common endpoints lib.
- """
-
- @abstractmethod
- def get_user(self, user_uuid):
- """
- Returns the User matching the given uuid, if any or None if none.
- """
-
- @abstractmethod
- def get_namespace_uuid(self, namespace_name):
- """
- Returns the uuid of the Namespace with the given name, if any.
- """
diff --git a/endpoints/common_models_pre_oci.py b/endpoints/common_models_pre_oci.py
deleted file mode 100644
index 1f5e01052..000000000
--- a/endpoints/common_models_pre_oci.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from data import model
-from endpoints.common_models_interface import User, EndpointsCommonDataInterface
-
-
-class EndpointsCommonDataPreOCIModel(EndpointsCommonDataInterface):
- def get_user(self, user_uuid):
- user = model.user.get_user_by_uuid(user_uuid)
- if user is None:
- return None
-
- return User(uuid=user.uuid, username=user.username, email=user.email,
- given_name=user.given_name, family_name=user.family_name,
- company=user.company, location=user.location)
-
- def get_namespace_uuid(self, namespace_name):
- user = model.user.get_namespace_user(namespace_name)
- if user is None:
- return None
-
- return user.uuid
-
-pre_oci_model = EndpointsCommonDataPreOCIModel()
diff --git a/endpoints/csrf.py b/endpoints/csrf.py
index 11c225924..b2dbfcff1 100644
--- a/endpoints/csrf.py
+++ b/endpoints/csrf.py
@@ -4,9 +4,7 @@ import base64
import hmac
from functools import wraps
-from flask import session, request, Response
-
-import features
+from flask import session, request
from app import app
from auth.auth_context import get_validated_oauth_token
@@ -17,31 +15,24 @@ logger = logging.getLogger(__name__)
OAUTH_CSRF_TOKEN_NAME = '_oauth_csrf_token'
_QUAY_CSRF_TOKEN_NAME = '_csrf_token'
-_QUAY_CSRF_HEADER_NAME = 'X-CSRF-Token'
-QUAY_CSRF_UPDATED_HEADER_NAME = 'X-Next-CSRF-Token'
-
-
-def generate_csrf_token(session_token_name=_QUAY_CSRF_TOKEN_NAME, force=False):
+def generate_csrf_token(session_token_name=_QUAY_CSRF_TOKEN_NAME):
""" If not present in the session, generates a new CSRF token with the given name
and places it into the session. Returns the generated token.
"""
- if session_token_name not in session or force:
+ if session_token_name not in session:
session[session_token_name] = base64.b64encode(os.urandom(48))
return session[session_token_name]
def verify_csrf(session_token_name=_QUAY_CSRF_TOKEN_NAME,
- request_token_name=_QUAY_CSRF_TOKEN_NAME,
- check_header=True):
+ request_token_name=_QUAY_CSRF_TOKEN_NAME):
""" Verifies that the CSRF token with the given name is found in the session and
that the matching token is found in the request args or values.
"""
token = str(session.get(session_token_name, ''))
found_token = str(request.values.get(request_token_name, ''))
- if check_header and not found_token:
- found_token = str(request.headers.get(_QUAY_CSRF_HEADER_NAME, ''))
if not token or not found_token or not hmac.compare_digest(token, found_token):
msg = 'CSRF Failure. Session token (%s) was %s and request token (%s) was %s'
@@ -51,19 +42,16 @@ def verify_csrf(session_token_name=_QUAY_CSRF_TOKEN_NAME,
def csrf_protect(session_token_name=_QUAY_CSRF_TOKEN_NAME,
request_token_name=_QUAY_CSRF_TOKEN_NAME,
- all_methods=False,
- check_header=True):
+ all_methods=False):
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
- # Verify the CSRF token.
- if get_validated_oauth_token() is None:
+ oauth_token = get_validated_oauth_token()
+ if oauth_token is None:
if all_methods or (request.method != "GET" and request.method != "HEAD"):
- verify_csrf(session_token_name, request_token_name, check_header)
+ verify_csrf(session_token_name, request_token_name)
- # Invoke the handler.
- resp = func(*args, **kwargs)
- return resp
+ return func(*args, **kwargs)
return wrapper
return inner
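Reviewer note: `verify_csrf` above compares the session token against the request token with `hmac.compare_digest`, which runs in constant time so a timing side channel cannot reveal a partially matching prefix. A minimal sketch of the generate/verify pair outside of Flask:

```python
import base64
import hmac
import os


def generate_token():
    # 48 random bytes, base64-encoded, mirroring the session token above.
    return base64.b64encode(os.urandom(48)).decode('ascii')


def tokens_match(session_token, request_token):
    # compare_digest takes the same time regardless of where the
    # strings first differ.
    if not session_token or not request_token:
        return False
    return hmac.compare_digest(session_token, request_token)


token = generate_token()
assert tokens_match(token, token)
assert not tokens_match(token, generate_token())
```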
diff --git a/endpoints/decorated.py b/endpoints/decorated.py
index 88216216b..1e19996fe 100644
--- a/endpoints/decorated.py
+++ b/endpoints/decorated.py
@@ -5,7 +5,6 @@ from flask_restful.utils.cors import crossdomain
from app import app
from data import model
-from data.readreplica import ReadOnlyModeException
from util.config.provider.baseprovider import CannotWriteConfigException
from util.useremails import CannotSendEmailException
@@ -16,7 +15,7 @@ logger = logging.getLogger(__name__)
@app.errorhandler(model.DataModelException)
def handle_dme(ex):
logger.exception(ex)
- response = jsonify({'message': str(ex)})
+ response = jsonify({'message': ex.message})
response.status_code = 400
return response
@@ -44,11 +43,3 @@ def handle_too_many_login_attempts(error):
response.headers['Retry-After'] = int(error.retry_after)
return response
-@app.errorhandler(ReadOnlyModeException)
-def handle_readonly(ex):
- logger.exception(ex)
- response = jsonify({'message': 'System is currently read-only. Pulls will succeed but all ' +
- 'write operations are currently suspended.',
- 'is_readonly': True})
- response.status_code = 503
- return response
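Reviewer note: the handlers in this file register application-wide exception translators with `@app.errorhandler`, turning model exceptions into structured JSON responses. A freestanding sketch of the same pattern with a made-up exception type:

```python
import logging

from flask import Flask, jsonify

logger = logging.getLogger(__name__)
app = Flask(__name__)


class DataModelException(Exception):
    """ Hypothetical stand-in for data.model.DataModelException. """


@app.errorhandler(DataModelException)
def handle_dme(ex):
    # Log the stack trace, then surface a structured 400 to the client.
    logger.exception(ex)
    response = jsonify({'message': str(ex)})
    response.status_code = 400
    return response


@app.route('/boom')
def boom():
    raise DataModelException('invalid repository name')
```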
diff --git a/endpoints/decorators.py b/endpoints/decorators.py
index ecafb1cdb..b032b624a 100644
--- a/endpoints/decorators.py
+++ b/endpoints/decorators.py
@@ -1,78 +1,10 @@
""" Various decorators for endpoint and API handlers. """
-import os
-import logging
-
-from functools import wraps
-from flask import abort, request, make_response
-
import features
-
-from app import app, ip_resolver, model_cache
-from auth.auth_context import get_authenticated_context, get_authenticated_user
-from data.database import RepositoryState
-from data.model.repository import get_repository, get_repository_state
-from data.model.repo_mirror import get_mirroring_robot, get_mirror
-from data.registry_model import registry_model
-from data.readreplica import ReadOnlyModeException
-from util.names import parse_namespace_repository, ImplicitLibraryNamespaceNotAllowed
-from util.http import abort
-from util.request import get_request_ip
-
-logger = logging.getLogger(__name__)
-
-
-def parse_repository_name(include_tag=False,
- ns_kwarg_name='namespace_name',
- repo_kwarg_name='repo_name',
- tag_kwarg_name='tag_name',
- incoming_repo_kwarg='repository'):
- """ Decorator which parses the repository name found in the incoming_repo_kwarg argument,
- and applies its pieces to the decorated function.
- """
- def inner(func):
- @wraps(func)
- def wrapper(*args, **kwargs):
- try:
- repo_name_components = parse_namespace_repository(kwargs[incoming_repo_kwarg],
- app.config['LIBRARY_NAMESPACE'],
- include_tag=include_tag,
- allow_library=features.LIBRARY_SUPPORT)
- except ImplicitLibraryNamespaceNotAllowed:
- abort(400, message='A namespace must be specified explicitly')
-
- del kwargs[incoming_repo_kwarg]
- kwargs[ns_kwarg_name] = repo_name_components[0]
- kwargs[repo_kwarg_name] = repo_name_components[1]
- if include_tag:
- kwargs[tag_kwarg_name] = repo_name_components[2]
- return func(*args, **kwargs)
- return wrapper
- return inner
-
-
-def param_required(param_name, allow_body=False):
- """ Marks a route as requiring a parameter with the given name to exist in the request's arguments
- or (if allow_body=True) in its body values. If the parameter is not present, the request will
- fail with a 400.
- """
- def wrapper(wrapped):
- @wraps(wrapped)
- def decorated(*args, **kwargs):
- if param_name not in request.args:
- if not allow_body or param_name not in request.values:
- abort(400, message='Required param: %s' % param_name)
- return wrapped(*args, **kwargs)
- return decorated
- return wrapper
-
-
-def readonly_call_allowed(func):
- """ Marks a method as allowing for invocation when the registry is in a read only state.
- Only necessary on non-GET methods.
- """
- func.__readonly_call_allowed = True
- return func
+from flask import abort
+from auth.auth_context import (get_validated_oauth_token, get_authenticated_user,
+ get_validated_token, get_grant_context)
+from functools import wraps
def anon_allowed(func):
@@ -89,7 +21,6 @@ def anon_protect(func):
def check_anon_protection(func):
""" Validates a method as requiring some form of valid user auth before it can be executed. """
-
@wraps(func)
def wrapper(*args, **kwargs):
# Skip if anonymous access is allowed.
@@ -97,152 +28,9 @@ def check_anon_protection(func):
return func(*args, **kwargs)
# Check for validated context. If none exists, fail with a 401.
- if get_authenticated_context() and not get_authenticated_context().is_anonymous:
+ if (get_authenticated_user() or get_validated_oauth_token() or get_validated_token() or
+ get_grant_context()):
return func(*args, **kwargs)
- abort(401, message='Anonymous access is not allowed')
-
- return wrapper
-
-
-def check_readonly(func):
- """ Validates that a non-GET method is not invoked when the registry is in read-only mode,
- unless explicitly marked as being allowed.
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- # Skip if a GET method.
- if request.method == 'GET':
- return func(*args, **kwargs)
-
- # Skip if not in read only mode.
- if app.config.get('REGISTRY_STATE', 'normal') != 'readonly':
- return func(*args, **kwargs)
-
- # Skip if readonly access is allowed.
- if hasattr(func, '__readonly_call_allowed'):
- return func(*args, **kwargs)
-
- raise ReadOnlyModeException()
- return wrapper
-
-
-def route_show_if(value):
- """ Adds/shows the decorated route if the given value is True. """
-
- def decorator(f):
- @wraps(f)
- def decorated_function(*args, **kwargs):
- if not value:
- abort(404)
-
- return f(*args, **kwargs)
- return decorated_function
- return decorator
-
-
-def require_xhr_from_browser(func):
- """ Requires that API GET calls made from browsers are made via XHR, in order to prevent
- reflected text attacks.
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- if app.config.get('BROWSER_API_CALLS_XHR_ONLY', False):
- if request.method == 'GET' and request.user_agent.browser:
- has_xhr_header = request.headers.get('X-Requested-With') == 'XMLHttpRequest'
- if not has_xhr_header and not app.config.get('DEBUGGING'):
- logger.warning('Disallowed possible RTA to URL %s with user agent %s',
- request.path, request.user_agent)
- abort(400, message='API calls must be invoked with an X-Requested-With header ' +
- 'if called from a browser')
-
- return func(*args, **kwargs)
- return wrapper
-
-
-def check_region_blacklisted(error_class=None, namespace_name_kwarg=None):
- """ Decorator which checks if the incoming request is from a region geo IP blocked
- for the current namespace. The first argument to the wrapped function must be
- the namespace name.
- """
- def wrapper(wrapped):
- @wraps(wrapped)
- def decorated(*args, **kwargs):
- if namespace_name_kwarg:
- namespace_name = kwargs[namespace_name_kwarg]
- else:
- namespace_name = args[0]
-
- region_blacklist = registry_model.get_cached_namespace_region_blacklist(model_cache,
- namespace_name)
- if region_blacklist:
- # Resolve the IP information and block if on the namespace's blacklist.
- remote_ip = get_request_ip()
- resolved_ip_info = ip_resolver.resolve_ip(remote_ip)
- logger.debug('Resolved IP information for IP %s: %s', remote_ip, resolved_ip_info)
-
- if (resolved_ip_info and
- resolved_ip_info.country_iso_code and
- resolved_ip_info.country_iso_code in region_blacklist):
- if error_class:
- raise error_class()
-
- abort(403, 'Pulls of this data have been restricted geographically')
-
- return wrapped(*args, **kwargs)
- return decorated
- return wrapper
-
-
-def check_repository_state(f):
- @wraps(f)
- def wrapper(namespace_name, repo_name, *args, **kwargs):
- """
- Conditionally allow changes depending on the Repository's state.
- NORMAL -> Pass
- READ_ONLY -> Block all POST/PUT/DELETE
- MIRROR -> Same as READ_ONLY, except treat the Mirroring Robot User as Normal
- """
- user = get_authenticated_user()
- if user is None:
- # NOTE: Remaining auth checks will be handled by subsequent decorators.
- return f(namespace_name, repo_name, *args, **kwargs)
-
- repository = get_repository(namespace_name, repo_name)
- if not repository:
- return f(namespace_name, repo_name, *args, **kwargs)
-
- if repository.state == RepositoryState.READ_ONLY:
- abort(405, '%s/%s is in read-only mode.' % (namespace_name, repo_name))
-
- if repository.state == RepositoryState.MIRROR:
- mirror = get_mirror(repository)
- robot = mirror.internal_robot if mirror is not None else None
-
- if mirror is None:
- abort(500, 'Repository %s/%s is set as a mirror but the Mirror configuration is missing.' % (
- namespace_name, repo_name))
-
- elif robot is None:
- abort(400, 'Repository %s/%s is configured for mirroring but no robot is assigned.' % (
- namespace_name, repo_name))
-
- elif user.id != robot.id:
- abort(405,
- 'Repository %s/%s is a mirror. Mirrored repositories cannot be modified directly.' % (
- namespace_name, repo_name))
-
- elif user.id == robot.id:
- pass # User is designated robot for this mirror repo.
-
- else:
- msg = (
- 'An internal error has occurred while verifying repository %s/%s state. Please report '
- 'this to an administrator.'
- ) % (namespace_name, repo_name)
- raise Exception(msg)
-
- return f(namespace_name, repo_name, *args, **kwargs)
+ abort(401)
return wrapper
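Reviewer note: the removed `check_readonly` decorator skips enforcement for GETs and for handlers explicitly tagged with `readonly_call_allowed`, which works by setting a marker attribute on the function. A distilled sketch of that marker-attribute pattern; `registry_state` and the explicit `method` argument stand in for `app.config['REGISTRY_STATE']` and `request.method`:

```python
from functools import wraps


class ReadOnlyModeException(Exception):
    pass


registry_state = 'readonly'  # assumed stand-in for app config


def readonly_call_allowed(func):
    # Mark a handler as safe to run while the registry is read-only.
    func.__readonly_call_allowed = True
    return func


def check_readonly(func):
    @wraps(func)
    def wrapper(method, *args, **kwargs):
        # GETs, a normal-state registry, and whitelisted handlers pass.
        if method == 'GET' or registry_state != 'readonly':
            return func(method, *args, **kwargs)
        if getattr(func, '__readonly_call_allowed', False):
            return func(method, *args, **kwargs)
        raise ReadOnlyModeException()
    return wrapper


@check_readonly
@readonly_call_allowed
def delete_key(method):
    return 'deleted'


print(delete_key('DELETE'))  # allowed despite read-only mode
```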
diff --git a/endpoints/exception.py b/endpoints/exception.py
index abc32cc54..d11266153 100644
--- a/endpoints/exception.py
+++ b/endpoints/exception.py
@@ -1,7 +1,6 @@
from enum import Enum
from flask import url_for
-from werkzeug.exceptions import HTTPException
from auth.auth_context import get_authenticated_user
@@ -33,7 +32,7 @@ ERROR_DESCRIPTION = {
}
-class ApiException(HTTPException):
+class ApiException(Exception):
"""
Represents an error in the application/problem+json format.
@@ -59,12 +58,9 @@ class ApiException(HTTPException):
def __init__(self, error_type, status_code, error_description, payload=None):
Exception.__init__(self)
self.error_description = error_description
- self.code = status_code
+ self.status_code = status_code
self.payload = payload
self.error_type = error_type
- self.data = self.to_dict()
-
- super(ApiException, self).__init__(error_description, None)
def to_dict(self):
rv = dict(self.payload or ())
@@ -75,13 +71,13 @@ class ApiException(HTTPException):
rv['error_type'] = self.error_type.value # TODO: deprecate
rv['title'] = self.error_type.value
- rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True)
- rv['status'] = self.code
+ rv['type'] = url_for('error', error_type=self.error_type.value, _external=True)
+ rv['status'] = self.status_code
return rv
-class ExternalServiceError(ApiException):
+class ExternalServiceTimeout(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.external_service_timeout, 520, error_description, payload)
@@ -129,5 +125,5 @@ class NotFound(ApiException):
class DownstreamIssue(ApiException):
- def __init__(self, error_description, payload=None):
- ApiException.__init__(self, ApiErrorType.downstream_issue, 520, error_description, payload)
+ def __init__(self, payload=None):
+ ApiException.__init__(self, ApiErrorType.downstream_issue, 520, 'Downstream Issue', payload)
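Reviewer note: `ApiException.to_dict` above serializes errors in the `application/problem+json` shape (RFC 7807): `type`, `title`, `status`, and `detail`. A minimal sketch of that document structure; the error-documentation URL below is illustrative, not this app's real `api.error` route:

```python
import json


def problem_document(error_type, status_code, detail, extra=None):
    # RFC 7807 members; 'type' points at human-readable documentation
    # for the error class.
    doc = dict(extra or {})
    doc.update({
        'type': 'https://example.com/api/error/%s' % error_type,
        'title': error_type,
        'status': status_code,
        'detail': detail,
    })
    return doc


print(json.dumps(problem_document('invalid_request', 400,
                                  'Missing required field: release')))
```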
diff --git a/endpoints/githubtrigger.py b/endpoints/githubtrigger.py
index 3b4b21f0a..1f0d9ca90 100644
--- a/endpoints/githubtrigger.py
+++ b/endpoints/githubtrigger.py
@@ -6,10 +6,10 @@ from flask_login import current_user
import features
from app import app, github_trigger
-from auth.decorators import require_session_login
from auth.permissions import AdministerRepositoryPermission
+from auth.process import require_session_login
from data import model
-from endpoints.decorators import route_show_if, parse_repository_name
+from endpoints.common import route_show_if, parse_repository_name
from util.http import abort
@@ -31,12 +31,11 @@ def attach_github_build_trigger(namespace_name, repo_name):
if not repo:
msg = 'Invalid repository: %s/%s' % (namespace_name, repo_name)
abort(404, message=msg)
- elif repo.kind.name != 'image':
- abort(501)
trigger = model.build.create_build_trigger(repo, 'github', token, current_user.db_user())
repo_path = '%s/%s' % (namespace_name, repo_name)
- full_url = url_for('web.buildtrigger', path=repo_path, trigger=trigger.uuid)
+ full_url = '%s%s%s' % (url_for('web.repository', path=repo_path), '?tab=builds&newtrigger=',
+ trigger.uuid)
logger.debug('Redirecting to full url: %s', full_url)
return redirect(full_url)
diff --git a/endpoints/gitlabtrigger.py b/endpoints/gitlabtrigger.py
index 4d97caffe..4f51a2bdc 100644
--- a/endpoints/gitlabtrigger.py
+++ b/endpoints/gitlabtrigger.py
@@ -6,10 +6,10 @@ from flask_login import current_user
import features
from app import app, gitlab_trigger
-from auth.decorators import require_session_login
from auth.permissions import AdministerRepositoryPermission
+from auth.process import require_session_login
from data import model
-from endpoints.decorators import route_show_if
+from endpoints.common import route_show_if
from util.http import abort
@@ -44,12 +44,11 @@ def attach_gitlab_build_trigger():
if not repo:
msg = 'Invalid repository: %s/%s' % (namespace, repository)
abort(404, message=msg)
- elif repo.kind.name != 'image':
- abort(501)
trigger = model.build.create_build_trigger(repo, 'gitlab', token, current_user.db_user())
repo_path = '%s/%s' % (namespace, repository)
- full_url = url_for('web.buildtrigger', path=repo_path, trigger=trigger.uuid)
+ full_url = '%s%s%s' % (url_for('web.repository', path=repo_path), '?tab=builds&newtrigger=',
+ trigger.uuid)
logger.debug('Redirecting to full url: %s', full_url)
return redirect(full_url)
diff --git a/endpoints/keyserver/__init__.py b/endpoints/keyserver/__init__.py
index f24b30421..13248fcc7 100644
--- a/endpoints/keyserver/__init__.py
+++ b/endpoints/keyserver/__init__.py
@@ -1,22 +1,18 @@
import logging
-
from datetime import datetime, timedelta
from flask import Blueprint, jsonify, abort, request, make_response
from jwt import get_unverified_header
from app import app
-from data.logs_model import logs_model
-from endpoints.keyserver.models_interface import ServiceKeyDoesNotExist
-from endpoints.keyserver.models_pre_oci import pre_oci_model as model
+from data.interfaces.key_server import pre_oci_model as model, ServiceKeyDoesNotExist
+from data.model.log import log_action
from util.security import jwtutil
-from util.request import get_request_ip
logger = logging.getLogger(__name__)
key_server = Blueprint('key_server', __name__)
-
JWT_HEADER_NAME = 'Authorization'
JWT_AUDIENCE = app.config['PREFERRED_URL_SCHEME'] + '://' + app.config['SERVER_HOSTNAME']
@@ -95,7 +91,7 @@ def get_service_key(service, kid):
@key_server.route('/services/<service>/keys/<kid>', methods=['PUT'])
def put_service_key(service, kid):
- metadata = {'ip': get_request_ip()}
+ metadata = {'ip': request.remote_addr}
rotation_duration = request.args.get('rotation', None)
expiration_date = request.args.get('expiration', None)
@@ -129,16 +125,17 @@ def put_service_key(service, kid):
model.create_service_key('', kid, service, jwk, metadata, expiration_date,
rotation_duration=rotation_duration)
- logs_model.log_action('service_key_create', ip=get_request_ip(), metadata={
+ key_log_metadata = {
'kid': kid,
'preshared': False,
'service': service,
'name': '',
'expiration_date': expiration_date,
'user_agent': request.headers.get('User-Agent'),
- 'ip': get_request_ip(),
- })
+ 'ip': request.remote_addr,
+ }
+ log_action('service_key_create', None, metadata=key_log_metadata, ip=request.remote_addr)
return make_response('', 202)
# Key is going to be rotated.
@@ -153,16 +150,17 @@ def put_service_key(service, kid):
except ServiceKeyDoesNotExist:
abort(404)
- logs_model.log_action('service_key_rotate', ip=get_request_ip(), metadata={
+ key_log_metadata = {
'kid': kid,
'signer_kid': signer_key.kid,
'service': service,
'name': signer_key.name,
'expiration_date': expiration_date,
'user_agent': request.headers.get('User-Agent'),
- 'ip': get_request_ip(),
- })
+ 'ip': request.remote_addr,
+ }
+ log_action('service_key_rotate', None, metadata=key_log_metadata, ip=request.remote_addr)
return make_response('', 200)
@@ -189,15 +187,16 @@ def delete_service_key(service, kid):
except ServiceKeyDoesNotExist:
abort(404)
- logs_model.log_action('service_key_delete', ip=get_request_ip(), metadata={
+ key_log_metadata = {
'kid': kid,
'signer_kid': signer_key.kid,
'service': service,
'name': signer_key.name,
'user_agent': request.headers.get('User-Agent'),
- 'ip': get_request_ip(),
- })
+ 'ip': request.remote_addr,
+ }
+ log_action('service_key_delete', None, metadata=key_log_metadata, ip=request.remote_addr)
return make_response('', 204)
abort(403)
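Reviewer note: the key server above identifies the signing key from the JWT header's `kid` before any signature check; `jwt.get_unverified_header` reads that header without validating the token, which is exactly what a key-lookup step needs. A short sketch with PyJWT; the secret, claims, and kid are made up:

```python
import jwt

# Create a token signed with a throwaway secret, tagging the header with
# the key id the server would use to look up the verification key.
token = jwt.encode({'sub': 'quay'}, 'not-a-real-secret',
                   algorithm='HS256', headers={'kid': 'kid-123'})

header = jwt.get_unverified_header(token)
print(header['kid'])  # 'kid-123', resolved before any verification
```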
diff --git a/endpoints/keyserver/models_interface.py b/endpoints/keyserver/models_interface.py
deleted file mode 100644
index 977c2f6b4..000000000
--- a/endpoints/keyserver/models_interface.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
-
-class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata',
- 'created_date', 'expiration_date', 'rotation_duration',
- 'approval'])):
- """
- Service Key represents a public key (JWK) being used by an instance of a particular service to
- authenticate with other services.
- """
- pass
-
-
-class ServiceKeyException(Exception):
- pass
-
-
-class ServiceKeyDoesNotExist(ServiceKeyException):
- pass
-
-
-@add_metaclass(ABCMeta)
-class KeyServerDataInterface(object):
- """
- Interface that represents all data store interactions required by a JWT key service.
- """
-
- @abstractmethod
- def list_service_keys(self, service):
- """
- Returns a list of service keys or an empty list if the service does not exist.
- """
- pass
-
- @abstractmethod
- def get_service_key(self, signer_kid, service=None, alive_only=None, approved_only=None):
- """
- Returns the service key with the given kid or raises ServiceKeyDoesNotExist.
- """
- pass
-
- @abstractmethod
- def create_service_key(self, name, kid, service, jwk, metadata, expiration_date,
- rotation_duration=None):
- """
- Stores a service key.
- """
- pass
-
- @abstractmethod
- def replace_service_key(self, old_kid, kid, jwk, metadata, expiration_date):
- """
- Replaces a service key with a new key or raises ServiceKeyDoesNotExist.
- """
- pass
-
- @abstractmethod
- def delete_service_key(self, kid):
- """
- Deletes and returns a service key with the given kid or raises ServiceKeyDoesNotExist.
- """
- pass
diff --git a/notifications/notificationevent.py b/endpoints/notificationevent.py
similarity index 67%
rename from notifications/notificationevent.py
rename to endpoints/notificationevent.py
index 085dceb0d..a662522b1 100644
--- a/notifications/notificationevent.py
+++ b/endpoints/notificationevent.py
@@ -1,16 +1,17 @@
import logging
import time
+import json
import re
from datetime import datetime
-from notifications import build_repository_event_data
+from endpoints.notificationhelper import build_event_data
from util.jinjautil import get_template_env
+from util.morecollections import AttrDict
from util.secscan import PRIORITY_LEVELS, get_priority_for_index
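+# Jinja environment used to render the per-event notification templates.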
+template_env = get_template_env("events")
logger = logging.getLogger(__name__)
-TEMPLATE_ENV = get_template_env("events")
-
class InvalidNotificationEventException(Exception):
pass
@@ -35,12 +36,12 @@ class NotificationEvent(object):
"""
Returns a human readable HTML message for the given notification data.
"""
- return TEMPLATE_ENV.get_template(self.event_name() + '.html').render({
+ return template_env.get_template(self.event_name() + '.html').render({
'event_data': event_data,
'notification_data': notification_data
})
- def get_sample_data(self, namespace_name, repo_name, event_config):
+ def get_sample_data(self, notification):
"""
Returns sample data for testing the raising of this notification.
"""
@@ -67,14 +68,6 @@ class NotificationEvent(object):
raise InvalidNotificationEventException('Unable to find event: %s' % eventname)
- @classmethod
- def event_names(cls):
- for subc in cls.__subclasses__():
- if subc.event_name() is None:
- for subsubc in subc.__subclasses__():
- yield subsubc.event_name()
- else:
- yield subc.event_name()
@staticmethod
def _get_event(cls, eventname):
@@ -98,64 +91,19 @@ class RepoPushEvent(NotificationEvent):
def get_summary(self, event_data, notification_data):
return 'Repository %s updated' % (event_data['repository'])
- def get_sample_data(self, namespace_name, repo_name, event_config):
- return build_repository_event_data(namespace_name, repo_name, {
- 'updated_tags': ['latest', 'foo'],
+ def get_sample_data(self, notification):
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
+ 'updated_tags': {'latest': 'someimageid', 'foo': 'anotherimage'},
'pruned_image_count': 3
})
-class RepoMirrorSyncStartedEvent(NotificationEvent):
- @classmethod
- def event_name(cls):
- return 'repo_mirror_sync_started'
-
- def get_level(self, event_data, notification_data):
- return 'info'
-
- def get_summary(self, event_data, notification_data):
- return 'Repository Mirror started for %s' % (event_data['message'])
-
- def get_sample_data(self, namespace_name, repo_name, event_config):
- return build_repository_event_data(namespace_name, repo_name, {
- 'message': 'TEST NOTIFICATION'
- })
-
-
-class RepoMirrorSyncSuccessEvent(NotificationEvent):
- @classmethod
- def event_name(cls):
- return 'repo_mirror_sync_success'
-
- def get_level(self, event_data, notification_data):
- return 'success'
-
- def get_summary(self, event_data, notification_data):
- return 'Repository Mirror success for %s' % (event_data['message'])
-
- def get_sample_data(self, namespace_name, repo_name, event_config):
- return build_repository_event_data(namespace_name, repo_name, {
- 'message': 'TEST NOTIFICATION'
- })
-
-
-class RepoMirrorSyncFailedEvent(NotificationEvent):
- @classmethod
- def event_name(cls):
- return 'repo_mirror_sync_failed'
-
- def get_level(self, event_data, notification_data):
- return 'error'
-
- def get_summary(self, event_data, notification_data):
- return 'Repository Mirror failed for %s' % (event_data['message'])
-
- def get_sample_data(self, namespace_name, repo_name, event_config):
- return build_repository_event_data(namespace_name, repo_name, {
- 'message': 'TEST NOTIFICATION'
- })
-
-
def _build_summary(event_data):
""" Returns a summary string for the build data found in the event data block. """
summary = 'for repository %s [%s]' % (event_data['repository'], event_data['build_id'][0:7])
@@ -164,17 +112,14 @@ def _build_summary(event_data):
class VulnerabilityFoundEvent(NotificationEvent):
CONFIG_LEVEL = 'level'
- PRIORITY_KEY = 'priority'
VULNERABILITY_KEY = 'vulnerability'
- MULTIPLE_VULNERABILITY_KEY = 'vulnerabilities'
@classmethod
def event_name(cls):
return 'vulnerability_found'
def get_level(self, event_data, notification_data):
- vuln_data = event_data[VulnerabilityFoundEvent.VULNERABILITY_KEY]
- priority = vuln_data[VulnerabilityFoundEvent.PRIORITY_KEY]
+ priority = event_data['vulnerability']['priority']
if priority == 'Defcon1' or priority == 'Critical':
return 'error'
@@ -183,21 +128,28 @@ class VulnerabilityFoundEvent(NotificationEvent):
return 'info'
- def get_sample_data(self, namespace_name, repo_name, event_config):
- level = event_config.get(VulnerabilityFoundEvent.CONFIG_LEVEL, 'Critical')
- return build_repository_event_data(namespace_name, repo_name, {
+ def get_sample_data(self, notification):
+ event_config = json.loads(notification.event_config_json)
+
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
'tags': ['latest', 'prod', 'foo', 'bar', 'baz'],
'image': 'some-image-id',
'vulnerability': {
'id': 'CVE-FAKE-CVE',
'description': 'A futurist vulnerability',
'link': 'https://security-tracker.debian.org/tracker/CVE-FAKE-CVE',
- 'priority': get_priority_for_index(level)
+ 'priority': get_priority_for_index(event_config[VulnerabilityFoundEvent.CONFIG_LEVEL])
},
})
def should_perform(self, event_data, notification_data):
- event_config = notification_data.event_config_dict
+ event_config = json.loads(notification_data.event_config_json)
if VulnerabilityFoundEvent.CONFIG_LEVEL not in event_config:
return True
@@ -214,24 +166,9 @@ class VulnerabilityFoundEvent(NotificationEvent):
return actual_level_index <= filter_level_index
def get_summary(self, event_data, notification_data):
- vuln_key = VulnerabilityFoundEvent.VULNERABILITY_KEY
- priority_key = VulnerabilityFoundEvent.PRIORITY_KEY
-
- multiple_vulns = event_data.get(VulnerabilityFoundEvent.MULTIPLE_VULNERABILITY_KEY)
- if multiple_vulns is not None:
- top_priority = multiple_vulns[0].get(priority_key, 'Unknown')
- matching = [v for v in multiple_vulns if v.get(priority_key, 'Unknown') == top_priority]
-
- msg = '%s %s' % (len(matching), top_priority)
- if len(matching) < len(multiple_vulns):
- msg += ' and %s more' % (len(multiple_vulns) - len(matching))
-
- msg += ' vulnerabilities were detected in repository %s in %s tags'
- return msg % (event_data['repository'], len(event_data['tags']))
- else:
- msg = '%s vulnerability detected in repository %s in %s tags'
- return msg % (event_data[vuln_key][priority_key], event_data['repository'],
- len(event_data['tags']))
+ msg = '%s vulnerability detected in repository %s in %s tags'
+ return msg % (event_data['vulnerability']['priority'], event_data['repository'],
+ len(event_data['tags']))
class BaseBuildEvent(NotificationEvent):
@@ -240,10 +177,10 @@ class BaseBuildEvent(NotificationEvent):
return None
def should_perform(self, event_data, notification_data):
- if not notification_data.event_config_dict:
+ if not notification_data.event_config_json:
return True
- event_config = notification_data.event_config_dict
+ event_config = json.loads(notification_data.event_config_json)
ref_regex = event_config.get('ref-regex') or None
if ref_regex is None:
return True
@@ -270,9 +207,16 @@ class BuildQueueEvent(BaseBuildEvent):
def get_level(self, event_data, notification_data):
return 'info'
- def get_sample_data(self, namespace_name, repo_name, event_config):
+ def get_sample_data(self, notification):
build_uuid = 'fake-build-id'
- return build_repository_event_data(namespace_name, repo_name, {
+
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
'is_manual': False,
'build_id': build_uuid,
'build_name': 'some-fake-build',
@@ -308,9 +252,16 @@ class BuildStartEvent(BaseBuildEvent):
def get_level(self, event_data, notification_data):
return 'info'
- def get_sample_data(self, namespace_name, repo_name, event_config):
+ def get_sample_data(self, notification):
build_uuid = 'fake-build-id'
- return build_repository_event_data(namespace_name, repo_name, {
+
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
'build_id': build_uuid,
'build_name': 'some-fake-build',
'docker_tags': ['latest', 'foo', 'bar'],
@@ -335,9 +286,16 @@ class BuildSuccessEvent(BaseBuildEvent):
def get_level(self, event_data, notification_data):
return 'success'
- def get_sample_data(self, namespace_name, repo_name, event_config):
+ def get_sample_data(self, notification):
build_uuid = 'fake-build-id'
- return build_repository_event_data(namespace_name, repo_name, {
+
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
'build_id': build_uuid,
'build_name': 'some-fake-build',
'docker_tags': ['latest', 'foo', 'bar'],
@@ -363,9 +321,16 @@ class BuildFailureEvent(BaseBuildEvent):
def get_level(self, event_data, notification_data):
return 'error'
- def get_sample_data(self, namespace_name, repo_name, event_config):
+ def get_sample_data(self, notification):
build_uuid = 'fake-build-id'
- return build_repository_event_data(namespace_name, repo_name, {
+
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
'build_id': build_uuid,
'build_name': 'some-fake-build',
'docker_tags': ['latest', 'foo', 'bar'],
@@ -388,7 +353,7 @@ class BuildFailureEvent(BaseBuildEvent):
}
}
}
- }, subpage='/build/%s' % build_uuid)
+ }, subpage='/build?current=%s' % build_uuid)
def get_summary(self, event_data, notification_data):
return 'Build failure ' + _build_summary(event_data)
@@ -402,9 +367,16 @@ class BuildCancelledEvent(BaseBuildEvent):
def get_level(self, event_data, notification_data):
return 'info'
- def get_sample_data(self, namespace_name, repo_name, event_config):
+ def get_sample_data(self, notification):
build_uuid = 'fake-build-id'
- return build_repository_event_data(namespace_name, repo_name, {
+
+ # TODO(jzelinskie): remove when more endpoints have been converted to using
+ # interfaces
+ repo = AttrDict({
+ 'namespace_name': notification.repository.namespace_user.username,
+ 'name': notification.repository.name,
+ })
+ return build_event_data(repo, {
'build_id': build_uuid,
'build_name': 'some-fake-build',
'docker_tags': ['latest', 'foo', 'bar'],
@@ -420,3 +392,4 @@ class BuildCancelledEvent(BaseBuildEvent):
def get_summary(self, event_data, notification_data):
return 'Build cancelled ' + _build_summary(event_data)
+
diff --git a/notifications/__init__.py b/endpoints/notificationhelper.py
similarity index 81%
rename from notifications/__init__.py
rename to endpoints/notificationhelper.py
index 04903167d..37e5bc56b 100644
--- a/notifications/__init__.py
+++ b/endpoints/notificationhelper.py
@@ -6,17 +6,15 @@ from app import app, notification_queue
from data import model
from auth.auth_context import get_authenticated_user, get_validated_oauth_token
+
DEFAULT_BATCH_SIZE = 1000
-def build_repository_event_data(namespace_name, repo_name, extra_data=None, subpage=None):
- """ Builds the basic repository data for an event, including the repository's name, Docker URL
- and homepage. If extra_data is specified, it is appended to the dictionary before it is
- returned.
- """
- repo_string = '%s/%s' % (namespace_name, repo_name)
+def build_event_data(repo, extra_data=None, subpage=None):
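+ """ Builds the basic event data for the given repository: its full name, Docker URL and
+     homepage. Any extra_data entries are merged into the result. """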
+ repo_string = '%s/%s' % (repo.namespace_name, repo.name)
homepage = '%s://%s/repository/%s' % (app.config['PREFERRED_URL_SCHEME'],
- app.config['SERVER_HOSTNAME'], repo_string)
+ app.config['SERVER_HOSTNAME'],
+ repo_string)
if subpage:
if not subpage.startswith('/'):
@@ -26,8 +24,8 @@ def build_repository_event_data(namespace_name, repo_name, extra_data=None, subp
event_data = {
'repository': repo_string,
- 'namespace': namespace_name,
- 'name': repo_name,
+ 'namespace': repo.namespace_name,
+ 'name': repo.name,
'docker_url': '%s/%s' % (app.config['SERVER_HOSTNAME'], repo_string),
'homepage': homepage,
}
@@ -35,7 +33,6 @@ def build_repository_event_data(namespace_name, repo_name, extra_data=None, subp
event_data.update(extra_data or {})
return event_data
-
def build_notification_data(notification, event_data, performer_data=None):
if not performer_data:
performer_data = {}
@@ -66,13 +63,12 @@ def notification_batch(batch_size=DEFAULT_BATCH_SIZE):
the callable will be bulk inserted into the queue with the specified batch size.
"""
with notification_queue.batch_insert(batch_size) as queue_put:
-
def spawn_notification_batch(repo, event_name, extra_data=None, subpage=None, pathargs=None,
performer_data=None):
- event_data = build_repository_event_data(repo.namespace_name, repo.name,
- extra_data=extra_data, subpage=subpage)
+ event_data = build_event_data(repo, extra_data=extra_data, subpage=subpage)
- notifications = model.notification.list_repo_notifications(repo.namespace_name, repo.name,
+ notifications = model.notification.list_repo_notifications(repo.namespace_name,
+ repo.name,
event_name=event_name)
path = [repo.namespace_name, repo.name, event_name] + (pathargs or [])
for notification in list(notifications):
diff --git a/notifications/notificationmethod.py b/endpoints/notificationmethod.py
similarity index 80%
rename from notifications/notificationmethod.py
rename to endpoints/notificationmethod.py
index 73ac9d77e..b315be9b6 100644
--- a/notifications/notificationmethod.py
+++ b/endpoints/notificationmethod.py
@@ -1,8 +1,9 @@
import logging
-import re
import json
+import re
import requests
+
from flask_mail import Message
from app import mail, app, OVERRIDE_CONFIG_DIRECTORY
@@ -10,9 +11,10 @@ from data import model
from util.config.validator import SSL_FILENAMES
from workers.queueworker import JobException
+
logger = logging.getLogger(__name__)
-METHOD_TIMEOUT = app.config.get('NOTIFICATION_SEND_TIMEOUT', 10) # Seconds
+METHOD_TIMEOUT = app.config.get('NOTIFICATION_SEND_TIMEOUT', 10) # Seconds
class InvalidNotificationMethodException(Exception):
@@ -27,11 +29,10 @@ class NotificationMethodPerformException(JobException):
pass
-def _ssl_cert():
- if app.config['PREFERRED_URL_SCHEME'] == 'https':
- return [OVERRIDE_CONFIG_DIRECTORY + f for f in SSL_FILENAMES]
-
- return None
+SSLClientCert = None
+if app.config['PREFERRED_URL_SCHEME'] == 'https':
+ # TODO(jschorr): move this into the config provider library
+ SSLClientCert = [OVERRIDE_CONFIG_DIRECTORY + f for f in SSL_FILENAMES]
class NotificationMethod(object):
@@ -45,19 +46,18 @@ class NotificationMethod(object):
"""
raise NotImplementedError
- def validate(self, namespace_name, repo_name, config_data):
+ def validate(self, repository, config_data):
"""
Validates that the notification can be created with the given data. Throws
a CannotValidateNotificationMethodException on failure.
"""
raise NotImplementedError
-
def perform(self, notification_obj, event_handler, notification_data):
"""
Performs the notification method.
- notification_obj: The notification namedtuple.
+ notification_obj: The notification record itself.
event_handler: The NotificationEvent handler.
notification_data: The dict of notification data placed in the queue.
"""
@@ -77,15 +77,13 @@ class QuayNotificationMethod(NotificationMethod):
def method_name(cls):
return 'quay_notification'
- def validate(self, namespace_name, repo_name, config_data):
- _, err_message, _ = self.find_targets(namespace_name, config_data)
+ def validate(self, repository, config_data):
+ status, err_message, target_users = self.find_targets(repository, config_data)
if err_message:
- raise CannotValidateNotificationMethodException(err_message)
+ raise CannotValidateNotificationMethodException(err_message)
- def find_targets(self, namespace_name, config_data):
- target_info = config_data.get('target', None)
- if not target_info or not target_info.get('kind'):
- return (True, 'Missing target', [])
+ def find_targets(self, repository, config_data):
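+ # Resolves the configured target (user, org or team) into the users to notify;
+ # returns a (status, err_message, target_users) tuple.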
+ target_info = config_data['target']
if target_info['kind'] == 'user':
target = model.user.get_nonrobot_user(target_info['name'])
@@ -95,13 +93,13 @@ class QuayNotificationMethod(NotificationMethod):
return (True, None, [target])
elif target_info['kind'] == 'org':
- try:
- target = model.organization.get_organization(target_info['name'])
- except model.organization.InvalidOrganizationException:
+ target = model.organization.get_organization(target_info['name'])
+ if not target:
+ # Just to be safe.
return (True, 'Unknown organization %s' % target_info['name'], None)
# Only repositories under the organization can cause notifications to that org.
- if target_info['name'] != namespace_name:
+ if target_info['name'] != repository.namespace_user.username:
return (False, 'Organization name must match repository namespace', None)
return (True, None, [target])
@@ -109,7 +107,8 @@ class QuayNotificationMethod(NotificationMethod):
# Lookup the team.
org_team = None
try:
- org_team = model.team.get_organization_team(namespace_name, target_info['name'])
+ org_team = model.team.get_organization_team(repository.namespace_user.username,
+ target_info['name'])
except model.InvalidTeamException:
# Probably deleted.
return (True, 'Unknown team %s' % target_info['name'], None)
@@ -117,6 +116,7 @@ class QuayNotificationMethod(NotificationMethod):
# Lookup the team's members
return (True, None, model.organization.get_organization_team_members(org_team.id))
+
def perform(self, notification_obj, event_handler, notification_data):
repository = notification_obj.repository
if not repository:
@@ -124,8 +124,8 @@ class QuayNotificationMethod(NotificationMethod):
return
# Lookup the target user or team to which we'll send the notification.
- config_data = notification_obj.method_config_dict
- status, err_message, target_users = self.find_targets(repository.namespace_name, config_data)
+ config_data = json.loads(notification_obj.config_json)
+ status, err_message, target_users = self.find_targets(repository, config_data)
if not status:
raise NotificationMethodPerformException(err_message)
@@ -140,33 +140,36 @@ class EmailMethod(NotificationMethod):
def method_name(cls):
return 'email'
- def validate(self, namespace_name, repo_name, config_data):
+ def validate(self, repository, config_data):
email = config_data.get('email', '')
if not email:
raise CannotValidateNotificationMethodException('Missing e-mail address')
- record = model.repository.get_email_authorized_for_repo(namespace_name, repo_name, email)
+ record = model.repository.get_email_authorized_for_repo(repository.namespace_user.username,
+ repository.name, email)
if not record or not record.confirmed:
raise CannotValidateNotificationMethodException('The specified e-mail address '
'is not authorized to receive '
'notifications for this repository')
+
def perform(self, notification_obj, event_handler, notification_data):
- config_data = notification_obj.method_config_dict
+ config_data = json.loads(notification_obj.config_json)
email = config_data.get('email', '')
if not email:
return
- with app.app_context():
- msg = Message(event_handler.get_summary(notification_data['event_data'], notification_data),
- recipients=[email])
- msg.html = event_handler.get_message(notification_data['event_data'], notification_data)
+ msg = Message(event_handler.get_summary(notification_data['event_data'], notification_data),
+ sender='support@quay.io',
+ recipients=[email])
+ msg.html = event_handler.get_message(notification_data['event_data'], notification_data)
- try:
+ try:
+ with app.app_context():
mail.send(msg)
- except Exception as ex:
- logger.exception('Email was unable to be sent')
- raise NotificationMethodPerformException(ex.message)
+ except Exception as ex:
+ logger.exception('Email was unable to be sent: %s' % ex.message)
+ raise NotificationMethodPerformException(ex.message)
class WebhookMethod(NotificationMethod):
@@ -174,13 +177,13 @@ class WebhookMethod(NotificationMethod):
def method_name(cls):
return 'webhook'
- def validate(self, namespace_name, repo_name, config_data):
+ def validate(self, repository, config_data):
url = config_data.get('url', '')
if not url:
raise CannotValidateNotificationMethodException('Missing webhook URL')
def perform(self, notification_obj, event_handler, notification_data):
- config_data = notification_obj.method_config_dict
+ config_data = json.loads(notification_obj.config_json)
url = config_data.get('url', '')
if not url:
return
@@ -189,16 +192,16 @@ class WebhookMethod(NotificationMethod):
headers = {'Content-type': 'application/json'}
try:
- resp = requests.post(url, data=json.dumps(payload), headers=headers, cert=_ssl_cert(),
+ resp = requests.post(url, data=json.dumps(payload), headers=headers, cert=SSLClientCert,
timeout=METHOD_TIMEOUT)
- if resp.status_code / 100 != 2:
+ if resp.status_code/100 != 2:
error_message = '%s response for webhook to url: %s' % (resp.status_code, url)
logger.error(error_message)
logger.error(resp.content)
raise NotificationMethodPerformException(error_message)
except requests.exceptions.RequestException as ex:
- logger.exception('Webhook was unable to be sent')
+ logger.exception('Webhook was unable to be sent: %s' % ex.message)
raise NotificationMethodPerformException(ex.message)
@@ -206,23 +209,22 @@ class FlowdockMethod(NotificationMethod):
""" Method for sending notifications to Flowdock via the Team Inbox API:
https://www.flowdock.com/api/team-inbox
"""
-
@classmethod
def method_name(cls):
return 'flowdock'
- def validate(self, namespace_name, repo_name, config_data):
+ def validate(self, repository, config_data):
token = config_data.get('flow_api_token', '')
if not token:
raise CannotValidateNotificationMethodException('Missing Flowdock API Token')
def perform(self, notification_obj, event_handler, notification_data):
- config_data = notification_obj.method_config_dict
+ config_data = json.loads(notification_obj.config_json)
token = config_data.get('flow_api_token', '')
if not token:
return
- owner = model.user.get_user_or_org(notification_obj.repository.namespace_name)
+ owner = model.user.get_user_or_org(notification_obj.repository.namespace_user.username)
if not owner:
# Something went wrong.
return
@@ -231,11 +233,11 @@ class FlowdockMethod(NotificationMethod):
headers = {'Content-type': 'application/json'}
payload = {
'source': 'Quay',
- 'from_address': 'support@quay.io',
+ 'from_address': 'support@quay.io',
'subject': event_handler.get_summary(notification_data['event_data'], notification_data),
'content': event_handler.get_message(notification_data['event_data'], notification_data),
'from_name': owner.username,
- 'project': (notification_obj.repository.namespace_name + ' ' +
+ 'project': (notification_obj.repository.namespace_user.username + ' ' +
notification_obj.repository.name),
'tags': ['#' + event_handler.event_name()],
'link': notification_data['event_data']['homepage']
@@ -243,14 +245,14 @@ class FlowdockMethod(NotificationMethod):
try:
resp = requests.post(url, data=json.dumps(payload), headers=headers, timeout=METHOD_TIMEOUT)
- if resp.status_code / 100 != 2:
+ if resp.status_code/100 != 2:
error_message = '%s response for flowdock to url: %s' % (resp.status_code, url)
logger.error(error_message)
logger.error(resp.content)
raise NotificationMethodPerformException(error_message)
except requests.exceptions.RequestException as ex:
- logger.exception('Flowdock method was unable to be sent')
+ logger.exception('Flowdock method was unable to be sent: %s' % ex.message)
raise NotificationMethodPerformException(ex.message)
@@ -258,12 +260,11 @@ class HipchatMethod(NotificationMethod):
""" Method for sending notifications to Hipchat via the API:
https://www.hipchat.com/docs/apiv2/method/send_room_notification
"""
-
@classmethod
def method_name(cls):
return 'hipchat'
- def validate(self, namespace_name, repo_name, config_data):
+ def validate(self, repository, config_data):
if not config_data.get('notification_token', ''):
raise CannotValidateNotificationMethodException('Missing Hipchat Room Notification Token')
@@ -271,14 +272,15 @@ class HipchatMethod(NotificationMethod):
raise CannotValidateNotificationMethodException('Missing Hipchat Room ID')
def perform(self, notification_obj, event_handler, notification_data):
- config_data = notification_obj.method_config_dict
+ config_data = json.loads(notification_obj.config_json)
+
token = config_data.get('notification_token', '')
room_id = config_data.get('room_id', '')
if not token or not room_id:
return
- owner = model.user.get_user_or_org(notification_obj.repository.namespace_name)
+ owner = model.user.get_user_or_org(notification_obj.repository.namespace_user.username)
if not owner:
# Something went wrong.
return
@@ -304,20 +306,19 @@ class HipchatMethod(NotificationMethod):
try:
resp = requests.post(url, data=json.dumps(payload), headers=headers, timeout=METHOD_TIMEOUT)
- if resp.status_code / 100 != 2:
+ if resp.status_code/100 != 2:
error_message = '%s response for hipchat to url: %s' % (resp.status_code, url)
logger.error(error_message)
logger.error(resp.content)
raise NotificationMethodPerformException(error_message)
except requests.exceptions.RequestException as ex:
- logger.exception('Hipchat method was unable to be sent')
+ logger.exception('Hipchat method was unable to be sent: %s' % ex.message)
raise NotificationMethodPerformException(ex.message)
from HTMLParser import HTMLParser
-
class SlackAdjuster(HTMLParser):
def __init__(self):
self.reset()
@@ -335,7 +336,7 @@ class SlackAdjuster(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == 'a':
- self.result.append('<%s|' % (self.get_attr(attrs, 'href'),))
+ self.result.append('<%s|' % (self.get_attr(attrs, 'href'), ))
if tag == 'i':
self.result.append('_')
@@ -359,7 +360,6 @@ class SlackAdjuster(HTMLParser):
def get_data(self):
return ''.join(self.result)
-
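+# Rewrites the notification's HTML into Slack's message markup (e.g. <url|text> links, _emphasis_).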
def adjust_tags(html):
s = SlackAdjuster()
s.feed(html)
@@ -374,7 +374,7 @@ class SlackMethod(NotificationMethod):
def method_name(cls):
return 'slack'
- def validate(self, namespace_name, repo_name, config_data):
+ def validate(self, repository, config_data):
if not config_data.get('url', ''):
raise CannotValidateNotificationMethodException('Missing Slack Callback URL')
@@ -385,12 +385,13 @@ class SlackMethod(NotificationMethod):
return adjust_tags(message)
def perform(self, notification_obj, event_handler, notification_data):
- config_data = notification_obj.method_config_dict
+ config_data = json.loads(notification_obj.config_json)
+
url = config_data.get('url', '')
if not url:
return
- owner = model.user.get_user_or_org(notification_obj.repository.namespace_name)
+ owner = model.user.get_user_or_org(notification_obj.repository.namespace_user.username)
if not owner:
# Something went wrong.
return
@@ -423,7 +424,7 @@ class SlackMethod(NotificationMethod):
try:
resp = requests.post(url, data=json.dumps(payload), headers=headers, timeout=METHOD_TIMEOUT)
- if resp.status_code / 100 != 2:
+ if resp.status_code/100 != 2:
error_message = '%s response for Slack to url: %s' % (resp.status_code, url)
logger.error(error_message)
logger.error(resp.content)
diff --git a/endpoints/oauth/__init__.py b/endpoints/oauth/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/endpoints/oauth/login.py b/endpoints/oauth/login.py
deleted file mode 100644
index 89c81f9c6..000000000
--- a/endpoints/oauth/login.py
+++ /dev/null
@@ -1,302 +0,0 @@
-import logging
-import time
-import recaptcha2
-
-from collections import namedtuple
-from flask import request, redirect, url_for, Blueprint, abort, session
-from peewee import IntegrityError
-
-import features
-
-from app import app, analytics, get_app_url, oauth_login, authentication, url_scheme_and_hostname
-from auth.auth_context import get_authenticated_user
-from auth.decorators import require_session_login
-from data import model
-from data.users.shared import can_create_user
-from endpoints.common import common_login
-from endpoints.web import index, render_page_template_with_routedata
-from endpoints.csrf import csrf_protect, OAUTH_CSRF_TOKEN_NAME, generate_csrf_token
-from oauth.login import OAuthLoginException
-from util.validation import generate_valid_usernames
-from util.request import get_request_ip
-
-logger = logging.getLogger(__name__)
-client = app.config['HTTPCLIENT']
-oauthlogin = Blueprint('oauthlogin', __name__)
-
-oauthlogin_csrf_protect = csrf_protect(OAUTH_CSRF_TOKEN_NAME, 'state', all_methods=True,
- check_header=False)
-
-
-OAuthResult = namedtuple('oauthresult', ['user_obj', 'service_name', 'error_message',
- 'register_redirect', 'requires_verification'])
-
-def _oauthresult(user_obj=None, service_name=None, error_message=None, register_redirect=False,
- requires_verification=False):
- return OAuthResult(user_obj, service_name, error_message, register_redirect,
- requires_verification)
-
-def _get_response(result):
- if result.error_message is not None:
- return _render_ologin_error(result.service_name, result.error_message, result.register_redirect)
-
- return _perform_login(result.user_obj, result.service_name)
-
-def _conduct_oauth_login(auth_system, login_service, lid, lusername, lemail, metadata=None,
- captcha_verified=False):
- """ Conducts login from the result of an OAuth service's login flow and returns
- the status of the login, as well as the followup step. """
- service_id = login_service.service_id()
- service_name = login_service.service_name()
-
- # Check for an existing account *bound to this service*. If found, conduct login of that account
- # and redirect.
- user_obj = model.user.verify_federated_login(service_id, lid)
- if user_obj is not None:
- return _oauthresult(user_obj=user_obj, service_name=service_name)
-
- # If the login service has a bound field name, and we have a defined internal auth type that is
- # not the database, then search for an existing account with that matching field. This allows
- # users to setup SSO while also being backed by something like LDAP.
- bound_field_name = login_service.login_binding_field()
- if auth_system.federated_service is not None and bound_field_name is not None:
- # Perform lookup.
- logger.debug('Got oauth bind field name of "%s"', bound_field_name)
- lookup_value = None
- if bound_field_name == 'sub':
- lookup_value = lid
- elif bound_field_name == 'username':
- lookup_value = lusername
- elif bound_field_name == 'email':
- lookup_value = lemail
-
- if lookup_value is None:
- logger.error('Missing lookup value for OAuth login')
- return _oauthresult(service_name=service_name,
- error_message='Configuration error in this provider')
-
- (user_obj, err) = auth_system.link_user(lookup_value)
- if err is not None:
- logger.debug('%s %s not found: %s', bound_field_name, lookup_value, err)
- msg = '%s %s not found in backing auth system' % (bound_field_name, lookup_value)
- return _oauthresult(service_name=service_name, error_message=msg)
-
- # Found an existing user. Bind their internal auth account to this service as well.
- result = _attach_service(login_service, user_obj, lid, lusername)
- if result.error_message is not None:
- return result
-
- return _oauthresult(user_obj=user_obj, service_name=service_name)
-
- # Otherwise, we need to create a new user account.
- blacklisted_domains = app.config.get('BLACKLISTED_EMAIL_DOMAINS', [])
- if not can_create_user(lemail, blacklisted_domains=blacklisted_domains):
- error_message = 'User creation is disabled. Please contact your administrator'
- return _oauthresult(service_name=service_name, error_message=error_message)
-
- if features.RECAPTCHA and not captcha_verified:
- return _oauthresult(service_name=service_name, requires_verification=True)
-
- # Try to create the user
- try:
- # Generate a valid username.
- new_username = None
- for valid in generate_valid_usernames(lusername):
- if model.user.get_user_or_org(valid):
- continue
-
- new_username = valid
- break
-
- requires_password = auth_system.requires_distinct_cli_password
- prompts = model.user.get_default_user_prompts(features)
- user_obj = model.user.create_federated_user(new_username, lemail, service_id, lid,
- set_password_notification=requires_password,
- metadata=metadata or {},
- confirm_username=features.USERNAME_CONFIRMATION,
- prompts=prompts,
- email_required=features.MAILING)
-
- # Success, tell analytics
- analytics.track(user_obj.username, 'register', {'service': service_name.lower()})
- return _oauthresult(user_obj=user_obj, service_name=service_name)
-
- except model.InvalidEmailAddressException:
- message = ("The e-mail address {0} is already associated "
- "with an existing {1} account. \n"
- "Please log in with your username and password and "
- "associate your {2} account to use it in the future.")
- message = message.format(lemail, app.config['REGISTRY_TITLE_SHORT'], service_name)
- return _oauthresult(service_name=service_name, error_message=message,
- register_redirect=True)
-
- except model.DataModelException as ex:
- return _oauthresult(service_name=service_name, error_message=str(ex))
-
-def _render_ologin_error(service_name, error_message=None, register_redirect=False):
- """ Returns a Flask response indicating an OAuth error. """
-
- user_creation = bool(features.USER_CREATION and features.DIRECT_LOGIN and
- not features.INVITE_ONLY_USER_CREATION)
- error_info = {
- 'reason': 'ologinerror',
- 'service_name': service_name,
- 'error_message': error_message or 'Could not load user data. The token may have expired',
- 'service_url': get_app_url(),
- 'user_creation': user_creation,
- 'register_redirect': register_redirect,
- }
-
- resp = index('', error_info=error_info)
- resp.status_code = 400
- return resp
-
-def _perform_login(user_obj, service_name):
- """ Attempts to login the given user, returning the Flask result of whether the login succeeded.
- """
- success, _ = common_login(user_obj.uuid)
- if success:
- if model.user.has_user_prompts(user_obj):
- return redirect(url_for('web.updateuser'))
- else:
- return redirect(url_for('web.index'))
- else:
- return _render_ologin_error(service_name, 'Could not login. Account may be disabled')
-
-def _attach_service(login_service, user_obj, lid, lusername):
- """ Attaches the given user account to the given service, with the given service user ID and
- service username.
- """
- metadata = {
- 'service_username': lusername,
- }
-
- try:
- model.user.attach_federated_login(user_obj, login_service.service_id(), lid,
- metadata=metadata)
- return _oauthresult(user_obj=user_obj)
- except IntegrityError:
- err = '%s account %s is already attached to a %s account' % (
- login_service.service_name(), lusername, app.config['REGISTRY_TITLE_SHORT'])
- return _oauthresult(service_name=login_service.service_name(), error_message=err)
-
-def _register_service(login_service):
- """ Registers the given login service, adding its callback and attach routes to the blueprint. """
-
- @oauthlogin_csrf_protect
- def callback_func():
- # Check for a callback error.
- error = request.values.get('error', None)
- if error:
- return _render_ologin_error(login_service.service_name(), error)
-
- # Exchange the OAuth code for login information.
- code = request.values.get('code')
- try:
- lid, lusername, lemail = login_service.exchange_code_for_login(app.config, client, code, '')
- except OAuthLoginException as ole:
- logger.exception('Got login exception')
- return _render_ologin_error(login_service.service_name(), str(ole))
-
- # Conduct login.
- metadata = {
- 'service_username': lusername,
- }
-
- # Conduct OAuth login.
- captcha_verified = (int(time.time()) - session.get('captcha_verified', 0)) <= 600
- session['captcha_verified'] = 0
-
- result = _conduct_oauth_login(authentication, login_service, lid, lusername, lemail,
- metadata=metadata, captcha_verified=captcha_verified)
- if result.requires_verification:
- return render_page_template_with_routedata('oauthcaptcha.html',
- recaptcha_site_key=app.config['RECAPTCHA_SITE_KEY'],
- callback_url=request.base_url)
-
- return _get_response(result)
-
-
- @require_session_login
- @oauthlogin_csrf_protect
- def attach_func():
- # Check for a callback error.
- error = request.values.get('error', None)
- if error:
- return _render_ologin_error(login_service.service_name(), error)
-
- # Exchange the OAuth code for login information.
- code = request.values.get('code')
- try:
- lid, lusername, _ = login_service.exchange_code_for_login(app.config, client, code, '/attach')
- except OAuthLoginException as ole:
- return _render_ologin_error(login_service.service_name(), str(ole))
-
- # Conduct attach.
- user_obj = get_authenticated_user()
- result = _attach_service(login_service, user_obj, lid, lusername)
- if result.error_message is not None:
- return _get_response(result)
-
- return redirect(url_for('web.user_view', path=user_obj.username, tab='external'))
-
- def captcha_func():
- recaptcha_response = request.values.get('recaptcha_response', '')
- result = recaptcha2.verify(app.config['RECAPTCHA_SECRET_KEY'],
- recaptcha_response,
- get_request_ip())
-
- if not result['success']:
- abort(400)
-
- # Save that the captcha was verified.
- session['captcha_verified'] = int(time.time())
-
- # Redirect to the normal OAuth flow again, so that the user can now create an account.
- csrf_token = generate_csrf_token(OAUTH_CSRF_TOKEN_NAME)
- login_scopes = login_service.get_login_scopes()
- auth_url = login_service.get_auth_url(url_scheme_and_hostname, '', csrf_token, login_scopes)
- return redirect(auth_url)
-
- @require_session_login
- @oauthlogin_csrf_protect
- def cli_token_func():
- # Check for a callback error.
- error = request.values.get('error', None)
- if error:
- return _render_ologin_error(login_service.service_name(), error)
-
- # Exchange the OAuth code for the ID token.
- code = request.values.get('code')
- try:
- idtoken, _ = login_service.exchange_code_for_tokens(app.config, client, code, '/cli')
- except OAuthLoginException as ole:
- return _render_ologin_error(login_service.service_name(), str(ole))
-
- user_obj = get_authenticated_user()
- return redirect(url_for('web.user_view', path=user_obj.username, tab='settings',
- idtoken=idtoken))
-
- oauthlogin.add_url_rule('/%s/callback/captcha' % login_service.service_id(),
- '%s_oauth_captcha' % login_service.service_id(),
- captcha_func,
- methods=['POST'])
-
- oauthlogin.add_url_rule('/%s/callback' % login_service.service_id(),
- '%s_oauth_callback' % login_service.service_id(),
- callback_func,
- methods=['GET', 'POST'])
-
- oauthlogin.add_url_rule('/%s/callback/attach' % login_service.service_id(),
- '%s_oauth_attach' % login_service.service_id(),
- attach_func,
- methods=['GET', 'POST'])
-
- oauthlogin.add_url_rule('/%s/callback/cli' % login_service.service_id(),
- '%s_oauth_cli' % login_service.service_id(),
- cli_token_func,
- methods=['GET', 'POST'])
-
-# Register the routes for each of the login services.
-for current_service in oauth_login.services:
- _register_service(current_service)
diff --git a/endpoints/oauth/test/test_login.py b/endpoints/oauth/test/test_login.py
deleted file mode 100644
index 12a26e7ee..000000000
--- a/endpoints/oauth/test/test_login.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import pytest
-
-from mock import patch
-
-from data import model, database
-from data.users import get_users_handler, DatabaseUsers
-from endpoints.oauth.login import _conduct_oauth_login
-from oauth.services.github import GithubOAuthService
-from test.test_ldap import mock_ldap
-
-from test.fixtures import *
-
-@pytest.fixture(params=[None, 'username', 'email'])
-def login_service(request, app):
- config = {'GITHUB': {}}
- if request.param is not None:
- config['GITHUB']['LOGIN_BINDING_FIELD'] = request.param
-
- return GithubOAuthService(config, 'GITHUB')
-
-
-@pytest.fixture(params=['Database', 'LDAP'])
-def auth_system(request):
- return _get_users_handler(request.param)
-
-def _get_users_handler(auth_type):
- config = {}
- config['AUTHENTICATION_TYPE'] = auth_type
- config['LDAP_BASE_DN'] = ['dc=quay', 'dc=io']
- config['LDAP_ADMIN_DN'] = 'uid=testy,ou=employees,dc=quay,dc=io'
- config['LDAP_ADMIN_PASSWD'] = 'password'
- config['LDAP_USER_RDN'] = ['ou=employees']
-
- return get_users_handler(config, None, None)
-
-def test_existing_account(auth_system, login_service):
- login_service_lid = 'someexternaluser'
-
- # Create an existing bound federated user.
- created_user = model.user.create_federated_user('someuser', 'example@example.com',
- login_service.service_id(),
- login_service_lid, False)
- existing_user_count = database.User.select().count()
-
- with mock_ldap():
- result = _conduct_oauth_login(auth_system, login_service,
- login_service_lid, login_service_lid,
- 'example@example.com')
-
- assert result.user_obj == created_user
-
- # Ensure that no additional users were created.
- current_user_count = database.User.select().count()
- assert current_user_count == existing_user_count
-
-
-def test_new_account_via_database(login_service):
- existing_user_count = database.User.select().count()
- login_service_lid = 'someexternaluser'
- internal_auth = DatabaseUsers()
-
- # Conduct login. Since the external user doesn't (yet) bind to a user in the database,
- # a new user should be created and bound to the external service.
- result = _conduct_oauth_login(internal_auth, login_service, login_service_lid, login_service_lid,
- 'example@example.com')
- assert result.user_obj is not None
-
- current_user_count = database.User.select().count()
- assert current_user_count == existing_user_count + 1
-
- # Find the user and ensure it is bound.
- new_user = model.user.get_user(login_service_lid)
- federated_login = model.user.lookup_federated_login(new_user, login_service.service_id())
- assert federated_login is not None
-
- # Ensure that a notification was created.
- assert list(model.notification.list_notifications(result.user_obj,
- kind_name='password_required'))
-
-@pytest.mark.parametrize('open_creation, invite_only, has_invite, expect_success', [
- # Open creation -> Success!
- (True, False, False, True),
-
- # Open creation + invite only + no invite -> Failure!
- (True, True, False, False),
-
- # Open creation + invite only + invite -> Success!
- (True, True, True, True),
-
- # Close creation -> Failure!
- (False, False, False, False),
-])
-def test_flagged_user_creation(open_creation, invite_only, has_invite, expect_success, login_service):
- login_service_lid = 'someexternaluser'
- email = 'some@example.com'
-
- if has_invite:
- inviter = model.user.get_user('devtable')
- team = model.team.get_organization_team('buynlarge', 'owners')
- model.team.add_or_invite_to_team(inviter, team, email=email)
-
- internal_auth = DatabaseUsers()
-
- with patch('features.USER_CREATION', open_creation):
- with patch('features.INVITE_ONLY_USER_CREATION', invite_only):
- # Conduct login.
- result = _conduct_oauth_login(internal_auth, login_service, login_service_lid, login_service_lid,
- email)
- assert (result.user_obj is not None) == expect_success
- assert (result.error_message is None) == expect_success
-
-@pytest.mark.parametrize('binding_field, lid, lusername, lemail, expected_error', [
- # No binding field + newly seen user -> New unlinked user
- (None, 'someid', 'someunknownuser', 'someemail@example.com', None),
-
- # sub binding field + unknown sub -> Error.
- ('sub', 'someid', 'someuser', 'foo@bar.com',
- 'sub someid not found in backing auth system'),
-
- # username binding field + unknown username -> Error.
- ('username', 'someid', 'someunknownuser', 'foo@bar.com',
- 'username someunknownuser not found in backing auth system'),
-
- # email binding field + unknown email address -> Error.
- ('email', 'someid', 'someuser', 'someemail@example.com',
- 'email someemail@example.com not found in backing auth system'),
-
- # No binding field + newly seen user -> New unlinked user.
- (None, 'someid', 'someuser', 'foo@bar.com', None),
-
- # username binding field + valid username -> fully bound user.
- ('username', 'someid', 'someuser', 'foo@bar.com', None),
-
- # sub binding field + valid sub -> fully bound user.
- ('sub', 'someuser', 'someusername', 'foo@bar.com', None),
-
- # email binding field + valid email -> fully bound user.
- ('email', 'someid', 'someuser', 'foo@bar.com', None),
-
- # username binding field + valid username + invalid email -> fully bound user.
- ('username', 'someid', 'someuser', 'another@email.com', None),
-
- # email binding field + valid email + invalid username -> fully bound user.
- ('email', 'someid', 'someotherusername', 'foo@bar.com', None),
-])
-def test_new_account_via_ldap(binding_field, lid, lusername, lemail, expected_error, app):
- existing_user_count = database.User.select().count()
-
- config = {'GITHUB': {}}
- if binding_field is not None:
- config['GITHUB']['LOGIN_BINDING_FIELD'] = binding_field
-
- external_auth = GithubOAuthService(config, 'GITHUB')
- internal_auth = _get_users_handler('LDAP')
-
- with mock_ldap():
- # Conduct OAuth login.
- result = _conduct_oauth_login(internal_auth, external_auth, lid, lusername, lemail)
- assert result.error_message == expected_error
-
- current_user_count = database.User.select().count()
- if expected_error is None:
- # Ensure that the new user was created and that it is bound to both the
- # external login service and to LDAP (if a binding_field was given).
- assert current_user_count == existing_user_count + 1
- assert result.user_obj is not None
-
- # Check the service bindings.
- external_login = model.user.lookup_federated_login(result.user_obj,
- external_auth.service_id())
- assert external_login is not None
-
- internal_login = model.user.lookup_federated_login(result.user_obj,
- internal_auth.federated_service)
- if binding_field is not None:
- assert internal_login is not None
- else:
- assert internal_login is None
-
- # Ensure that no notification was created.
- assert not list(model.notification.list_notifications(result.user_obj,
- kind_name='password_required'))
- else:
- # Ensure that no additional users were created.
- assert current_user_count == existing_user_count
-
-
-def test_existing_account_in_ldap(app):
- config = {'GITHUB': {'LOGIN_BINDING_FIELD': 'username'}}
-
- external_auth = GithubOAuthService(config, 'GITHUB')
- internal_auth = _get_users_handler('LDAP')
-
- # Add an existing federated user bound to the LDAP account associated with `someuser`.
- bound_user = model.user.create_federated_user('someuser', 'foo@bar.com',
- internal_auth.federated_service, 'someuser', False)
-
- existing_user_count = database.User.select().count()
-
- with mock_ldap():
- # Conduct OAuth login with the same lid and bound field. This should find the existing LDAP
- # user (via the `username` binding), and then bind Github to it as well.
- result = _conduct_oauth_login(internal_auth, external_auth, bound_user.username,
- bound_user.username, bound_user.email)
- assert result.error_message is None
-
- # Ensure that the same user was returned, and that it is now bound to the Github account
- # as well.
- assert result.user_obj.id == bound_user.id
-
- # Ensure that no additional users were created.
- current_user_count = database.User.select().count()
- assert current_user_count == existing_user_count
-
- # Check the service bindings.
- external_login = model.user.lookup_federated_login(result.user_obj,
- external_auth.service_id())
- assert external_login is not None
-
- internal_login = model.user.lookup_federated_login(result.user_obj,
- internal_auth.federated_service)
- assert internal_login is not None
diff --git a/endpoints/oauthlogin.py b/endpoints/oauthlogin.py
new file mode 100644
index 000000000..17cb6da20
--- /dev/null
+++ b/endpoints/oauthlogin.py
@@ -0,0 +1,371 @@
+import logging
+import requests
+
+from flask import request, redirect, url_for, Blueprint
+from flask_login import current_user
+from peewee import IntegrityError
+
+import features
+
+from app import app, analytics, get_app_url, github_login, google_login, dex_login
+from auth.process import require_session_login
+from data import model
+from endpoints.common import common_login, route_show_if
+from endpoints.web import index
+from endpoints.csrf import csrf_protect, OAUTH_CSRF_TOKEN_NAME
+from util.security.jwtutil import decode, InvalidTokenError
+from util.validation import generate_valid_usernames
+
+logger = logging.getLogger(__name__)
+client = app.config['HTTPCLIENT']
+oauthlogin = Blueprint('oauthlogin', __name__)
+
+oauthlogin_csrf_protect = csrf_protect(OAUTH_CSRF_TOKEN_NAME, 'state', all_methods=True)
+
+def render_ologin_error(service_name, error_message=None, register_redirect=False):
+ user_creation = bool(features.USER_CREATION and features.DIRECT_LOGIN)
+ error_info = {
+ 'reason': 'ologinerror',
+ 'service_name': service_name,
+ 'error_message': error_message or 'Could not load user data. The token may have expired',
+ 'service_url': get_app_url(),
+ 'user_creation': user_creation,
+ 'register_redirect': register_redirect,
+ }
+
+ resp = index('', error_info=error_info)
+ resp.status_code = 400
+ return resp
+
+
+def get_user(service, token):
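+ """ Fetches the user's profile from the service's user endpoint; returns {} on failure. """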
+ token_param = {
+ 'access_token': token,
+ 'alt': 'json',
+ }
+ got_user = client.get(service.user_endpoint(), params=token_param)
+ if got_user.status_code != requests.codes.ok:
+ return {}
+
+ return got_user.json()
+
+
+def conduct_oauth_login(service, user_id, username, email, metadata=None):
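+ """ Logs in the account bound to the given federated login, creating a new user first
+     if none exists and user creation is enabled. """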
+ service_name = service.service_name()
+ to_login = model.user.verify_federated_login(service_name.lower(), user_id)
+ if not to_login:
+ # See if we can create a new user.
+ if not features.USER_CREATION:
+ error_message = 'User creation is disabled. Please contact your administrator'
+ return render_ologin_error(service_name, error_message)
+
+ # Try to create the user
+ try:
+ new_username = None
+ for valid in generate_valid_usernames(username):
+ if model.user.get_user_or_org(valid):
+ continue
+
+ new_username = valid
+ break
+
+ prompts = model.user.get_default_user_prompts(features)
+ to_login = model.user.create_federated_user(new_username, email, service_name.lower(),
+ user_id, set_password_notification=True,
+ metadata=metadata or {},
+ prompts=prompts)
+
+ # Success, tell analytics
+ analytics.track(to_login.username, 'register', {'service': service_name.lower()})
+
+ except model.InvalidEmailAddressException:
+ message = "The e-mail address %s is already associated " % (email, )
+ message = message + "with an existing %s account." % (app.config['REGISTRY_TITLE_SHORT'], )
+ message = message + "\nPlease log in with your username and password and "
+ message = message + "associate your %s account to use it in the future." % (service_name, )
+
+ return render_ologin_error(service_name, message, register_redirect=True)
+
+ except model.DataModelException as ex:
+ return render_ologin_error(service_name, ex.message)
+
+ if common_login(to_login):
+ if model.user.has_user_prompts(to_login):
+ return redirect(url_for('web.updateuser'))
+ else:
+ return redirect(url_for('web.index'))
+
+ return render_ologin_error(service_name)
+
+
+def get_email_username(user_data):
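+ """ Returns the local part of the e-mail address, used as the base for a new username. """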
+ username = user_data['email']
+ at = username.find('@')
+ if at > 0:
+ username = username[0:at]
+
+ return username
+
+
+@oauthlogin.route('/google/callback', methods=['GET'])
+@route_show_if(features.GOOGLE_LOGIN)
+@oauthlogin_csrf_protect
+def google_oauth_callback():
+ error = request.args.get('error', None)
+ if error:
+ return render_ologin_error('Google', error)
+
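+ # Exchange the OAuth code for an access token, then fetch the user's profile with it.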
+ code = request.args.get('code')
+ token = google_login.exchange_code_for_token(app.config, client, code, form_encode=True)
+ if token is None:
+ return render_ologin_error('Google')
+
+ user_data = get_user(google_login, token)
+ if not user_data or not user_data.get('id', None) or not user_data.get('email', None):
+ return render_ologin_error('Google')
+
+ if not user_data.get('verified_email', False):
+ return render_ologin_error(
+ 'Google',
+ 'A verified e-mail address is required for login. Please verify your ' +
+ 'e-mail address in Google and try again.',
+ )
+
+ username = get_email_username(user_data)
+ metadata = {
+ 'service_username': user_data['email']
+ }
+
+ return conduct_oauth_login(google_login, user_data['id'], username, user_data['email'],
+ metadata=metadata)
+
+
+@oauthlogin.route('/github/callback', methods=['GET'])
+@route_show_if(features.GITHUB_LOGIN)
+@oauthlogin_csrf_protect
+def github_oauth_callback():
+ error = request.args.get('error', None)
+ if error:
+ return render_ologin_error('GitHub', error)
+
+ # Exchange the OAuth code.
+ code = request.args.get('code')
+ token = github_login.exchange_code_for_token(app.config, client, code)
+ if token is None:
+ return render_ologin_error('GitHub')
+
+ # Retrieve the user's information.
+ user_data = get_user(github_login, token)
+ if not user_data or 'login' not in user_data:
+ return render_ologin_error('GitHub')
+
+ username = user_data['login']
+ github_id = user_data['id']
+
+ v3_media_type = {
+ 'Accept': 'application/vnd.github.v3'
+ }
+
+ token_param = {
+ 'access_token': token,
+ }
+
+ # Retrieve the user's organizations (if organization filtering is turned on).
+ if github_login.allowed_organizations() is not None:
+ get_orgs = client.get(github_login.orgs_endpoint(), params=token_param,
+ headers={'Accept': 'application/vnd.github.moondragon+json'})
+
+ organizations = set([org.get('login').lower() for org in get_orgs.json()])
+ matching_organizations = organizations & set(github_login.allowed_organizations())
+ if not matching_organizations:
+ err = """You are not a member of an allowed GitHub organization.
+ Please contact your system administrator if you believe this is in error."""
+ return render_ologin_error('GitHub', err)
+
+ # Find the e-mail address for the user: we will accept any email, but we prefer the primary
+ get_email = client.get(github_login.email_endpoint(), params=token_param,
+ headers=v3_media_type)
+ if get_email.status_code / 100 != 2:
+ return render_ologin_error('GitHub')
+
+ found_email = None
+ for user_email in get_email.json():
+ if not github_login.is_enterprise() and not user_email['verified']:
+ continue
+
+ found_email = user_email['email']
+ if user_email['primary']:
+ break
+
+ if found_email is None:
+ err = 'There is no verified e-mail address attached to the GitHub account.'
+ return render_ologin_error('GitHub', err)
+
+ metadata = {
+ 'service_username': username
+ }
+
+ return conduct_oauth_login(github_login, github_id, username, found_email, metadata=metadata)
+
+
+@oauthlogin.route('/google/callback/attach', methods=['GET'])
+@route_show_if(features.GOOGLE_LOGIN)
+@require_session_login
+@oauthlogin_csrf_protect
+def google_oauth_attach():
+ code = request.args.get('code')
+ token = google_login.exchange_code_for_token(app.config, client, code,
+ redirect_suffix='/attach', form_encode=True)
+ if token is None:
+ return render_ologin_error('Google')
+
+ user_data = get_user(google_login, token)
+ if not user_data or not user_data.get('id', None):
+ return render_ologin_error('Google')
+
+ if not user_data.get('verified_email', False):
+ return render_ologin_error(
+ 'Google',
+ 'A verified e-mail address is required for login. Please verify your ' +
+ 'e-mail address in Google and try again.',
+ )
+
+ google_id = user_data['id']
+ user_obj = current_user.db_user()
+
+ username = get_email_username(user_data)
+ metadata = {
+ 'service_username': user_data['email']
+ }
+
+ try:
+ model.user.attach_federated_login(user_obj, 'google', google_id, metadata=metadata)
+ except IntegrityError:
+ err = 'Google account %s is already attached to a %s account' % (
+ username, app.config['REGISTRY_TITLE_SHORT'])
+ return render_ologin_error('Google', err)
+
+ return redirect(url_for('web.user_view', path=user_obj.username, tab='external'))
+
+
+@oauthlogin.route('/github/callback/attach', methods=['GET'])
+@route_show_if(features.GITHUB_LOGIN)
+@require_session_login
+@oauthlogin_csrf_protect
+def github_oauth_attach():
+ code = request.args.get('code')
+ token = github_login.exchange_code_for_token(app.config, client, code)
+ if token is None:
+ return render_ologin_error('GitHub')
+
+ user_data = get_user(github_login, token)
+ if not user_data:
+ return render_ologin_error('GitHub')
+
+ github_id = user_data['id']
+ user_obj = current_user.db_user()
+
+ username = user_data['login']
+ metadata = {
+ 'service_username': username
+ }
+
+ try:
+ model.user.attach_federated_login(user_obj, 'github', github_id, metadata=metadata)
+ except IntegrityError:
+    err = 'GitHub account %s is already attached to a %s account' % (
+      username, app.config['REGISTRY_TITLE_SHORT'])
+    return render_ologin_error('GitHub', err)
+
+ return redirect(url_for('web.user_view', path=user_obj.username, tab='external'))
+
+
+def decode_user_jwt(token, oidc_provider):
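+  """ Decodes the given JWT using the OIDC provider's public key, refreshing the key once on failure. """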
+ try:
+ return decode(token, oidc_provider.get_public_key(), algorithms=['RS256'],
+ audience=oidc_provider.client_id(),
+ issuer=oidc_provider.issuer)
+ except InvalidTokenError:
+ # Public key may have expired. Try to retrieve an updated public key and use it to decode.
+ return decode(token, oidc_provider.get_public_key(force_refresh=True), algorithms=['RS256'],
+ audience=oidc_provider.client_id(),
+ issuer=oidc_provider.issuer)
+
+
+@oauthlogin.route('/dex/callback', methods=['GET', 'POST'])
+@route_show_if(features.DEX_LOGIN)
+@oauthlogin_csrf_protect
+def dex_oauth_callback():
+ error = request.values.get('error', None)
+ if error:
+ return render_ologin_error(dex_login.public_title, error)
+
+ code = request.values.get('code')
+ if not code:
+ return render_ologin_error(dex_login.public_title, 'Missing OAuth code')
+
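+  # Exchange the code for a token, authenticating with the client credentials.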
+ token = dex_login.exchange_code_for_token(app.config, client, code, client_auth=True,
+ form_encode=True)
+ if token is None:
+ return render_ologin_error(dex_login.public_title)
+
+ try:
+ payload = decode_user_jwt(token, dex_login)
+ except InvalidTokenError:
+ logger.exception('Exception when decoding returned JWT')
+ return render_ologin_error(
+ dex_login.public_title,
+ 'Could not decode response. Please contact your system administrator about this error.',
+ )
+
+ username = get_email_username(payload)
+ metadata = {}
+
+ dex_id = payload['sub']
+ email_address = payload['email']
+
+ if not payload.get('email_verified', False):
+ return render_ologin_error(
+ dex_login.public_title,
+ 'A verified e-mail address is required for login. Please verify your ' +
+ 'e-mail address in %s and try again.' % dex_login.public_title,
+ )
+
+ return conduct_oauth_login(dex_login, dex_id, username, email_address,
+ metadata=metadata)
+
+
+@oauthlogin.route('/dex/callback/attach', methods=['GET', 'POST'])
+@route_show_if(features.DEX_LOGIN)
+@require_session_login
+@oauthlogin_csrf_protect
+def dex_oauth_attach():
+ code = request.args.get('code')
+ token = dex_login.exchange_code_for_token(app.config, client, code, redirect_suffix='/attach',
+ client_auth=True, form_encode=True)
+ if token is None:
+ return render_ologin_error(dex_login.public_title)
+
+ try:
+ payload = decode_user_jwt(token, dex_login)
+ except InvalidTokenError:
+ logger.exception('Exception when decoding returned JWT')
+ return render_ologin_error(
+ dex_login.public_title,
+ 'Could not decode response. Please contact your system administrator about this error.',
+ )
+
+ user_obj = current_user.db_user()
+ dex_id = payload['sub']
+ metadata = {}
+
+ try:
+ model.user.attach_federated_login(user_obj, 'dex', dex_id, metadata=metadata)
+ except IntegrityError:
+ err = '%s account is already attached to a %s account' % (dex_login.public_title,
+ app.config['REGISTRY_TITLE_SHORT'])
+ return render_ologin_error(dex_login.public_title, err)
+
+ return redirect(url_for('web.user_view', path=user_obj.username, tab='external'))
diff --git a/endpoints/realtime.py b/endpoints/realtime.py
index 9112b8146..cc113da0c 100644
--- a/endpoints/realtime.py
+++ b/endpoints/realtime.py
@@ -5,7 +5,7 @@ from flask import request, Blueprint, abort, Response
from flask_login import current_user
from app import userevents
-from auth.decorators import require_session_login
+from auth.process import require_session_login
from data.userevent import CannotReadUserEventsException
@@ -53,17 +53,12 @@ def user_test():
@require_session_login
def user_subscribe():
def wrapper(listener):
- logger.debug('Beginning streaming of user events')
- try:
- yield 'data: %s\n\n' % json.dumps({})
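+    # Emit an initial empty event so clients see the stream open immediately.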
+ yield 'data: %s\n\n' % json.dumps({})
- for event_id, data in listener.event_stream():
- message = {'event': event_id, 'data': data}
- json_string = json.dumps(message)
- yield 'data: %s\n\n' % json_string
- finally:
- logger.debug('Closing listener due to exception')
- listener.stop()
+ for event_id, data in listener.event_stream():
+ message = {'event': event_id, 'data': data}
+ json_string = json.dumps(message)
+ yield 'data: %s\n\n' % json_string
events = request.args.get('events', '').split(',')
if not events:
@@ -74,10 +69,4 @@ def user_subscribe():
except CannotReadUserEventsException:
abort(504)
- def on_close():
- logger.debug('Closing listener due to response close')
- listener.stop()
-
- r = Response(wrapper(listener), mimetype="text/event-stream")
- r.call_on_close(on_close)
- return r
+ return Response(wrapper(listener), mimetype="text/event-stream")
diff --git a/endpoints/secscan.py b/endpoints/secscan.py
index 1f49bf23c..6ce803e1e 100644
--- a/endpoints/secscan.py
+++ b/endpoints/secscan.py
@@ -5,7 +5,7 @@ import features
from app import secscan_notification_queue
from flask import request, make_response, Blueprint, abort
-from endpoints.decorators import route_show_if, anon_allowed
+from endpoints.common import route_show_if
logger = logging.getLogger(__name__)
secscan = Blueprint('secscan', __name__)
@@ -25,9 +25,3 @@ def secscan_notification():
secscan_notification_queue.put(name, json.dumps(notification))
return make_response('Okay')
-
-
-@secscan.route('/_internal_ping')
-@anon_allowed
-def internal_ping():
- return make_response('true', 200)
diff --git a/endpoints/test/__init__.py b/endpoints/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/endpoints/test/shared.py b/endpoints/test/shared.py
deleted file mode 100644
index fa6430445..000000000
--- a/endpoints/test/shared.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import datetime
-import json
-import base64
-
-from contextlib import contextmanager
-from data import model
-
-from flask import g
-from flask_principal import Identity
-
-CSRF_TOKEN_KEY = '_csrf_token'
-
-@contextmanager
-def client_with_identity(auth_username, client):
- with client.session_transaction() as sess:
- if auth_username and auth_username is not None:
- loaded = model.user.get_user(auth_username)
- sess['user_id'] = loaded.uuid
- sess['login_time'] = datetime.datetime.now()
- else:
- sess['user_id'] = 'anonymous'
-
- yield client
-
- with client.session_transaction() as sess:
- sess['user_id'] = None
- sess['login_time'] = None
- sess[CSRF_TOKEN_KEY] = None
-
-
-@contextmanager
-def toggle_feature(name, enabled):
- """ Context manager which temporarily toggles a feature. """
- import features
- previous_value = getattr(features, name)
- setattr(features, name, enabled)
- yield
- setattr(features, name, previous_value)
-
-
-def add_csrf_param(client, params):
- """ Returns a params dict with the CSRF parameter added. """
- params = params or {}
-
- with client.session_transaction() as sess:
- params[CSRF_TOKEN_KEY] = 'sometoken'
- sess[CSRF_TOKEN_KEY] = 'sometoken'
-
- return params
-
-
-def gen_basic_auth(username, password):
- """ Generates a basic auth header. """
- return 'Basic ' + base64.b64encode("%s:%s" % (username, password))
-
-
-def conduct_call(client, resource, url_for, method, params, body=None, expected_code=200,
- headers=None, raw_body=None):
- """ Conducts a call to a Flask endpoint. """
- params = add_csrf_param(client, params)
-
- final_url = url_for(resource, **params)
-
- headers = headers or {}
- headers.update({"Content-Type": "application/json"})
-
- if body is not None:
- body = json.dumps(body)
-
- if raw_body is not None:
- body = raw_body
-
- # Required for anonymous calls to not exception.
- g.identity = Identity(None, 'none')
-
- rv = client.open(final_url, method=method, data=body, headers=headers)
- msg = '%s %s: got %s expected: %s | %s' % (method, final_url, rv.status_code, expected_code,
- rv.data)
- assert rv.status_code == expected_code, msg
- return rv
diff --git a/endpoints/test/test_anon_checked.py b/endpoints/test/test_anon_checked.py
deleted file mode 100644
index 94cc9b9aa..000000000
--- a/endpoints/test/test_anon_checked.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import pytest
-
-from app import app
-from endpoints.v1 import v1_bp
-from endpoints.v2 import v2_bp
-from endpoints.verbs import verbs
-
-@pytest.mark.parametrize('blueprint', [
- v2_bp,
- v1_bp,
- verbs,
-])
-def test_verify_blueprint(blueprint):
- class Checker(object):
- def __init__(self):
- self.first_registration = True
- self.app = app
-
- def add_url_rule(self, rule, endpoint, view_function, methods=None):
- result = ('__anon_protected' in dir(view_function) or
- '__anon_allowed' in dir(view_function))
- error_message = ('Missing anonymous access protection decorator on function ' +
- '%s under blueprint %s' % (endpoint, blueprint.name))
- assert result, error_message
-
- for deferred_function in blueprint.deferred_functions:
- deferred_function(Checker())
diff --git a/endpoints/test/test_building.py b/endpoints/test/test_building.py
deleted file mode 100644
index 222149785..000000000
--- a/endpoints/test/test_building.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import pytest
-
-from data import model
-from buildtrigger.triggerutil import raise_if_skipped_build, SkipRequestException
-from endpoints.building import (start_build, PreparedBuild, MaximumBuildsQueuedException,
- BuildTriggerDisabledException)
-
-from test.fixtures import *
-
-def test_maximum_builds(app):
- # Change the maximum number of builds to 1.
- user = model.user.create_user('foobar', 'password', 'foo@example.com')
- user.maximum_queued_builds_count = 1
- user.save()
-
- repo = model.repository.create_repository('foobar', 'somerepo', user)
-
- # Try to queue a build; should succeed.
- prepared_build = PreparedBuild()
- prepared_build.build_name = 'foo'
- prepared_build.is_manual = True
- prepared_build.dockerfile_id = 'foobar'
- prepared_build.archive_url = 'someurl'
- prepared_build.tags = ['latest']
- prepared_build.subdirectory = '/'
- prepared_build.context = '/'
- prepared_build.metadata = {}
-
- start_build(repo, prepared_build)
-
- # Try to queue a second build; should fail.
- with pytest.raises(MaximumBuildsQueuedException):
- start_build(repo, prepared_build)
-
-
-def test_start_build_disabled_trigger(app):
- trigger = model.build.list_build_triggers('devtable', 'building')[0]
- trigger.enabled = False
- trigger.save()
-
- build = PreparedBuild(trigger=trigger)
-
- with pytest.raises(BuildTriggerDisabledException):
- start_build(trigger.repository, build)
-
-
-@pytest.mark.parametrize('ref, expected_tags', [
- ('ref/heads/somebranch', ['somebranch']),
- ('ref/heads/master', ['master', 'latest']),
-
- ('ref/tags/somebranch', ['somebranch']),
- ('ref/tags/master', ['master', 'latest']),
-
- ('ref/heads/slash/branch', ['slash_branch']),
- ('ref/tags/slash/tag', ['slash_tag']),
-
- ('ref/heads/foobar#2', ['foobar_2']),
-])
-def test_tags_for_ref(ref, expected_tags):
- prepared = PreparedBuild()
- prepared.tags_from_ref(ref, default_branch='master')
- assert set(prepared._tags) == set(expected_tags)
-
-
-@pytest.mark.parametrize('metadata, config', [
- ({}, {}),
- pytest.param({'ref': 'ref/heads/master'}, {'branchtag_regex': 'nothing'}, id='branchtag regex'),
- pytest.param({
- 'ref': 'ref/heads/master',
- 'commit_info': {
- 'message': '[skip build]',
- },
- }, {}, id='commit message'),
-])
-def test_skip(metadata, config):
- prepared = PreparedBuild()
- prepared.metadata = metadata
- config = config
-
- with pytest.raises(SkipRequestException):
- raise_if_skipped_build(prepared, config)
-
-
-def test_does_not_skip():
- prepared = PreparedBuild()
- prepared.metadata = {
- 'ref': 'ref/heads/master',
- 'commit_info': {
- 'message': 'some cool message',
- },
- }
-
- config = {
- 'branchtag_regex': '(master)|(heads/master)',
- }
-
- raise_if_skipped_build(prepared, config)
diff --git a/endpoints/test/test_common.py b/endpoints/test/test_common.py
deleted file mode 100644
index d99033cde..000000000
--- a/endpoints/test/test_common.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import pytest
-
-from endpoints.common import common_login
-from endpoints.csrf import QUAY_CSRF_UPDATED_HEADER_NAME
-
-from test.fixtures import *
-from endpoints.common_models_pre_oci import pre_oci_model as model
-
-@pytest.mark.parametrize('username, expect_success', [
- # Valid users.
- ('devtable', True),
- ('public', True),
-
- # Org.
- ('buynlarge', False),
-
- # Robot.
- ('devtable+dtrobot', False),
-
- # Unverified user.
- ('unverified', False),
-])
-def test_common_login(username, expect_success, app):
- uuid = model.get_namespace_uuid(username)
- with app.app_context():
- success, headers = common_login(uuid)
- assert success == expect_success
- if success:
- assert QUAY_CSRF_UPDATED_HEADER_NAME in headers
diff --git a/endpoints/test/test_decorators.py b/endpoints/test/test_decorators.py
deleted file mode 100644
index e7866e25d..000000000
--- a/endpoints/test/test_decorators.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from data import model
-from endpoints.api import api
-from endpoints.api.repository import Repository
-from endpoints.test.shared import conduct_call
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('user_agent, include_header, expected_code', [
- ('curl/whatever', True, 200),
- ('curl/whatever', False, 200),
-
- ('Mozilla/whatever', True, 200),
- ('Mozilla/5.0', True, 200),
- ('Mozilla/5.0 (Windows NT 5.1; Win64; x64)', False, 400),
-])
-def test_require_xhr_from_browser(user_agent, include_header, expected_code, app, client):
- # Create a public repo with a dot in its name.
- user = model.user.get_user('devtable')
- model.repository.create_repository('devtable', 'somerepo.bat', user, 'public')
-
- # Retrieve the repository and ensure we either allow it through or fail, depending on the
- # user agent and header.
- params = {
- 'repository': 'devtable/somerepo.bat'
- }
-
- headers = {
- 'User-Agent': user_agent,
- }
-
- if include_header:
- headers['X-Requested-With'] = 'XMLHttpRequest'
-
- conduct_call(client, Repository, api.url_for, 'GET', params, headers=headers,
- expected_code=expected_code)
diff --git a/endpoints/test/test_webhooks.py b/endpoints/test/test_webhooks.py
deleted file mode 100644
index 1061f106b..000000000
--- a/endpoints/test/test_webhooks.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import base64
-import pytest
-
-from flask import url_for
-
-from data import model
-from endpoints.test.shared import conduct_call
-from test.fixtures import *
-
-def test_start_build_disabled_trigger(app, client):
- trigger = model.build.list_build_triggers('devtable', 'building')[0]
- trigger.enabled = False
- trigger.save()
-
- params = {
- 'trigger_uuid': trigger.uuid,
- }
-
- headers = {
- 'Authorization': 'Basic ' + base64.b64encode('devtable:password'),
- }
-
- conduct_call(client, 'webhooks.build_trigger_webhook', url_for, 'POST', params, None, 400,
- headers=headers)
diff --git a/endpoints/trackhelper.py b/endpoints/trackhelper.py
new file mode 100644
index 000000000..0aa66cefa
--- /dev/null
+++ b/endpoints/trackhelper.py
@@ -0,0 +1,92 @@
+import logging
+import random
+
+from urlparse import urlparse
+
+from flask import request
+
+from app import analytics, userevents
+from data import model
+from auth.registry_jwt_auth import get_granted_entity
+from auth.auth_context import (get_authenticated_user, get_validated_token,
+ get_validated_oauth_token)
+
+logger = logging.getLogger(__name__)
+
+def track_and_log(event_name, repo_obj, analytics_name=None, analytics_sample=1, **kwargs):
+ repo_name = repo_obj.name
+  namespace_name = repo_obj.namespace_name
+ metadata = {
+ 'repo': repo_name,
+ 'namespace': namespace_name,
+ }
+ metadata.update(kwargs)
+
+ analytics_id = 'anonymous'
+
+ authenticated_oauth_token = get_validated_oauth_token()
+ authenticated_user = get_authenticated_user()
+ authenticated_token = get_validated_token() if not authenticated_user else None
+
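+  # Fall back to the entity granted via the registry JWT when no direct credentials are present.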
+ if not authenticated_user and not authenticated_token and not authenticated_oauth_token:
+ entity = get_granted_entity()
+ if entity:
+ authenticated_user = entity.user
+ authenticated_token = entity.token
+ authenticated_oauth_token = entity.oauth
+
+ logger.debug('Logging the %s to Mixpanel and the log system', event_name)
+ if authenticated_oauth_token:
+ metadata['oauth_token_id'] = authenticated_oauth_token.id
+ metadata['oauth_token_application_id'] = authenticated_oauth_token.application.client_id
+ metadata['oauth_token_application'] = authenticated_oauth_token.application.name
+ analytics_id = 'oauth:{0}'.format(authenticated_oauth_token.id)
+ elif authenticated_user:
+ metadata['username'] = authenticated_user.username
+ analytics_id = authenticated_user.username
+ elif authenticated_token:
+ metadata['token'] = authenticated_token.friendly_name
+ metadata['token_code'] = authenticated_token.code
+
+ if authenticated_token.kind:
+ metadata['token_type'] = authenticated_token.kind.name
+
+ analytics_id = 'token:{0}'.format(authenticated_token.code)
+ else:
+ metadata['public'] = True
+ analytics_id = 'anonymous'
+
+ # Publish the user event (if applicable)
+  logger.debug('Checking whether to publish %s to the user events system', event_name)
+ if authenticated_user and not authenticated_user.robot:
+ logger.debug('Publishing %s to the user events system', event_name)
+ user_event_data = {
+ 'action': event_name,
+ 'repository': repo_name,
+ 'namespace': namespace_name,
+ }
+
+ event = userevents.get_event(authenticated_user.username)
+ event.publish_event_data('docker-cli', user_event_data)
+
+ # Save the action to mixpanel.
+ if random.random() < analytics_sample:
+ if analytics_name is None:
+ analytics_name = event_name
+
+ logger.debug('Logging the %s to Mixpanel', analytics_name)
+
+ request_parsed = urlparse(request.url_root)
+ extra_params = {
+ 'repository': '%s/%s' % (namespace_name, repo_name),
+ 'user-agent': request.user_agent.string,
+ 'hostname': request_parsed.hostname,
+ }
+
+ analytics.track(analytics_id, analytics_name, extra_params)
+
+ # Log the action to the database.
+  logger.debug('Logging the %s to the logs system', event_name)
+ model.log.log_action(event_name, namespace_name, performer=authenticated_user,
+ ip=request.remote_addr, metadata=metadata, repository=repo_obj)
+ logger.debug('Track and log of %s complete', event_name)
diff --git a/endpoints/v1/__init__.py b/endpoints/v1/__init__.py
index 2248222d2..18ef430c4 100644
--- a/endpoints/v1/__init__.py
+++ b/endpoints/v1/__init__.py
@@ -1,21 +1,14 @@
-import logging
+from flask import Blueprint, make_response
-from functools import wraps
-
-from flask import Blueprint, make_response, jsonify
-
-import features
-
-from app import metric_queue, app
-from data.readreplica import ReadOnlyModeException
+from app import metric_queue, license_validator
from endpoints.decorators import anon_protect, anon_allowed
from util.metrics.metricqueue import time_blueprint
-from util.http import abort
+
v1_bp = Blueprint('v1', __name__)
+license_validator.enforce_license_before_request(v1_bp)
time_blueprint(v1_bp, metric_queue)
-logger = logging.getLogger(__name__)
# Note: This is *not* part of the Docker index spec. This is here for our own health check,
# since we have nginx handle the _ping below.
@@ -35,42 +28,6 @@ def ping():
return response
-@v1_bp.app_errorhandler(ReadOnlyModeException)
-def handle_readonly(ex):
- response = jsonify({'message': 'System is currently read-only. Pulls will succeed but all ' +
- 'write operations are currently suspended.',
- 'is_readonly': True})
- response.status_code = 503
- return response
-
-
-def check_v1_push_enabled(namespace_name_kwarg='namespace_name'):
- """ Decorator which checks if V1 push is enabled for the current namespace. The first argument
- to the wrapped function must be the namespace name or there must be a kwarg with the
- name `namespace_name`.
- """
- def wrapper(wrapped):
- @wraps(wrapped)
- def decorated(*args, **kwargs):
- if namespace_name_kwarg in kwargs:
- namespace_name = kwargs[namespace_name_kwarg]
- else:
- namespace_name = args[0]
-
- if features.RESTRICTED_V1_PUSH:
- whitelist = app.config.get('V1_PUSH_WHITELIST') or []
- logger.debug('V1 push is restricted to whitelist: %s', whitelist)
- if namespace_name not in whitelist:
- abort(405,
- message=('V1 push support has been deprecated. To enable for this ' +
- 'namespace, please contact support.'))
-
- return wrapped(*args, **kwargs)
- return decorated
- return wrapper
-
-
-from endpoints.v1 import (
- index,
- registry,
- tag,)
+from endpoints.v1 import index
+from endpoints.v1 import registry
+from endpoints.v1 import tag
diff --git a/endpoints/v1/index.py b/endpoints/v1/index.py
index 3030b20e8..c4f815f98 100644
--- a/endpoints/v1/index.py
+++ b/endpoints/v1/index.py
@@ -6,24 +6,21 @@ from functools import wraps
from flask import request, make_response, jsonify, session
-from app import userevents, metric_queue, storage, docker_v2_signing_key
-from auth.auth_context import get_authenticated_context, get_authenticated_user
-from auth.credentials import validate_credentials, CredentialKind
-from auth.decorators import process_auth
-from auth.permissions import (
- ModifyRepositoryPermission, UserAdminPermission, ReadRepositoryPermission,
- CreateRepositoryPermission, repository_read_grant, repository_write_grant)
-from auth.signedgrant import generate_signed_token
-from data import model
-from data.registry_model import registry_model
-from data.registry_model.manifestbuilder import create_manifest_builder, lookup_manifest_builder
-from endpoints.decorators import (anon_protect, anon_allowed, parse_repository_name,
- check_repository_state, check_readonly)
-from endpoints.v1 import v1_bp, check_v1_push_enabled
-from notifications import spawn_notification
-from util.audit import track_and_log
+from data.interfaces.v1 import pre_oci_model as model
+from app import authentication, userevents, metric_queue
+from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token
+from auth.permissions import (ModifyRepositoryPermission, UserAdminPermission,
+ ReadRepositoryPermission, CreateRepositoryPermission,
+ repository_read_grant, repository_write_grant)
+from auth.process import process_auth, generate_signed_token
from util.http import abort
from util.names import REPOSITORY_NAME_REGEX
+from endpoints.common import parse_repository_name
+from endpoints.v1 import v1_bp
+from endpoints.trackhelper import track_and_log
+from endpoints.notificationhelper import spawn_notification
+from endpoints.decorators import anon_protect, anon_allowed
+
logger = logging.getLogger(__name__)
@@ -33,18 +30,6 @@ class GrantType(object):
WRITE_REPOSITORY = 'write'
-def ensure_namespace_enabled(f):
- @wraps(f)
- def wrapper(namespace_name, repo_name, *args, **kwargs):
- namespace = model.user.get_namespace_user(namespace_name)
- is_namespace_enabled = namespace is not None and namespace.enabled
- if not is_namespace_enabled:
- abort(400, message='Namespace is disabled. Please contact your system administrator.')
-
- return f(namespace_name, repo_name, *args, **kwargs)
- return wrapper
-
-
def generate_headers(scope=GrantType.READ_REPOSITORY, add_grant_for_status=None):
def decorator_method(f):
@wraps(f)
@@ -80,16 +65,13 @@ def generate_headers(scope=GrantType.READ_REPOSITORY, add_grant_for_status=None)
response.headers['X-Docker-Token'] = signature
return response
-
return wrapper
-
return decorator_method
@v1_bp.route('/users', methods=['POST'])
@v1_bp.route('/users/', methods=['POST'])
@anon_allowed
-@check_readonly
def create_user():
user_data = request.get_json()
if not user_data or not 'username' in user_data:
@@ -101,32 +83,34 @@ def create_user():
# UGH! we have to use this response when the login actually worked, in order
# to get the CLI to try again with a get, and then tell us login succeeded.
success = make_response('"Username or email already exists"', 400)
- result, kind = validate_credentials(username, password)
- if not result.auth_valid:
- if kind == CredentialKind.token:
- abort(400, 'Invalid access token.', issue='invalid-access-token')
- if kind == CredentialKind.robot:
- abort(400, 'Invalid robot account or password.', issue='robot-login-failure')
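+  # The Docker CLI uses the special usernames $token and $oauthtoken for token-based logins.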
+ if username == '$token':
+ if model.load_token(password):
+ return success
+ abort(400, 'Invalid access token.', issue='invalid-access-token')
- if kind == CredentialKind.oauth_token:
- abort(400, 'Invalid oauth access token.', issue='invalid-oauth-access-token')
+ elif username == '$oauthtoken':
+ if model.validate_oauth_token(password):
+ return success
+ abort(400, 'Invalid oauth access token.', issue='invalid-oauth-access-token')
- if kind == CredentialKind.user:
- # Mark that the login failed.
- event = userevents.get_event(username)
- event.publish_event_data('docker-cli', {'action': 'loginfailure'})
- abort(400, result.error_message, issue='login-failure')
+ elif '+' in username:
+ if model.verify_robot(username, password):
+ return success
+ abort(400, 'Invalid robot account or password.', issue='robot-login-failure')
- # Default case: Just fail.
- abort(400, result.error_message, issue='login-failure')
-
- if result.has_nonrobot_user:
+ (verified, error_message) = authentication.verify_and_link_user(username, password,
+ basic_auth=True)
+ if verified:
# Mark that the user was logged in.
event = userevents.get_event(username)
event.publish_event_data('docker-cli', {'action': 'login'})
-
- return success
+ return success
+ else:
+ # Mark that the login failed.
+ event = userevents.get_event(username)
+ event.publish_event_data('docker-cli', {'action': 'loginfailure'})
+ abort(400, error_message, issue='login-failure')
@v1_bp.route('/users', methods=['GET'])
@@ -134,20 +118,27 @@ def create_user():
@process_auth
@anon_allowed
def get_user():
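+  # Report the synthetic username ($oauthtoken or $token) when a token was used to authenticate.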
- context = get_authenticated_context()
- if not context or context.is_anonymous:
- abort(404)
-
- return jsonify({
- 'username': context.credential_username,
- 'email': None,
- })
+ if get_validated_oauth_token():
+ return jsonify({
+ 'username': '$oauthtoken',
+ 'email': None,
+ })
+ elif get_authenticated_user():
+ return jsonify({
+ 'username': get_authenticated_user().username,
+ 'email': get_authenticated_user().email,
+ })
+ elif get_validated_token():
+ return jsonify({
+ 'username': '$token',
+ 'email': None,
+ })
+ abort(404)
@v1_bp.route('/users//', methods=['PUT'])
@process_auth
@anon_allowed
-@check_readonly
def update_user(username):
permission = UserAdminPermission(username)
if permission.can():
@@ -155,52 +146,47 @@ def update_user(username):
if 'password' in update_request:
logger.debug('Updating user password')
- model.user.change_password(get_authenticated_user(), update_request['password'])
+ model.change_user_password(get_authenticated_user(), update_request['password'])
return jsonify({
'username': get_authenticated_user().username,
- 'email': get_authenticated_user().email,
+ 'email': get_authenticated_user().email
})
-
abort(403)
@v1_bp.route('/repositories//', methods=['PUT'])
@process_auth
@parse_repository_name()
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@generate_headers(scope=GrantType.WRITE_REPOSITORY, add_grant_for_status=201)
@anon_allowed
-@check_readonly
def create_repository(namespace_name, repo_name):
# Verify that the repository name is valid.
if not REPOSITORY_NAME_REGEX.match(repo_name):
abort(400, message='Invalid repository name. Repository names cannot contain slashes.')
logger.debug('Looking up repository %s/%s', namespace_name, repo_name)
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None and get_authenticated_user() is None:
+ repo = model.get_repository(namespace_name, repo_name)
+
+  logger.debug('Looked up repository %s/%s', namespace_name, repo_name)
+ if not repo and get_authenticated_user() is None:
logger.debug('Attempt to create repository %s/%s without user auth', namespace_name, repo_name)
abort(401,
message='Cannot create a repository as a guest. Please login via "docker login" first.',
issue='no-login')
- elif repository_ref:
+
+ elif repo:
modify_perm = ModifyRepositoryPermission(namespace_name, repo_name)
if not modify_perm.can():
abort(403,
message='You do not have permission to modify repository %(namespace)s/%(repository)s',
- issue='no-repo-write-permission', namespace=namespace_name, repository=repo_name)
- elif repository_ref.kind != 'image':
- msg = ('This repository is for managing %s resources and not container images.' %
- repository_ref.kind)
- abort(405, message=msg, namespace=namespace_name)
+ issue='no-repo-write-permission',
+ namespace=namespace_name, repository=repo_name)
else:
create_perm = CreateRepositoryPermission(namespace_name)
if not create_perm.can():
- logger.warning('Attempt to create a new repo %s/%s with insufficient perms', namespace_name,
- repo_name)
+ logger.info('Attempt to create a new repo %s/%s with insufficient perms', namespace_name,
+ repo_name)
msg = 'You do not have permission to create repositories in namespace "%(namespace)s"'
abort(403, message=msg, issue='no-create-permission', namespace=namespace_name)
@@ -208,8 +194,7 @@ def create_repository(namespace_name, repo_name):
logger.debug('Creating repository %s/%s with owner: %s', namespace_name, repo_name,
get_authenticated_user().username)
- repository_ref = model.repository.create_repository(namespace_name, repo_name,
- get_authenticated_user())
+ model.create_repository(namespace_name, repo_name, get_authenticated_user())
if get_authenticated_user():
user_event_data = {
@@ -221,52 +206,34 @@ def create_repository(namespace_name, repo_name):
event = userevents.get_event(get_authenticated_user().username)
event.publish_event_data('docker-cli', user_event_data)
- # Start a new builder for the repository and save its ID in the session.
- assert repository_ref
- builder = create_manifest_builder(repository_ref, storage, docker_v2_signing_key)
- logger.debug('Started repo push with manifest builder %s', builder)
- if builder is None:
- abort(404, message='Unknown repository', issue='unknown-repo')
-
- session['manifest_builder'] = builder.builder_id
return make_response('Created', 201)
@v1_bp.route('/repositories//images', methods=['PUT'])
@process_auth
@parse_repository_name()
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@generate_headers(scope=GrantType.WRITE_REPOSITORY)
@anon_allowed
-@check_readonly
def update_images(namespace_name, repo_name):
permission = ModifyRepositoryPermission(namespace_name, repo_name)
if permission.can():
logger.debug('Looking up repository')
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
- kind_filter='image')
- if repository_ref is None:
+ repo = model.get_repository(namespace_name, repo_name)
+ if not repo:
# Make sure the repo actually exists.
abort(404, message='Unknown repository', issue='unknown-repo')
- builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage,
- docker_v2_signing_key)
- if builder is None:
- abort(400)
-
# Generate a job for each notification that has been added to this repo
logger.debug('Adding notifications for repository')
+
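+  # 'pushed_tags' is recorded in the session earlier in the push flow (not shown in this diff).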
+ updated_tags = session.get('pushed_tags', {})
event_data = {
- 'updated_tags': [tag.name for tag in builder.committed_tags],
+ 'updated_tags': updated_tags,
}
- builder.done()
-
- track_and_log('push_repo', repository_ref)
- spawn_notification(repository_ref, 'repo_push', event_data)
+ track_and_log('push_repo', repo)
+ spawn_notification(repo, 'repo_push', event_data)
metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
return make_response('Updated', 204)
@@ -276,26 +243,24 @@ def update_images(namespace_name, repo_name):
@v1_bp.route('/repositories//images', methods=['GET'])
@process_auth
@parse_repository_name()
-@ensure_namespace_enabled
@generate_headers(scope=GrantType.READ_REPOSITORY)
@anon_protect
def get_repository_images(namespace_name, repo_name):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name,
- kind_filter='image')
-
permission = ReadRepositoryPermission(namespace_name, repo_name)
- if permission.can() or (repository_ref and repository_ref.is_public):
+
+ # TODO invalidate token?
+ if permission.can() or model.repository_is_public(namespace_name, repo_name):
# We can't rely on permissions to tell us if a repo exists anymore
- if repository_ref is None:
+ logger.debug('Looking up repository')
+ repo = model.get_repository(namespace_name, repo_name)
+ if not repo:
abort(404, message='Unknown repository', issue='unknown-repo')
logger.debug('Building repository image response')
resp = make_response(json.dumps([]), 200)
resp.mimetype = 'application/json'
- track_and_log('pull_repo', repository_ref,
- analytics_name='pull_repo_100x',
- analytics_sample=0.01)
+ track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v1', True])
return resp
@@ -305,23 +270,15 @@ def get_repository_images(namespace_name, repo_name):
@v1_bp.route('/repositories//images', methods=['DELETE'])
@process_auth
@parse_repository_name()
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@generate_headers(scope=GrantType.WRITE_REPOSITORY)
@anon_allowed
-@check_readonly
def delete_repository_images(namespace_name, repo_name):
abort(501, 'Not Implemented', issue='not-implemented')
@v1_bp.route('/repositories//auth', methods=['PUT'])
@parse_repository_name()
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@anon_allowed
-@check_readonly
def put_repository_auth(namespace_name, repo_name):
abort(501, 'Not Implemented', issue='not-implemented')
@@ -330,59 +287,43 @@ def put_repository_auth(namespace_name, repo_name):
@process_auth
@anon_protect
def get_search():
- query = request.args.get('q') or ''
-
- try:
- limit = min(100, max(1, int(request.args.get('n', 25))))
- except ValueError:
- limit = 25
-
- try:
- page = max(0, int(request.args.get('page', 1)))
- except ValueError:
- page = 1
+ query = request.args.get('q')
username = None
user = get_authenticated_user()
if user is not None:
username = user.username
- data = _conduct_repo_search(username, query, limit, page)
+ results = []
+ if query:
+ _conduct_repo_search(username, query, results)
+
+ data = {
+ "query": query,
+ "num_results": len(results),
+ "results" : results
+ }
+
resp = make_response(json.dumps(data), 200)
resp.mimetype = 'application/json'
return resp
-def _conduct_repo_search(username, query, limit=25, page=1):
+def _conduct_repo_search(username, query, results):
""" Finds matching repositories. """
- # Note that we put a maximum limit of five pages here, because this API should only really ever
- # be used by the Docker CLI, and it doesn't even paginate.
- page = min(page, 5)
- offset = (page - 1) * limit
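+  # Predicate used to filter matching repositories down to those the caller may read.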
+ def can_read(repo):
+ if repo.is_public:
+ return True
- if query:
- matching_repos = model.repository.get_filtered_matching_repositories(query,
- filter_username=username,
- offset=offset,
- limit=limit + 1)
- else:
- matching_repos = []
+ return ReadRepositoryPermission(repo.namespace_user.username, repo.name).can()
- results = []
- for repo in matching_repos[0:limit]:
+ only_public = username is None
+ matching_repos = model.get_sorted_matching_repositories(query, only_public, can_read, limit=5)
+
+ for repo in matching_repos:
results.append({
- 'name': repo.namespace_user.username + '/' + repo.name,
+ 'name': repo.namespace_name + '/' + repo.name,
'description': repo.description,
- 'is_public': model.repository.is_repository_public(repo),
- 'href': '/repository/' + repo.namespace_user.username + '/' + repo.name
+ 'is_public': repo.is_public,
+ 'href': '/repository/' + repo.namespace_name + '/' + repo.name
})
-
- # Defined: https://docs.docker.com/v1.6/reference/api/registry_api/
- return {
- 'query': query,
- 'num_results': len(results),
- 'num_pages': page + 1 if len(matching_repos) > limit else page,
- 'page': page,
- 'page_size': limit,
- 'results': results,
- }
diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py
index 14376cb19..f0bcc11b8 100644
--- a/endpoints/v1/registry.py
+++ b/endpoints/v1/registry.py
@@ -7,45 +7,49 @@ from time import time
from flask import make_response, request, session, Response, redirect, abort as flask_abort
-from app import storage as store, app, docker_v2_signing_key, metric_queue
+from app import storage as store, app, metric_queue
from auth.auth_context import get_authenticated_user
-from auth.decorators import extract_namespace_repo_from_session, process_auth
-from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
-from data import database
-from data.registry_model import registry_model
-from data.registry_model.blobuploader import upload_blob, BlobUploadSettings, BlobUploadException
-from data.registry_model.manifestbuilder import lookup_manifest_builder
+from auth.permissions import (ReadRepositoryPermission,
+ ModifyRepositoryPermission)
+from auth.process import process_auth, extract_namespace_repo_from_session
+from auth.registry_jwt_auth import get_granted_username
+from data import database
+from data.interfaces.v1 import pre_oci_model as model
from digest import checksums
-from endpoints.v1 import v1_bp, check_v1_push_enabled
-from endpoints.v1.index import ensure_namespace_enabled
-from endpoints.decorators import (anon_protect, check_region_blacklisted, check_repository_state,
- check_readonly)
+from endpoints.v1 import v1_bp
+from endpoints.decorators import anon_protect
from util.http import abort, exact_abort
+from util.registry.filelike import SocketReader
+from util.registry import gzipstream
from util.registry.replication import queue_storage_replication
-from util.request import get_request_ip
+from util.registry.torrent import PieceHasher
+
logger = logging.getLogger(__name__)
+def _finish_image(namespace, repository, image_id):
+ # Checksum is ok, we remove the marker
+ blob_ref = model.update_image_uploading(namespace, repository, image_id, False)
+
+ # Send a job to the work queue to replicate the image layer.
+ queue_storage_replication(namespace, blob_ref)
+
+
def require_completion(f):
- """ This make sure that the image push correctly finished. """
+ """This make sure that the image push correctly finished."""
@wraps(f)
def wrapper(namespace, repository, *args, **kwargs):
image_id = kwargs['image_id']
- repository_ref = registry_model.lookup_repository(namespace, repository)
- if repository_ref is not None:
- legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
- if legacy_image is not None and legacy_image.uploading:
- abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress',
- image_id=image_id)
-
+ if model.is_image_uploading(namespace, repository, image_id):
+ abort(400, 'Image %(image_id)s is being uploaded, retry later',
+ issue='upload-in-progress', image_id=image_id)
return f(namespace, repository, *args, **kwargs)
return wrapper
def set_cache_headers(f):
"""Returns HTTP headers suitable for caching."""
-
@wraps(f)
def wrapper(*args, **kwargs):
# Set TTL to 1 year by default
@@ -55,7 +59,8 @@ def set_cache_headers(f):
headers = {
'Cache-Control': 'public, max-age={0}'.format(ttl),
'Expires': expires,
- 'Last-Modified': 'Thu, 01 Jan 1970 00:00:00 GMT',}
+ 'Last-Modified': 'Thu, 01 Jan 1970 00:00:00 GMT',
+ }
if 'If-Modified-Since' in request.headers:
response = make_response('Not modified', 304)
response.headers.extend(headers)
@@ -64,36 +69,31 @@ def set_cache_headers(f):
# Prevent the Cookie to be sent when the object is cacheable
session.modified = False
return f(*args, **kwargs)
-
return wrapper
@v1_bp.route('/images//layer', methods=['HEAD'])
@process_auth
@extract_namespace_repo_from_session
-@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def head_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
logger.debug('Checking repo permissions')
- if permission.can() or (repository_ref is not None and repository_ref.is_public):
- if repository_ref is None:
- abort(404)
-
+ if permission.can() or model.repository_is_public(namespace, repository):
logger.debug('Looking up placement locations')
- legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
- if legacy_image is None:
+ locations, _ = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
+ if locations is None:
logger.debug('Could not find any blob placement locations')
- abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
+ abort(404, 'Image %(image_id)s not found', issue='unknown-image',
+ image_id=image_id)
# Add the Accept-Ranges header if the storage engine supports resumable
# downloads.
extra_headers = {}
- if store.get_supports_resumable_downloads(legacy_image.blob.placements):
+ if store.get_supports_resumable_downloads(locations):
logger.debug('Storage supports resumable downloads')
extra_headers['Accept-Ranges'] = 'bytes'
@@ -108,31 +108,22 @@ def head_image_layer(namespace, repository, image_id, headers):
@v1_bp.route('/images//layer', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
-@ensure_namespace_enabled
@require_completion
@set_cache_headers
-@check_region_blacklisted()
@anon_protect
def get_image_layer(namespace, repository, image_id, headers):
permission = ReadRepositoryPermission(namespace, repository)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
logger.debug('Checking repo permissions')
- if permission.can() or (repository_ref is not None and repository_ref.is_public):
- if repository_ref is None:
- abort(404)
-
- legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
- if legacy_image is None:
- abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
-
- path = legacy_image.blob.storage_path
- metric_queue.pull_byte_count.Inc(legacy_image.blob.compressed_size, labelvalues=['v1'])
-
+ if permission.can() or model.repository_is_public(namespace, repository):
+ logger.debug('Looking up placement locations and path')
+ locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
+ if not locations or not path:
+ abort(404, 'Image %(image_id)s not found', issue='unknown-image',
+ image_id=image_id)
try:
logger.debug('Looking up the direct download URL for path: %s', path)
- direct_download_url = store.get_direct_download_url(legacy_image.blob.placements, path,
- get_request_ip())
+ direct_download_url = store.get_direct_download_url(locations, path)
if direct_download_url:
logger.debug('Returning direct download URL')
resp = redirect(direct_download_url)
@@ -141,10 +132,11 @@ def get_image_layer(namespace, repository, image_id, headers):
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
logger.debug('Streaming layer data')
- return Response(store.stream_read(legacy_image.blob.placements, path), headers=headers)
+ return Response(store.stream_read(locations, path), headers=headers)
except (IOError, AttributeError):
logger.exception('Image layer data not found')
- abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
+ abort(404, 'Image %(image_id)s not found', issue='unknown-image',
+ image_id=image_id)
abort(403)
@@ -152,94 +144,103 @@ def get_image_layer(namespace, repository, image_id, headers):
@v1_bp.route('/images//layer', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@anon_protect
-@check_readonly
def put_image_layer(namespace, repository, image_id):
logger.debug('Checking repo permissions')
permission = ModifyRepositoryPermission(namespace, repository)
if not permission.can():
abort(403)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
- if repository_ref is None:
- abort(403)
+ logger.debug('Retrieving image')
+ if model.storage_exists(namespace, repository, image_id):
+ exact_abort(409, 'Image already exists')
- logger.debug('Checking for image in manifest builder')
- builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store,
- docker_v2_signing_key)
- if builder is None:
- abort(400)
-
- layer = builder.lookup_layer(image_id)
- if layer is None:
+ v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
+ if v1_metadata is None:
abort(404)
logger.debug('Storing layer data')
+
input_stream = request.stream
if request.headers.get('transfer-encoding') == 'chunked':
# Careful, might work only with WSGI servers supporting chunked
# encoding (Gunicorn)
input_stream = request.environ['wsgi.input']
- expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
- settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'],
- bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'],
- committed_blob_expiration=expiration_sec)
-
- extra_handlers = []
+ # Create a socket reader to read the input stream containing the layer data.
+ sr = SocketReader(input_stream)
# Add a handler that copies the data into a temp file. This is used to calculate the tarsum,
# which is only needed for older versions of Docker.
- requires_tarsum = bool(builder.get_layer_checksums(layer))
+ requires_tarsum = session.get('checksum_format') == 'tarsum'
if requires_tarsum:
tmp, tmp_hndlr = store.temp_store_handler()
- extra_handlers.append(tmp_hndlr)
+ sr.add_handler(tmp_hndlr)
- # Add a handler which computes the simple Docker V1 checksum.
- h, sum_hndlr = checksums.simple_checksum_handler(layer.v1_metadata_string)
- extra_handlers.append(sum_hndlr)
+ # Add a handler to compute the compressed and uncompressed sizes of the layer.
+ size_info, size_hndlr = gzipstream.calculate_size_handler()
+ sr.add_handler(size_hndlr)
- uploaded_blob = None
- try:
- with upload_blob(repository_ref, store, settings,
- extra_blob_stream_handlers=extra_handlers) as manager:
- manager.upload_chunk(app.config, input_stream)
- uploaded_blob = manager.commit_to_blob(app.config)
- except BlobUploadException:
- logger.exception('Exception when writing image data')
- abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)
+ # Add a handler to hash the chunks of the upload for torrenting
+ piece_hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])
+ sr.add_handler(piece_hasher.update)
- # Compute the final checksum
+ # Add a handler which computes the checksum.
+ h, sum_hndlr = checksums.simple_checksum_handler(v1_metadata.compat_json)
+ sr.add_handler(sum_hndlr)
+
+ # Add a handler which computes the content checksum only
+ ch, content_sum_hndlr = checksums.content_checksum_handler()
+ sr.add_handler(content_sum_hndlr)
+
+ # Stream write the data to storage.
+ locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id)
+ with database.CloseForLongOperation(app.config):
+ try:
+ start_time = time()
+ store.stream_write(locations, path, sr)
+ metric_queue.chunk_size.Observe(size_info.compressed_size,
+ labelvalues=[list(locations)[0]])
+ metric_queue.chunk_upload_time.Observe(time() - start_time,
+ labelvalues=[list(locations)[0]])
+ except IOError:
+ logger.exception('Exception when writing image data')
+ abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id)
+
+ # Save the size of the image.
+ model.update_image_sizes(namespace, repository, image_id, size_info.compressed_size,
+ size_info.uncompressed_size)
+
+ # Save the BitTorrent pieces.
+ model.create_bittorrent_pieces(namespace, repository, image_id, piece_hasher.final_piece_hashes())
+
+ # Append the computed checksum.
csums = []
csums.append('sha256:{0}'.format(h.hexdigest()))
try:
if requires_tarsum:
tmp.seek(0)
- csums.append(checksums.compute_tarsum(tmp, layer.v1_metadata_string))
+ csums.append(checksums.compute_tarsum(tmp, v1_metadata.compat_json))
tmp.close()
except (IOError, checksums.TarError) as exc:
logger.debug('put_image_layer: Error when computing tarsum %s', exc)
- # If there was already a precomputed checksum, validate against it now.
- if builder.get_layer_checksums(layer):
- checksum = builder.get_layer_checksums(layer)[0]
- if not builder.validate_layer_checksum(layer, checksum):
- logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
- builder.get_layer_checksums(layer))
- abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
- image_id=image_id)
+ if v1_metadata.checksum is None:
+    # We don't have a checksum stored yet; that's fine, we skip the check.
+    # We don't remove the uploading mark, though: the image is not downloadable yet.
+ session['checksum'] = csums
+ session['content_checksum'] = 'sha256:{0}'.format(ch.hexdigest())
+ return make_response('true', 200)
- # Assign the blob to the layer in the manifest.
- if not builder.assign_layer_blob(layer, uploaded_blob, csums):
- abort(500, 'Something went wrong')
+  # We check whether the provided checksum matches one of the ones we computed
+ if v1_metadata.checksum not in csums:
+ logger.warning('put_image_layer: Wrong checksum')
+ abort(400, 'Checksum mismatch; ignoring the layer for image %(image_id)s',
+ issue='checksum-mismatch', image_id=image_id)
- # Send a job to the work queue to replicate the image layer.
- # TODO: move this into a better place.
- queue_storage_replication(namespace, uploaded_blob)
+ # Mark the image as uploaded.
+ _finish_image(namespace, repository, image_id)
return make_response('true', 200)
@@ -247,51 +248,66 @@ def put_image_layer(namespace, repository, image_id):
@v1_bp.route('/images//checksum', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@anon_protect
-@check_readonly
def put_image_checksum(namespace, repository, image_id):
logger.debug('Checking repo permissions')
permission = ModifyRepositoryPermission(namespace, repository)
if not permission.can():
abort(403)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
- if repository_ref is None:
- abort(403)
-
# Docker Version < 0.10 (tarsum+sha):
old_checksum = request.headers.get('X-Docker-Checksum')
# Docker Version >= 0.10 (sha):
new_checksum = request.headers.get('X-Docker-Checksum-Payload')
+ # Store whether we need to calculate the tarsum.
+ if new_checksum:
+ session['checksum_format'] = 'sha256'
+ else:
+ session['checksum_format'] = 'tarsum'
+
checksum = new_checksum or old_checksum
if not checksum:
abort(400, "Missing checksum for image %(image_id)s", issue='missing-checksum',
image_id=image_id)
- logger.debug('Checking for image in manifest builder')
- builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store,
- docker_v2_signing_key)
- if builder is None:
- abort(400)
+ if not session.get('checksum'):
+ abort(400, 'Checksum not found in Cookie for image %(image_id)s',
+ issue='missing-checksum-cookie', image_id=image_id)
- layer = builder.lookup_layer(image_id)
- if layer is None:
- abort(404)
+ logger.debug('Looking up repo image')
+ v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
+ if not v1_metadata:
+ abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
- if old_checksum:
- builder.save_precomputed_checksum(layer, checksum)
- return make_response('true', 200)
+ logger.debug('Looking up repo layer data')
+ if not v1_metadata.compat_json:
+ abort(404, 'Image not found: %(image_id)s', issue='unknown-image', image_id=image_id)
- if not builder.validate_layer_checksum(layer, checksum):
- logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum,
- builder.get_layer_checksums(layer))
- abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch',
- image_id=image_id)
+ logger.debug('Marking image path')
+ if not model.is_image_uploading(namespace, repository, image_id):
+ abort(409, 'Cannot set checksum for image %(image_id)s',
+ issue='image-write-error', image_id=image_id)
+
+ logger.debug('Storing image and content checksums')
+
+ content_checksum = session.get('content_checksum', None)
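+  # Checksums must be of the form "algorithm:hex"; reject anything else.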
+ checksum_parts = checksum.split(':')
+ if len(checksum_parts) != 2:
+ abort(400, 'Invalid checksum format')
+
+ model.store_docker_v1_checksums(namespace, repository, image_id, checksum, content_checksum)
+
+ if checksum not in session.get('checksum', []):
+ logger.debug('session checksums: %s', session.get('checksum', []))
+ logger.debug('client supplied checksum: %s', checksum)
+ logger.debug('put_image_checksum: Wrong checksum')
+ abort(400, 'Checksum mismatch for image: %(image_id)s',
+ issue='checksum-mismatch', image_id=image_id)
+
+ # Mark the image as uploaded.
+ _finish_image(namespace, repository, image_id)
return make_response('true', 200)
@@ -299,29 +315,28 @@ def put_image_checksum(namespace, repository, image_id):
@v1_bp.route('/images//json', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
-@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def get_image_json(namespace, repository, image_id, headers):
logger.debug('Checking repo permissions')
permission = ReadRepositoryPermission(namespace, repository)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
- if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
+ if not permission.can() and not model.repository_is_public(namespace, repository):
abort(403)
logger.debug('Looking up repo image')
- legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_blob=True)
- if legacy_image is None:
+ v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
+ if v1_metadata is None:
flask_abort(404)
- size = legacy_image.blob.compressed_size
+ logger.debug('Looking up repo layer size')
+ size = model.get_image_size(namespace, repository, image_id)
if size is not None:
# Note: X-Docker-Size is optional and we *can* end up with a NULL image_size,
# so handle this case rather than failing.
headers['X-Docker-Size'] = str(size)
- response = make_response(legacy_image.v1_metadata_string, 200)
+ response = make_response(v1_metadata.compat_json, 200)
response.headers.extend(headers)
return response
@@ -329,26 +344,21 @@ def get_image_json(namespace, repository, image_id, headers):
@v1_bp.route('/images//ancestry', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
-@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def get_image_ancestry(namespace, repository, image_id, headers):
logger.debug('Checking repo permissions')
permission = ReadRepositoryPermission(namespace, repository)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
- if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
+ if not permission.can() and not model.repository_is_public(namespace, repository):
abort(403)
- logger.debug('Looking up repo image')
- legacy_image = registry_model.get_legacy_image(repository_ref, image_id, include_parents=True)
- if legacy_image is None:
+ ancestry_docker_ids = model.image_ancestry(namespace, repository, image_id)
+ if ancestry_docker_ids is None:
abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
- # NOTE: We can not use jsonify here because we are returning a list not an object.
- ancestor_ids = ([legacy_image.docker_image_id] +
- [a.docker_image_id for a in legacy_image.parents])
- response = make_response(json.dumps(ancestor_ids), 200)
+  # We cannot use jsonify here because we are returning a list, not an object
+ response = make_response(json.dumps(ancestry_docker_ids), 200)
response.headers.extend(headers)
return response
@@ -356,26 +366,13 @@ def get_image_ancestry(namespace, repository, image_id, headers):
@v1_bp.route('/images/<image_id>/json', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
-@check_v1_push_enabled()
-@ensure_namespace_enabled
-@check_repository_state
@anon_protect
-@check_readonly
def put_image_json(namespace, repository, image_id):
logger.debug('Checking repo permissions')
permission = ModifyRepositoryPermission(namespace, repository)
if not permission.can():
abort(403)
- repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
- if repository_ref is None:
- abort(403)
-
- builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store,
- docker_v2_signing_key)
- if builder is None:
- abort(400)
-
logger.debug('Parsing image JSON')
try:
uploaded_metadata = request.data
@@ -384,24 +381,60 @@ def put_image_json(namespace, repository, image_id):
pass
if not data or not isinstance(data, dict):
- abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s', issue='invalid-request',
- image_id=image_id, json=request.data)
+ abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s',
+ issue='invalid-request', image_id=image_id, json=request.data)
if 'id' not in data:
- abort(400, 'Missing key `id` in JSON for image: %(image_id)s', issue='invalid-request',
- image_id=image_id)
-
- if image_id != data['id']:
- abort(400, 'JSON data contains invalid id for image: %(image_id)s', issue='invalid-request',
- image_id=image_id)
-
- logger.debug('Looking up repo image')
- location_pref = store.preferred_locations[0]
- username = get_authenticated_user() and get_authenticated_user().username
- layer = builder.start_layer(image_id, uploaded_metadata, location_pref, username,
- app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
- if layer is None:
- abort(400, 'Image %(image_id)s has invalid metadata',
+ abort(400, 'Missing key `id` in JSON for image: %(image_id)s',
issue='invalid-request', image_id=image_id)
+ if image_id != data['id']:
+ abort(400, 'JSON data contains invalid id for image: %(image_id)s',
+ issue='invalid-request', image_id=image_id)
+
+ logger.debug('Looking up repo image')
+
+ if not model.repository_exists(namespace, repository):
+ abort(404, 'Repository does not exist: %(namespace)s/%(repository)s', issue='no-repo',
+ namespace=namespace, repository=repository)
+
+ v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
+ if v1_metadata is None:
+ username = get_authenticated_user() and get_authenticated_user().username
+ if not username:
+ username = get_granted_username()
+
+ logger.debug('Image not found, creating or linking image with initiating user context: %s',
+ username)
+ location_pref = store.preferred_locations[0]
+ model.create_or_link_image(username, namespace, repository, image_id, location_pref)
+ v1_metadata = model.docker_v1_metadata(namespace, repository, image_id)
+
+ # Create a temporary tag to prevent this image from getting garbage collected while the push
+ # is in progress.
+ model.create_temp_hidden_tag(namespace, repository, image_id,
+ app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
+
+ parent_id = data.get('parent', None)
+ if parent_id:
+ logger.debug('Looking up parent image')
+ if model.docker_v1_metadata(namespace, repository, parent_id) is None:
+      abort(400, 'Image %(image_id)s depends on non-existent parent image %(parent_id)s',
+ issue='invalid-request', image_id=image_id, parent_id=parent_id)
+
+ logger.debug('Checking if image already exists')
+ if v1_metadata and not model.is_image_uploading(namespace, repository, image_id):
+ exact_abort(409, 'Image already exists')
+
+ model.update_image_uploading(namespace, repository, image_id, True)
+
+  # If we reach this point, this is either a new image or a retry of a
+  # failed push; save the metadata.
+ command_list = data.get('container_config', {}).get('Cmd', None)
+ command = json.dumps(command_list) if command_list else None
+
+ logger.debug('Setting image metadata')
+ model.update_docker_v1_metadata(namespace, repository, image_id, data.get('created'),
+ data.get('comment'), command, uploaded_metadata, parent_id)
+
return make_response('true', 200)
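For reviewers, a self-contained sketch of the command extraction performed above; extract_command and the sample JSON are illustrative, not part of the model interface.

    import json

    def extract_command(metadata_json):
        # The v1 image JSON may carry the container's command under
        # container_config.Cmd; it is re-serialized for storage, or None.
        data = json.loads(metadata_json)
        command_list = data.get('container_config', {}).get('Cmd', None)
        return json.dumps(command_list) if command_list else None

    example = '{"id": "abc", "container_config": {"Cmd": ["/bin/sh", "-c", "echo hi"]}}'
    print(extract_command(example))  # ["/bin/sh", "-c", "echo hi"]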
diff --git a/endpoints/v1/tag.py b/endpoints/v1/tag.py
index 67f37e098..917cc6a6f 100644
--- a/endpoints/v1/tag.py
+++ b/endpoints/v1/tag.py
@@ -3,16 +3,18 @@ import json
from flask import abort, request, jsonify, make_response, session
-from app import storage, docker_v2_signing_key
-from auth.decorators import process_auth
-from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission)
-from data.registry_model import registry_model
-from data.registry_model.manifestbuilder import lookup_manifest_builder
-from endpoints.decorators import (anon_protect, parse_repository_name, check_repository_state,
- check_readonly)
-from endpoints.v1 import v1_bp, check_v1_push_enabled
-from util.audit import track_and_log
+
from util.names import TAG_ERROR, TAG_REGEX
+from auth.permissions import (ReadRepositoryPermission,
+ ModifyRepositoryPermission)
+from auth.process import process_auth
+from data.interfaces.v1 import pre_oci_model as model
+from endpoints.common import parse_repository_name
+from endpoints.decorators import anon_protect
+from endpoints.v1 import v1_bp
+from endpoints.trackhelper import track_and_log
+
logger = logging.getLogger(__name__)
@@ -23,12 +25,10 @@ logger = logging.getLogger(__name__)
@parse_repository_name()
def get_tags(namespace_name, repo_name):
permission = ReadRepositoryPermission(namespace_name, repo_name)
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
- if permission.can() or (repository_ref is not None and repository_ref.is_public):
- if repository_ref is None:
- abort(404)
- tag_map = registry_model.get_legacy_tags_map(repository_ref, storage)
+ if permission.can() or model.repository_is_public(namespace_name, repo_name):
+ tags = model.list_tags(namespace_name, repo_name)
+ tag_map = {tag.name: tag.image.docker_image_id for tag in tags}
return jsonify(tag_map)
abort(403)
@@ -40,12 +40,9 @@ def get_tags(namespace_name, repo_name):
@parse_repository_name()
def get_tag(namespace_name, repo_name, tag):
permission = ReadRepositoryPermission(namespace_name, repo_name)
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
- if permission.can() or (repository_ref is not None and repository_ref.is_public):
- if repository_ref is None:
- abort(404)
- image_id = registry_model.get_tag_legacy_image_id(repository_ref, tag, storage)
+ if permission.can() or model.repository_is_public(namespace_name, repo_name):
+ image_id = model.find_image_id_by_tag(namespace_name, repo_name, tag)
if image_id is None:
abort(404)
@@ -60,40 +57,21 @@ def get_tag(namespace_name, repo_name, tag):
@process_auth
@anon_protect
@parse_repository_name()
-@check_repository_state
-@check_v1_push_enabled()
-@check_readonly
def put_tag(namespace_name, repo_name, tag):
permission = ModifyRepositoryPermission(namespace_name, repo_name)
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
- if permission.can() and repository_ref is not None:
+ if permission.can():
if not TAG_REGEX.match(tag):
abort(400, TAG_ERROR)
image_id = json.loads(request.data)
+ model.create_or_update_tag(namespace_name, repo_name, image_id, tag)
- # Check for the image ID first in a builder (for an in-progress push).
- builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), storage,
- docker_v2_signing_key)
- if builder is not None:
- layer = builder.lookup_layer(image_id)
- if layer is not None:
- commited_tag = builder.commit_tag_and_manifest(tag, layer)
- if commited_tag is None:
- abort(400)
+ # Store the updated tag.
+ if 'pushed_tags' not in session:
+ session['pushed_tags'] = {}
- return make_response('Created', 200)
-
- # Check if there is an existing image we should use (for PUT calls outside of a normal push
- # operation).
- legacy_image = registry_model.get_legacy_image(repository_ref, image_id)
- if legacy_image is None:
- abort(400)
-
- if registry_model.retarget_tag(repository_ref, tag, legacy_image, storage,
- docker_v2_signing_key) is None:
- abort(400)
+ session['pushed_tags'][tag] = image_id
return make_response('Created', 200)
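A short sketch of the session bookkeeping above, with a plain dict standing in for flask.session (an assumption for illustration only):

    session = {}  # stand-in for flask.session

    def remember_pushed_tag(session, tag, image_id):
        # Same bookkeeping as the handler: tags pushed during this session
        # are recorded so later requests in the push can consult them.
        if 'pushed_tags' not in session:
            session['pushed_tags'] = {}
        session['pushed_tags'][tag] = image_id

    remember_pushed_tag(session, 'latest', 'deadbeef' * 8)
    assert session['pushed_tags']['latest'] == 'deadbeef' * 8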
@@ -104,18 +82,12 @@ def put_tag(namespace_name, repo_name, tag):
@process_auth
@anon_protect
@parse_repository_name()
-@check_repository_state
-@check_v1_push_enabled()
-@check_readonly
def delete_tag(namespace_name, repo_name, tag):
permission = ModifyRepositoryPermission(namespace_name, repo_name)
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name, kind_filter='image')
- if permission.can() and repository_ref is not None:
- if not registry_model.delete_tag(repository_ref, tag):
- abort(404)
-
- track_and_log('delete_tag', repository_ref, tag=tag)
+ if permission.can():
+ model.delete_tag(namespace_name, repo_name, tag)
+ track_and_log('delete_tag', model.get_repository(namespace_name, repo_name), tag=tag)
return make_response('Deleted', 200)
abort(403)
diff --git a/endpoints/v2/__init__.py b/endpoints/v2/__init__.py
index 845ad258f..d6af69db0 100644
--- a/endpoints/v2/__init__.py
+++ b/endpoints/v2/__init__.py
@@ -10,30 +10,33 @@ from semantic_version import Spec
import features
-from app import app, metric_queue, get_app_url
-from auth.auth_context import get_authenticated_context
-from auth.permissions import (
- ReadRepositoryPermission, ModifyRepositoryPermission, AdministerRepositoryPermission)
+from app import app, metric_queue, get_app_url, license_validator
+from auth.auth_context import get_grant_context
+from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission,
+ AdministerRepositoryPermission)
from auth.registry_jwt_auth import process_registry_jwt_auth, get_auth_headers
-from data.registry_model import registry_model
-from data.readreplica import ReadOnlyModeException
-from endpoints.decorators import anon_protect, anon_allowed, route_show_if
-from endpoints.v2.errors import (V2RegistryException, Unauthorized, Unsupported, NameUnknown,
- ReadOnlyMode)
+from data import model
+from endpoints.decorators import anon_protect, anon_allowed
+from endpoints.v2.errors import V2RegistryException, Unauthorized
from util.http import abort
from util.metrics.metricqueue import time_blueprint
from util.registry.dockerver import docker_version
from util.pagination import encrypt_page_token, decrypt_page_token
+
logger = logging.getLogger(__name__)
+
v2_bp = Blueprint('v2', __name__)
+license_validator.enforce_license_before_request(v2_bp)
time_blueprint(v2_bp, metric_queue)
@v2_bp.app_errorhandler(V2RegistryException)
def handle_registry_v2_exception(error):
- response = jsonify({'errors': [error.as_dict()]})
+ response = jsonify({
+ 'errors': [error.as_dict()]
+ })
response.status_code = error.http_status_code
if response.status_code == 401:
@@ -42,24 +45,14 @@ def handle_registry_v2_exception(error):
return response
-@v2_bp.app_errorhandler(ReadOnlyModeException)
-def handle_readonly(ex):
- error = ReadOnlyMode()
- response = jsonify({'errors': [error.as_dict()]})
- response.status_code = error.http_status_code
- logger.debug('sending response: %s', response.get_data())
- return response
+_MAX_RESULTS_PER_PAGE = 50
-_MAX_RESULTS_PER_PAGE = app.config.get('V2_PAGINATION_SIZE', 100)
-
-
-def paginate(start_id_kwarg_name='start_id', limit_kwarg_name='limit',
+def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset',
callback_kwarg_name='pagination_callback'):
"""
Decorates a handler adding a parsed pagination token and a callback to encode a response token.
"""
-
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
@@ -69,19 +62,20 @@ def paginate(start_id_kwarg_name='start_id', limit_kwarg_name='limit',
requested_limit = 0
limit = max(min(requested_limit, _MAX_RESULTS_PER_PAGE), 1)
- next_page_token = request.args.get('next_page', request.args.get('last', None))
+ next_page_token = request.args.get('next_page', None)
# Decrypt the next page token, if any.
- start_id = None
+ offset = 0
page_info = decrypt_page_token(next_page_token)
if page_info is not None:
- start_id = page_info.get('start_id', None)
+ # Note: we use offset here instead of ID >= n because one of the V2 queries is a UNION.
+ offset = page_info.get('offset', 0)
- def callback(results, response):
- if len(results) <= limit:
+ def callback(num_results, response):
+ if num_results < limit:
return
- next_page_token = encrypt_page_token({'start_id': max([obj.id for obj in results])})
+ next_page_token = encrypt_page_token({'offset': limit + offset})
link_url = os.path.join(get_app_url(), url_for(request.endpoint, **request.view_args))
link_param = urlencode({'n': limit, 'next_page': next_page_token})
@@ -89,7 +83,7 @@ def paginate(start_id_kwarg_name='start_id', limit_kwarg_name='limit',
response.headers['Link'] = link
kwargs[limit_kwarg_name] = limit
- kwargs[start_id_kwarg_name] = start_id
+ kwargs[offset_kwarg_name] = offset
kwargs[callback_kwarg_name] = callback
return func(*args, **kwargs)
return wrapped
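To make the token flow above concrete: the encode/decode helpers below are simplified stand-ins for util.pagination's encrypt_page_token/decrypt_page_token (the real ones also encrypt); only the offset arithmetic is taken from the callback above.

    import base64
    import json

    def encode_page_token(payload):
        return base64.urlsafe_b64encode(json.dumps(payload).encode('utf-8')).decode('ascii')

    def decode_page_token(token):
        return None if token is None else json.loads(base64.urlsafe_b64decode(token.encode('ascii')))

    limit, offset = 50, 0
    token = encode_page_token({'offset': limit + offset})  # as in callback()
    assert decode_page_token(token)['offset'] == 50
    assert decode_page_token(None) is None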
@@ -100,39 +94,26 @@ def _require_repo_permission(permission_class, scopes=None, allow_public=False):
def wrapper(func):
@wraps(func)
def wrapped(namespace_name, repo_name, *args, **kwargs):
- logger.debug('Checking permission %s for repo: %s/%s', permission_class, namespace_name,
- repo_name)
-
+ logger.debug('Checking permission %s for repo: %s/%s', permission_class,
+ namespace_name, repo_name)
permission = permission_class(namespace_name, repo_name)
- if permission.can():
+ if (permission.can() or
+ (allow_public and
+ model.repository.repository_is_public(namespace_name, repo_name))):
return func(namespace_name, repo_name, *args, **kwargs)
-
repository = namespace_name + '/' + repo_name
- if allow_public:
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None or not repository_ref.is_public:
- raise Unauthorized(repository=repository, scopes=scopes)
-
- if repository_ref.kind != 'image':
- msg = 'This repository is for managing %s and not container images.' % repository_ref.kind
- raise Unsupported(detail=msg)
-
- if repository_ref.is_public:
- if not features.ANONYMOUS_ACCESS:
- raise Unauthorized(repository=repository, scopes=scopes)
-
- return func(namespace_name, repo_name, *args, **kwargs)
-
raise Unauthorized(repository=repository, scopes=scopes)
return wrapped
return wrapper
-require_repo_read = _require_repo_permission(ReadRepositoryPermission, scopes=['pull'],
+require_repo_read = _require_repo_permission(ReadRepositoryPermission,
+ scopes=['pull'],
allow_public=True)
-require_repo_write = _require_repo_permission(ModifyRepositoryPermission, scopes=['pull', 'push'])
-require_repo_admin = _require_repo_permission(AdministerRepositoryPermission, scopes=[
- 'pull', 'push'])
+require_repo_write = _require_repo_permission(ModifyRepositoryPermission,
+ scopes=['pull', 'push'])
+require_repo_admin = _require_repo_permission(AdministerRepositoryPermission,
+ scopes=['pull', 'push'])
def get_input_stream(flask_request):
@@ -141,6 +122,18 @@ def get_input_stream(flask_request):
return flask_request.stream
+def route_show_if(value):
+ def decorator(f):
+ @wraps(f)
+ def decorated_function(*args, **kwargs):
+ if not value:
+ abort(404)
+
+ return f(*args, **kwargs)
+ return decorated_function
+ return decorator
+
+
@v2_bp.route('/')
@route_show_if(features.ADVERTISE_V2)
@process_registry_jwt_auth()
@@ -151,12 +144,12 @@ def v2_support_enabled():
# Check if our version is one of the blacklisted versions, if we can't
# identify the version (None) we will fail open and assume that it is
# newer and therefore should not be blacklisted.
  if docker_ver is not None and Spec(app.config['BLACKLIST_V2_SPEC']).match(docker_ver):
abort(404)
response = make_response('true', 200)
- if get_authenticated_context() is None:
+ if get_grant_context() is None:
response = make_response('true', 401)
response.headers.extend(get_auth_headers())
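The fail-open blacklist check, isolated for review; the '<1.6.0' spec is an assumed config value, not the deployment default.

    from semantic_version import Spec, Version

    def is_blacklisted(docker_ver, blacklist_spec='<1.6.0'):
        # Fail open: an unidentifiable client version (None) is treated as
        # newer than any blacklisted release and allowed through.
        return docker_ver is not None and Spec(blacklist_spec).match(Version(docker_ver))

    assert not is_blacklisted(None)
    assert is_blacklisted('1.5.0')
    assert not is_blacklisted('1.13.1')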
@@ -168,4 +161,5 @@ from endpoints.v2 import (
catalog,
manifest,
tag,
- v2auth,)
+ v2auth,
+)
diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py
index 141c37990..ba0acf9ad 100644
--- a/endpoints/v2/blob.py
+++ b/endpoints/v2/blob.py
@@ -1,31 +1,30 @@
import logging
import re
+import time
from flask import url_for, request, redirect, Response, abort as flask_abort
-from app import storage, app, get_app_url, metric_queue, model_cache
+import resumablehashlib
+
+from app import storage, app, get_app_url, metric_queue
from auth.registry_jwt_auth import process_registry_jwt_auth
-from auth.permissions import ReadRepositoryPermission
from data import database
-from data.registry_model import registry_model
-from data.registry_model.blobuploader import (create_blob_upload, retrieve_blob_upload_manager,
- complete_when_uploaded, BlobUploadSettings,
- BlobUploadException, BlobTooLargeException,
- BlobRangeMismatchException)
+from data.interfaces.v2 import pre_oci_model as model
from digest import digest_tools
-from endpoints.decorators import (anon_protect, anon_allowed, parse_repository_name,
- check_region_blacklisted, check_readonly)
+from endpoints.common import parse_repository_name
from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
-from endpoints.v2.errors import (
- BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported, NameUnknown, LayerTooLarge,
- InvalidRequest, BlobDownloadGeoBlocked)
+from endpoints.v2.errors import (BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported,
+ NameUnknown)
+from endpoints.decorators import anon_protect
from util.cache import cache_control
-from util.names import parse_namespace_repository
-from util.request import get_request_ip
+from util.registry.filelike import wrap_with_handler, StreamSlice
+from util.registry.gzipstream import calculate_size_handler
+from util.registry.torrent import PieceHasher
logger = logging.getLogger(__name__)
+
BASE_BLOB_ROUTE = '/<repopath:repository>/blobs/<regex("{0}"):digest>'
BLOB_DIGEST_ROUTE = BASE_BLOB_ROUTE.format(digest_tools.DIGEST_PATTERN)
RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$')
@@ -40,23 +39,23 @@ class _InvalidRangeHeader(Exception):
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
-@anon_allowed
+@anon_protect
@cache_control(max_age=31536000)
def check_blob_exists(namespace_name, repo_name, digest):
# Find the blob.
- blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
+ blob = model.get_blob_by_digest(namespace_name, repo_name, digest)
if blob is None:
raise BlobUnknown()
# Build the response headers.
headers = {
'Docker-Content-Digest': digest,
- 'Content-Length': blob.compressed_size,
+ 'Content-Length': blob.size,
'Content-Type': BLOB_CONTENT_TYPE,
}
# If our storage supports range requests, let the client know.
- if storage.get_supports_resumable_downloads(blob.placements):
+ if storage.get_supports_resumable_downloads(blob.locations):
headers['Accept-Ranges'] = 'bytes'
# Write the response to the client.
@@ -67,12 +66,11 @@ def check_blob_exists(namespace_name, repo_name, digest):
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
-@anon_allowed
-@check_region_blacklisted(BlobDownloadGeoBlocked)
+@anon_protect
@cache_control(max_age=31536000)
def download_blob(namespace_name, repo_name, digest):
# Find the blob.
- blob = registry_model.get_cached_repo_blob(model_cache, namespace_name, repo_name, digest)
+ blob = model.get_blob_by_digest(namespace_name, repo_name, digest)
if blob is None:
raise BlobUnknown()
@@ -80,15 +78,15 @@ def download_blob(namespace_name, repo_name, digest):
headers = {'Docker-Content-Digest': digest}
# If our storage supports range requests, let the client know.
- if storage.get_supports_resumable_downloads(blob.placements):
+ if storage.get_supports_resumable_downloads(blob.locations):
headers['Accept-Ranges'] = 'bytes'
- metric_queue.pull_byte_count.Inc(blob.compressed_size, labelvalues=['v2'])
+ # Find the storage path for the blob.
+ path = model.get_blob_path(blob)
# Short-circuit by redirecting if the storage supports it.
- path = blob.storage_path
logger.debug('Looking up the direct download URL for path: %s', path)
- direct_download_url = storage.get_direct_download_url(blob.placements, path, get_request_ip())
+ direct_download_url = storage.get_direct_download_url(blob.locations, path)
if direct_download_url:
logger.debug('Returning direct download URL')
resp = redirect(direct_download_url)
@@ -100,135 +98,69 @@ def download_blob(namespace_name, repo_name, digest):
with database.CloseForLongOperation(app.config):
# Stream the response to the client.
+    # dict.update() returns None, so apply the header update before
+    # constructing the Response instead of passing its result inline.
+    headers.update({
+      'Content-Length': blob.size,
+      'Content-Type': BLOB_CONTENT_TYPE,
+    })
     return Response(
-      storage.stream_read(blob.placements, path),
-      headers=headers.update({
-        'Content-Length': blob.compressed_size,
-        'Content-Type': BLOB_CONTENT_TYPE,
-      }),
+      storage.stream_read(blob.locations, path),
+      headers=headers,
     )
-def _try_to_mount_blob(repository_ref, mount_blob_digest):
- """ Attempts to mount a blob requested by the user from another repository. """
- logger.debug('Got mount request for blob `%s` into `%s`', mount_blob_digest, repository_ref)
- from_repo = request.args.get('from', None)
- if from_repo is None:
- raise InvalidRequest(message='Missing `from` repository argument')
-
- # Ensure the user has access to the repository.
- logger.debug('Got mount request for blob `%s` under repository `%s` into `%s`',
- mount_blob_digest, from_repo, repository_ref)
- from_namespace, from_repo_name = parse_namespace_repository(from_repo,
- app.config['LIBRARY_NAMESPACE'],
- include_tag=False)
-
- from_repository_ref = registry_model.lookup_repository(from_namespace, from_repo_name)
- if from_repository_ref is None:
- logger.debug('Could not find from repo: `%s/%s`', from_namespace, from_repo_name)
- return None
-
- # First check permission.
- read_permission = ReadRepositoryPermission(from_namespace, from_repo_name).can()
- if not read_permission:
- # If no direct permission, check if the repostory is public.
- if not from_repository_ref.is_public:
- logger.debug('No permission to mount blob `%s` under repository `%s` into `%s`',
- mount_blob_digest, from_repo, repository_ref)
- return None
-
- # Lookup if the mount blob's digest exists in the repository.
- mount_blob = registry_model.get_cached_repo_blob(model_cache, from_namespace, from_repo_name,
- mount_blob_digest)
- if mount_blob is None:
- logger.debug('Blob `%s` under repository `%s` not found', mount_blob_digest, from_repo)
- return None
-
- logger.debug('Mounting blob `%s` under repository `%s` into `%s`', mount_blob_digest,
- from_repo, repository_ref)
-
- # Mount the blob into the current repository and return that we've completed the operation.
- expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
- mounted = registry_model.mount_blob_into_repository(mount_blob, repository_ref, expiration_sec)
- if not mounted:
- logger.debug('Could not mount blob `%s` under repository `%s` not found', mount_blob_digest,
- from_repo)
- return
-
- # Return the response for the blob indicating that it was mounted, and including its content
- # digest.
- logger.debug('Mounted blob `%s` under repository `%s` into `%s`', mount_blob_digest,
- from_repo, repository_ref)
-
- namespace_name = repository_ref.namespace_name
- repo_name = repository_ref.name
-
- return Response(
- status=201,
- headers={
- 'Docker-Content-Digest': mount_blob_digest,
- 'Location':
- get_app_url() + url_for('v2.download_blob',
- repository='%s/%s' % (namespace_name, repo_name),
- digest=mount_blob_digest),
- },
- )
-
@v2_bp.route('/<repopath:repository>/blobs/uploads/', methods=['POST'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def start_blob_upload(namespace_name, repo_name):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
+ # Begin the blob upload process in the database and storage.
+ location_name = storage.preferred_locations[0]
+ new_upload_uuid, upload_metadata = storage.initiate_chunked_upload(location_name)
+ repository_exists = model.create_blob_upload(namespace_name, repo_name, new_upload_uuid,
+ location_name, upload_metadata)
+ if not repository_exists:
raise NameUnknown()
- # Check for mounting of a blob from another repository.
- mount_blob_digest = request.args.get('mount', None)
- if mount_blob_digest is not None:
- response = _try_to_mount_blob(repository_ref, mount_blob_digest)
- if response is not None:
- return response
-
- # Begin the blob upload process.
- blob_uploader = create_blob_upload(repository_ref, storage, _upload_settings())
- if blob_uploader is None:
- logger.debug('Could not create a blob upload for `%s/%s`', namespace_name, repo_name)
- raise InvalidRequest(message='Unable to start blob upload for unknown repository')
-
- # Check if the blob will be uploaded now or in followup calls. If the `digest` is given, then
- # the upload will occur as a monolithic chunk in this call. Otherwise, we return a redirect
- # for the client to upload the chunks as distinct operations.
digest = request.args.get('digest', None)
if digest is None:
# Short-circuit because the user will send the blob data in another request.
return Response(
status=202,
headers={
- 'Docker-Upload-UUID': blob_uploader.blob_upload_id,
+ 'Docker-Upload-UUID': new_upload_uuid,
'Range': _render_range(0),
- 'Location':
- get_app_url() + url_for('v2.upload_chunk',
- repository='%s/%s' % (namespace_name, repo_name),
- upload_uuid=blob_uploader.blob_upload_id)
+ 'Location': get_app_url() + url_for('v2.upload_chunk',
+ repository='%s/%s' % (namespace_name, repo_name),
+ upload_uuid=new_upload_uuid)
},
)
- # Upload the data sent and commit it to a blob.
- with complete_when_uploaded(blob_uploader):
- _upload_chunk(blob_uploader, digest)
+ # The user plans to send us the entire body right now.
+ # Find the upload.
+ blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, new_upload_uuid)
+ if blob_upload is None:
+ raise BlobUploadUnknown()
+
+ # Upload the chunk to storage while calculating some metadata and updating
+ # the upload state.
+ updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
+ if updated_blob_upload is None:
+ _abort_range_not_satisfiable(blob_upload.byte_count, new_upload_uuid)
+
+ # Save the upload state to the database.
+ model.update_blob_upload(updated_blob_upload)
+
+ # Finalize the upload process in the database and storage.
+ _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
# Write the response to the client.
return Response(
status=201,
headers={
'Docker-Content-Digest': digest,
- 'Location':
- get_app_url() + url_for('v2.download_blob',
- repository='%s/%s' % (namespace_name, repo_name),
- digest=digest),
+ 'Location': get_app_url() + url_for('v2.download_blob',
+ repository='%s/%s' % (namespace_name, repo_name),
+ digest=digest),
},
)
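The protocol fork in start_blob_upload, reduced to its decision; upload_mode is a hypothetical helper for illustration.

    def upload_mode(args):
        # POST without ?digest= -> 202 plus Docker-Upload-UUID for chunked uploads;
        # POST with ?digest= -> the request body is the entire blob, answered 201.
        return 'monolithic' if args.get('digest') else 'chunked'

    assert upload_mode({}) == 'chunked'
    assert upload_mode({'digest': 'sha256:' + '0' * 64}) == 'monolithic'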
@@ -239,19 +171,15 @@ def start_blob_upload(namespace_name, repo_name):
@require_repo_write
@anon_protect
def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
- if uploader is None:
+ blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+ if blob_upload is None:
raise BlobUploadUnknown()
return Response(
status=204,
headers={
'Docker-Upload-UUID': upload_uuid,
- 'Range': _render_range(uploader.blob_upload.byte_count + 1), # byte ranges are exclusive
+      'Range': _render_range(blob_upload.byte_count + 1),  # byte ranges are exclusive
},
)
@@ -261,25 +189,27 @@ def fetch_existing_upload(namespace_name, repo_name, upload_uuid):
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def upload_chunk(namespace_name, repo_name, upload_uuid):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
- if uploader is None:
+ # Find the upload.
+ blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+ if blob_upload is None:
raise BlobUploadUnknown()
- # Upload the chunk for the blob.
- _upload_chunk(uploader)
+ # Upload the chunk to storage while calculating some metadata and updating
+ # the upload state.
+ updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
+ if updated_blob_upload is None:
+ _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)
+
+ # Save the upload state to the database.
+ model.update_blob_upload(updated_blob_upload)
# Write the response to the client.
return Response(
status=204,
headers={
'Location': _current_request_url(),
- 'Range': _render_range(uploader.blob_upload.byte_count, with_bytes_prefix=False),
+ 'Range': _render_range(updated_blob_upload.byte_count, with_bytes_prefix=False),
'Docker-Upload-UUID': upload_uuid,
},
)
@@ -290,7 +220,6 @@ def upload_chunk(namespace_name, repo_name, upload_uuid):
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
# Ensure the digest is present before proceeding.
digest = request.args.get('digest', None)
@@ -298,26 +227,28 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
raise BlobUploadInvalid(detail={'reason': 'Missing digest arg on monolithic upload'})
# Find the upload.
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
- if uploader is None:
+ blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+ if blob_upload is None:
raise BlobUploadUnknown()
- # Upload the chunk for the blob and commit it once complete.
- with complete_when_uploaded(uploader):
- _upload_chunk(uploader, digest)
+ # Upload the chunk to storage while calculating some metadata and updating
+ # the upload state.
+ updated_blob_upload = _upload_chunk(blob_upload, request.headers.get('range'))
+ if updated_blob_upload is None:
+ _abort_range_not_satisfiable(blob_upload.byte_count, upload_uuid)
+
+ # Finalize the upload process in the database and storage.
+ _finish_upload(namespace_name, repo_name, updated_blob_upload, digest)
# Write the response to the client.
- return Response(status=201, headers={
- 'Docker-Content-Digest': digest,
- 'Location':
- get_app_url() + url_for('v2.download_blob',
- repository='%s/%s' % (namespace_name, repo_name),
- digest=digest),
- },
+ return Response(
+ status=201,
+ headers={
+ 'Docker-Content-Digest': digest,
+ 'Location': get_app_url() + url_for('v2.download_blob',
+ repository='%s/%s' % (namespace_name, repo_name),
+ digest=digest),
+ }
)
@@ -326,17 +257,17 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid):
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def cancel_upload(namespace_name, repo_name, upload_uuid):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- uploader = retrieve_blob_upload_manager(repository_ref, upload_uuid, storage, _upload_settings())
- if uploader is None:
+ blob_upload = model.blob_upload_by_uuid(namespace_name, repo_name, upload_uuid)
+ if blob_upload is None:
raise BlobUploadUnknown()
- uploader.cancel_upload()
+ # We delete the record for the upload first, since if the partial upload in
+ # storage fails to delete, it doesn't break anything.
+ model.delete_blob_upload(namespace_name, repo_name, upload_uuid)
+ storage.cancel_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
+ blob_upload.storage_metadata)
+
return Response(status=204)
@@ -345,7 +276,6 @@ def cancel_upload(namespace_name, repo_name, upload_uuid):
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def delete_digest(namespace_name, repo_name, upload_uuid):
# We do not support deleting arbitrary digests, as they break repo images.
raise Unsupported()
@@ -367,13 +297,11 @@ def _abort_range_not_satisfiable(valid_end, upload_uuid):
Writes a failure response for scenarios where the registry cannot function
with the provided range.
- TODO: Unify this with the V2RegistryException class.
+ TODO(jzelinskie): Unify this with the V2RegistryException class.
"""
- flask_abort(
- Response(status=416, headers={
- 'Location': _current_request_url(),
- 'Range': '0-{0}'.format(valid_end),
- 'Docker-Upload-UUID': upload_uuid}))
+ flask_abort(Response(status=416, headers={'Location': _current_request_url(),
+ 'Range': '0-{0}'.format(valid_end),
+ 'Docker-Upload-UUID': upload_uuid}))
def _parse_range_header(range_header_text):
@@ -412,39 +340,173 @@ def _start_offset_and_length(range_header):
return start_offset, length
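A sketch of the Range parsing contract implied above, assuming the second bound is an exclusive end offset; parse_range is illustrative, _parse_range_header is the real implementation.

    import re

    RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$')  # as above

    def parse_range(range_header_text):
        match = RANGE_HEADER_REGEX.match(range_header_text or '')
        if match is None:
            raise ValueError('invalid range header: %r' % range_header_text)
        start, end = int(match.group(1)), int(match.group(2))
        return start, end - start  # an offset and a length, as _upload_chunk expects

    assert parse_range('bytes=0-1024') == (0, 1024)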
-def _upload_settings():
- """ Returns the settings for instantiating a blob upload manager. """
- expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
- settings = BlobUploadSettings(maximum_blob_size=app.config['MAXIMUM_LAYER_SIZE'],
- bittorrent_piece_size=app.config['BITTORRENT_PIECE_SIZE'],
- committed_blob_expiration=expiration_sec)
- return settings
-
-
-def _upload_chunk(blob_uploader, commit_digest=None):
- """ Performs uploading of a chunk of data in the current request's stream, via the blob uploader
- given. If commit_digest is specified, the upload is committed to a blob once the stream's
- data has been read and stored.
+def _upload_chunk(blob_upload, range_header):
"""
- start_offset, length = _start_offset_and_length(request.headers.get('range'))
- if None in {start_offset, length}:
- raise InvalidRequest(message='Invalid range header')
+ Calculates metadata while uploading a chunk to storage.
- input_fp = get_input_stream(request)
+ Returns a BlobUpload object or None if there was a failure.
+ """
+ # Get the offset and length of the current chunk.
+ start_offset, length = _start_offset_and_length(range_header)
+ if blob_upload is None or None in {start_offset, length}:
+ logger.error('Invalid arguments provided to _upload_chunk')
+ return None
- try:
- # Upload the data received.
- blob_uploader.upload_chunk(app.config, input_fp, start_offset, length, metric_queue)
+ if start_offset > 0 and start_offset > blob_upload.byte_count:
+    logger.error('start_offset provided to _upload_chunk greater than blob_upload.byte_count')
+ return None
- if commit_digest is not None:
- # Commit the upload to a blob.
- return blob_uploader.commit_to_blob(app.config, commit_digest)
- except BlobTooLargeException as ble:
- raise LayerTooLarge(uploaded=ble.uploaded, max_allowed=ble.max_allowed)
- except BlobRangeMismatchException:
- logger.exception('Exception when uploading blob to %s', blob_uploader.blob_upload_id)
- _abort_range_not_satisfiable(blob_uploader.blob_upload.byte_count,
- blob_uploader.blob_upload_id)
- except BlobUploadException:
- logger.exception('Exception when uploading blob to %s', blob_uploader.blob_upload_id)
- raise BlobUploadInvalid()
+ location_set = {blob_upload.location_name}
+
+ upload_error = None
+ with database.CloseForLongOperation(app.config):
+ input_fp = get_input_stream(request)
+
+ if start_offset > 0 and start_offset < blob_upload.byte_count:
+ # Skip the bytes which were received on a previous push, which are already stored and
+ # included in the sha calculation
+ overlap_size = blob_upload.byte_count - start_offset
+ input_fp = StreamSlice(input_fp, overlap_size)
+
+ # Update our upload bounds to reflect the skipped portion of the overlap
+ start_offset = blob_upload.byte_count
+ length = max(length - overlap_size, 0)
+
+ # We use this to escape early in case we have already processed all of the bytes the user
+ # wants to upload
+ if length == 0:
+ return blob_upload
+
+ input_fp = wrap_with_handler(input_fp, blob_upload.sha_state.update)
+
+ # Add a hasher for calculating SHA1s for torrents if this is the first chunk and/or we have
+ # already calculated hash data for the previous chunk(s).
+ piece_hasher = None
+ if blob_upload.chunk_count == 0 or blob_upload.piece_sha_state:
+ initial_sha1_value = blob_upload.piece_sha_state or resumablehashlib.sha1()
+ initial_sha1_pieces_value = blob_upload.piece_hashes or ''
+
+ piece_hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'], start_offset,
+ initial_sha1_pieces_value, initial_sha1_value)
+
+ input_fp = wrap_with_handler(input_fp, piece_hasher.update)
+
+ # If this is the first chunk and we're starting at the 0 offset, add a handler to gunzip the
+ # stream so we can determine the uncompressed size. We'll throw out this data if another chunk
+ # comes in, but in the common case the docker client only sends one chunk.
+ size_info = None
+ if start_offset == 0 and blob_upload.chunk_count == 0:
+ size_info, fn = calculate_size_handler()
+ input_fp = wrap_with_handler(input_fp, fn)
+
+ start_time = time.time()
+ length_written, new_metadata, upload_error = storage.stream_upload_chunk(
+ location_set,
+ blob_upload.uuid,
+ start_offset,
+ length,
+ input_fp,
+ blob_upload.storage_metadata,
+ content_type=BLOB_CONTENT_TYPE,
+ )
+
+ if upload_error is not None:
+ logger.error('storage.stream_upload_chunk returned error %s', upload_error)
+ return None
+
+ # Update the chunk upload time metric.
+ metric_queue.chunk_upload_time.Observe(time.time() - start_time,
+ labelvalues=[length_written, list(location_set)[0]])
+
+ # If we determined an uncompressed size and this is the first chunk, add it to the blob.
+ # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks.
+ if size_info is not None and blob_upload.chunk_count == 0 and size_info.is_valid:
+ blob_upload.uncompressed_byte_count = size_info.uncompressed_size
+ elif length_written > 0:
+ # Otherwise, if we wrote some bytes and the above conditions were not met, then we don't
+ # know the uncompressed size.
+ blob_upload.uncompressed_byte_count = None
+
+ if piece_hasher is not None:
+ blob_upload.piece_hashes = piece_hasher.piece_hashes
+ blob_upload.piece_sha_state = piece_hasher.hash_fragment
+
+ blob_upload.storage_metadata = new_metadata
+ blob_upload.byte_count += length_written
+ blob_upload.chunk_count += 1
+
+ return blob_upload
+
+
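The handler-chaining idea used throughout _upload_chunk, shown on an iterator of chunks; wrap_with_handler in util.registry.filelike does the equivalent for file-like objects, and this stand-in only illustrates the pattern.

    import hashlib

    def wrap_with_handler(chunks, handler):
        # Every chunk flowing to storage is also fed to a side-effect handler
        # (sha256 state, piece hasher, size counter) without buffering.
        for chunk in chunks:
            handler(chunk)
            yield chunk

    sha = hashlib.sha256()
    stored = b''.join(wrap_with_handler(iter([b'hello ', b'world']), sha.update))
    assert stored == b'hello world'
    assert sha.hexdigest() == hashlib.sha256(b'hello world').hexdigest()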
+def _validate_digest(blob_upload, expected_digest):
+ """
+ Verifies that the digest's SHA matches that of the uploaded data.
+ """
+ computed_digest = digest_tools.sha256_digest_from_hashlib(blob_upload.sha_state)
+ if not digest_tools.digests_equal(computed_digest, expected_digest):
+ logger.error('Digest mismatch for upload %s: Expected digest %s, found digest %s',
+ blob_upload.uuid, expected_digest, computed_digest)
+ raise BlobUploadInvalid(detail={'reason': 'Digest mismatch on uploaded blob'})
+
+
+def _finalize_blob_storage(blob_upload, expected_digest):
+ """
+ When an upload is successful, this ends the uploading process from the
+ storage's perspective.
+
+ Returns True if the blob already existed.
+ """
+ final_blob_location = digest_tools.content_path(expected_digest)
+
+ # Move the storage into place, or if this was a re-upload, cancel it
+ with database.CloseForLongOperation(app.config):
+ already_existed = storage.exists({blob_upload.location_name}, final_blob_location)
+ if already_existed:
+ # It already existed, clean up our upload which served as proof that the
+ # uploader had the blob.
+ storage.cancel_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
+ blob_upload.storage_metadata)
+
+ else:
+ # We were the first ones to upload this image (at least to this location)
+ # Let's copy it into place
+ storage.complete_chunked_upload({blob_upload.location_name}, blob_upload.uuid,
+ final_blob_location, blob_upload.storage_metadata)
+ return already_existed
+
+
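Why the existence check above is safe to short-circuit: blob placement is content-addressed, so identical bytes always land on the same path. The layout below is purely illustrative; digest_tools.content_path computes the real one.

    def content_path(digest):
        algorithm, hexdigest = digest.split(':', 1)
        return '%s/%s/%s' % (algorithm, hexdigest[:2], hexdigest)  # fan out by prefix

    assert content_path('sha256:' + 'ab' * 32) == 'sha256/ab/' + 'ab' * 32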
+def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, already_existed):
+ """
+ When an upload is successful, this ends the uploading process from the
+ database's perspective.
+ """
+ # Create the blob and temporarily tag it.
+ blob_storage = model.create_blob_and_temp_tag(
+ namespace_name,
+ repo_name,
+ digest,
+ blob_upload,
+ app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],
+ )
+
+ # If it doesn't already exist, create the BitTorrent pieces for the blob.
+ if blob_upload.piece_sha_state is not None and not already_existed:
+ piece_bytes = blob_upload.piece_hashes + blob_upload.piece_sha_state.digest()
+ model.save_bittorrent_pieces(blob_storage, app.config['BITTORRENT_PIECE_SIZE'], piece_bytes)
+
+ # Delete the blob upload.
+ model.delete_blob_upload(namespace_name, repo_name, blob_upload.uuid)
+
+
+def _finish_upload(namespace_name, repo_name, blob_upload, digest):
+ """
+ When an upload is successful, this ends the uploading process.
+ """
+ _validate_digest(blob_upload, digest)
+ _finalize_blob_database(
+ namespace_name,
+ repo_name,
+ blob_upload,
+ digest,
+ _finalize_blob_storage(blob_upload, digest),
+ )
diff --git a/endpoints/v2/catalog.py b/endpoints/v2/catalog.py
index 240ab6ac5..8ae243460 100644
--- a/endpoints/v2/catalog.py
+++ b/endpoints/v2/catalog.py
@@ -1,55 +1,25 @@
-from collections import namedtuple
-
from flask import jsonify
-import features
-
-from app import model_cache
-from auth.auth_context import get_authenticated_user, get_authenticated_context
-from auth.registry_jwt_auth import process_registry_jwt_auth
-from data import model
-from data.cache import cache_key
+from auth.registry_jwt_auth import process_registry_jwt_auth, get_granted_entity
from endpoints.decorators import anon_protect
from endpoints.v2 import v2_bp, paginate
-
-
-class Repository(namedtuple('Repository', ['id', 'namespace_name', 'name'])):
- pass
-
+from data.interfaces.v2 import pre_oci_model as model
@v2_bp.route('/_catalog', methods=['GET'])
@process_registry_jwt_auth()
@anon_protect
@paginate()
-def catalog_search(start_id, limit, pagination_callback):
- def _load_catalog():
- include_public = bool(features.PUBLIC_CATALOG)
- if not include_public and not get_authenticated_user():
- return []
-
- username = get_authenticated_user().username if get_authenticated_user() else None
- if username and not get_authenticated_user().enabled:
- return []
-
- query = model.repository.get_visible_repositories(username,
- kind_filter='image',
- include_public=include_public,
- start_id=start_id,
- limit=limit + 1)
- # NOTE: The repository ID is in `rid` (not `id`) here, as per the requirements of
- # the `get_visible_repositories` call.
- return [Repository(repo.rid, repo.namespace_user.username, repo.name)._asdict()
- for repo in query]
-
- context_key = get_authenticated_context().unique_key if get_authenticated_context() else None
- catalog_cache_key = cache_key.for_catalog_page(context_key, start_id, limit)
- visible_repositories = [Repository(**repo_dict) for repo_dict
- in model_cache.retrieve(catalog_cache_key, _load_catalog)]
+def catalog_search(limit, offset, pagination_callback):
+ username = None
+ entity = get_granted_entity()
+ if entity:
+ username = entity.user.username
+  visible_repositories = model.get_visible_repositories(username, limit + 1, offset)
response = jsonify({
'repositories': ['%s/%s' % (repo.namespace_name, repo.name)
for repo in visible_repositories][0:limit],
})
- pagination_callback(visible_repositories, response)
+ pagination_callback(len(visible_repositories), response)
return response
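The limit+1 idiom above, in isolation; shape_catalog is an illustrative helper, not part of the interface.

    def shape_catalog(repos, limit):
        # limit + 1 rows are fetched so the pagination callback can detect a
        # further page; at most `limit` entries are returned to the client.
        return {'repositories': ['%s/%s' % (ns, name) for ns, name in repos][0:limit]}

    rows = [('acme', 'web'), ('acme', 'api'), ('acme', 'worker')]
    assert shape_catalog(rows, 2) == {'repositories': ['acme/web', 'acme/api']}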
diff --git a/endpoints/v2/errors.py b/endpoints/v2/errors.py
index 1479984db..0f8a5284e 100644
--- a/endpoints/v2/errors.py
+++ b/endpoints/v2/errors.py
@@ -1,168 +1,131 @@
-import bitmath
-
-
class V2RegistryException(Exception):
- def __init__(self, error_code_str, message, detail, http_status_code=400, repository=None,
- scopes=None, is_read_only=False):
+ def __init__(self, error_code_str, message, detail, http_status_code=400,
+ repository=None, scopes=None):
super(V2RegistryException, self).__init__(message)
self.http_status_code = http_status_code
self.repository = repository
self.scopes = scopes
- self.is_read_only = is_read_only
self._error_code_str = error_code_str
self._detail = detail
def as_dict(self):
- error_dict = {
+ return {
'code': self._error_code_str,
- 'message': str(self),
+ 'message': self.message,
'detail': self._detail if self._detail is not None else {},
}
- if self.is_read_only:
- error_dict['is_readonly'] = True
-
- return error_dict
-
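A compact sketch of how this hierarchy maps onto the V2 wire format, with stand-in classes (not the module's own) kept deliberately minimal:

    class RegistryError(Exception):
        # Each subclass pins an error code, message, and HTTP status; as_dict()
        # yields the body placed under {"errors": [...]} by the app errorhandler.
        def __init__(self, code, message, http_status_code=400, detail=None):
            super(RegistryError, self).__init__(message)
            self.code, self.http_status_code, self.detail = code, http_status_code, detail

        def as_dict(self):
            return {'code': self.code, 'message': str(self), 'detail': self.detail or {}}

    class BlobUnknownError(RegistryError):
        def __init__(self, detail=None):
            super(BlobUnknownError, self).__init__('BLOB_UNKNOWN', 'blob unknown to registry', 404, detail)

    err = BlobUnknownError()
    assert err.as_dict()['code'] == 'BLOB_UNKNOWN' and err.http_status_code == 404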
class BlobUnknown(V2RegistryException):
def __init__(self, detail=None):
- super(BlobUnknown, self).__init__('BLOB_UNKNOWN', 'blob unknown to registry', detail, 404)
+ super(BlobUnknown, self).__init__('BLOB_UNKNOWN',
+ 'blob unknown to registry',
+ detail,
+ 404)
class BlobUploadInvalid(V2RegistryException):
def __init__(self, detail=None):
- super(BlobUploadInvalid, self).__init__('BLOB_UPLOAD_INVALID', 'blob upload invalid', detail)
+ super(BlobUploadInvalid, self).__init__('BLOB_UPLOAD_INVALID',
+ 'blob upload invalid',
+ detail)
class BlobUploadUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadUnknown, self).__init__('BLOB_UPLOAD_UNKNOWN',
- 'blob upload unknown to registry', detail, 404)
+ 'blob upload unknown to registry',
+ detail,
+ 404)
class DigestInvalid(V2RegistryException):
def __init__(self, detail=None):
super(DigestInvalid, self).__init__('DIGEST_INVALID',
- 'provided digest did not match uploaded content', detail)
+ 'provided digest did not match uploaded content',
+ detail)
class ManifestBlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestBlobUnknown, self).__init__('MANIFEST_BLOB_UNKNOWN',
- 'manifest blob unknown to registry', detail)
+ 'manifest blob unknown to registry',
+ detail)
class ManifestInvalid(V2RegistryException):
def __init__(self, detail=None, http_status_code=400):
- super(ManifestInvalid, self).__init__('MANIFEST_INVALID', 'manifest invalid', detail,
+ super(ManifestInvalid, self).__init__('MANIFEST_INVALID',
+ 'manifest invalid',
+ detail,
http_status_code)
class ManifestUnknown(V2RegistryException):
def __init__(self, detail=None):
- super(ManifestUnknown, self).__init__('MANIFEST_UNKNOWN', 'manifest unknown', detail, 404)
-
-
-class TagExpired(V2RegistryException):
- def __init__(self, message=None, detail=None):
- super(TagExpired, self).__init__('TAG_EXPIRED',
- message or 'Tag has expired',
- detail,
- 404)
+ super(ManifestUnknown, self).__init__('MANIFEST_UNKNOWN',
+ 'manifest unknown',
+ detail,
+ 404)
class ManifestUnverified(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnverified, self).__init__('MANIFEST_UNVERIFIED',
- 'manifest failed signature verification', detail)
+ 'manifest failed signature verification',
+ detail)
class NameInvalid(V2RegistryException):
- def __init__(self, detail=None, message=None):
- super(NameInvalid, self).__init__('NAME_INVALID', message or 'invalid repository name', detail)
+ def __init__(self, detail=None):
+ super(NameInvalid, self).__init__('NAME_INVALID',
+ 'invalid repository name',
+ detail)
class NameUnknown(V2RegistryException):
def __init__(self, detail=None):
- super(NameUnknown, self).__init__('NAME_UNKNOWN', 'repository name not known to registry',
- detail, 404)
+ super(NameUnknown, self).__init__('NAME_UNKNOWN',
+ 'repository name not known to registry',
+ detail,
+ 404)
class SizeInvalid(V2RegistryException):
def __init__(self, detail=None):
super(SizeInvalid, self).__init__('SIZE_INVALID',
- 'provided length did not match content length', detail)
+ 'provided length did not match content length',
+ detail)
class TagAlreadyExists(V2RegistryException):
def __init__(self, detail=None):
- super(TagAlreadyExists, self).__init__('TAG_ALREADY_EXISTS', 'tag was already pushed', detail,
+ super(TagAlreadyExists, self).__init__('TAG_ALREADY_EXISTS',
+ 'tag was already pushed',
+ detail,
409)
-
class TagInvalid(V2RegistryException):
def __init__(self, detail=None):
- super(TagInvalid, self).__init__('TAG_INVALID', 'manifest tag did not match URI', detail)
-
-
-class LayerTooLarge(V2RegistryException):
- def __init__(self, uploaded=None, max_allowed=None):
- detail = {}
- message = 'Uploaded blob is larger than allowed by this registry'
-
- if uploaded is not None and max_allowed is not None:
- detail = {
- 'reason': '%s is greater than maximum allowed size %s' % (uploaded, max_allowed),
- 'max_allowed': max_allowed,
- 'uploaded': uploaded,}
-
- up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
- max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
- message = 'Uploaded blob of %s is larger than %s allowed by this registry' % (up_str,
- max_str)
+ super(TagInvalid, self).__init__('TAG_INVALID',
+ 'manifest tag did not match URI',
+ detail)
class Unauthorized(V2RegistryException):
def __init__(self, detail=None, repository=None, scopes=None):
- super(Unauthorized,
- self).__init__('UNAUTHORIZED', 'access to the requested resource is not authorized',
- detail, 401, repository=repository, scopes=scopes)
+ super(Unauthorized, self).__init__('UNAUTHORIZED',
+ 'access to the requested resource is not authorized',
+ detail,
+ 401,
+ repository=repository,
+ scopes=scopes)
class Unsupported(V2RegistryException):
- def __init__(self, detail=None, message=None):
- super(Unsupported, self).__init__('UNSUPPORTED', message or 'The operation is unsupported.',
- detail, 405)
-
-
-class InvalidLogin(V2RegistryException):
- def __init__(self, message=None):
- super(InvalidLogin, self).__init__('UNAUTHORIZED', message or
- 'Specified credentials are invalid', {}, 401)
-
-
-class InvalidRequest(V2RegistryException):
- def __init__(self, message=None):
- super(InvalidRequest, self).__init__('INVALID_REQUEST', message or 'Invalid request', {}, 400)
-
-
-class NamespaceDisabled(V2RegistryException):
- def __init__(self, message=None):
- message = message or 'This namespace is disabled. Please contact your system administrator.'
- super(NamespaceDisabled, self).__init__('DENIED', message, {}, 405)
-
-
-class BlobDownloadGeoBlocked(V2RegistryException):
def __init__(self, detail=None):
- message = ('The region from which you are pulling has been geo-ip blocked. ' +
- 'Please contact the namespace owner.')
- super(BlobDownloadGeoBlocked, self).__init__('DENIED', message, detail, 403)
-
-
-class ReadOnlyMode(V2RegistryException):
- def __init__(self, detail=None):
- message = ('System is currently read-only. Pulls will succeed but all write operations ' +
- 'are currently suspended.')
- super(ReadOnlyMode, self).__init__('DENIED', message, detail, 405, is_read_only=True)
+ super(Unsupported, self).__init__('UNSUPPORTED',
+ 'The operation is unsupported.',
+ detail,
+ 405)
diff --git a/endpoints/v2/manifest.py b/endpoints/v2/manifest.py
index b71b3bb3f..a4155add2 100644
--- a/endpoints/v2/manifest.py
+++ b/endpoints/v2/manifest.py
@@ -6,74 +6,58 @@ from flask import request, url_for, Response
import features
-from app import app, metric_queue, storage
+from app import docker_v2_signing_key, app, metric_queue
from auth.registry_jwt_auth import process_registry_jwt_auth
+from data.interfaces.v2 import pre_oci_model as model, Label
from digest import digest_tools
-from data.registry_model import registry_model
-from data.model.oci.manifest import CreateManifestException
-from endpoints.decorators import anon_protect, parse_repository_name, check_readonly
+from endpoints.common import parse_repository_name
+from endpoints.decorators import anon_protect
from endpoints.v2 import v2_bp, require_repo_read, require_repo_write
-from endpoints.v2.errors import (ManifestInvalid, ManifestUnknown, NameInvalid, TagExpired,
- NameUnknown)
+from endpoints.v2.errors import (BlobUnknown, ManifestInvalid, ManifestUnknown, TagInvalid,
+ NameInvalid)
+from endpoints.trackhelper import track_and_log
+from endpoints.notificationhelper import spawn_notification
from image.docker import ManifestException
-from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE, DOCKER_SCHEMA1_CONTENT_TYPES
-from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES, OCI_CONTENT_TYPES
-from image.docker.schemas import parse_manifest_from_bytes
-from notifications import spawn_notification
-from util.audit import track_and_log
-from util.bytes import Bytes
+from image.docker.schema1 import DockerSchema1Manifest, DockerSchema1ManifestBuilder
+from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
from util.names import VALID_TAG_PATTERN
from util.registry.replication import queue_replication_batch
+from util.validation import is_json
logger = logging.getLogger(__name__)
+
BASE_MANIFEST_ROUTE = '/<repopath:repository>/manifests/<regex("{0}"):manifest_ref>'
MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN)
MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN)
-
@v2_bp.route(MANIFEST_TAGNAME_ROUTE, methods=['GET'])
@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull'])
@require_repo_read
@anon_protect
def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- tag = registry_model.get_repo_tag(repository_ref, manifest_ref)
- if tag is None:
- if registry_model.has_expired_tag(repository_ref, manifest_ref):
- logger.debug('Found expired tag %s for repository %s/%s', manifest_ref, namespace_name,
- repo_name)
- msg = 'Tag %s was deleted or has expired. To pull, revive via time machine' % manifest_ref
- raise TagExpired(msg)
-
- raise ManifestUnknown()
-
- manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
+ manifest = model.get_manifest_by_tag(namespace_name, repo_name, manifest_ref)
if manifest is None:
- # Something went wrong.
- raise ManifestInvalid()
+ has_tag = model.has_active_tag(namespace_name, repo_name, manifest_ref)
+ if not has_tag:
+ raise ManifestUnknown()
- manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
- namespace_name, repo_name, manifest_ref, manifest)
- if manifest_bytes is None:
- raise ManifestUnknown()
+ manifest = _generate_and_store_manifest(namespace_name, repo_name, manifest_ref)
+ if manifest is None:
+ raise ManifestUnknown()
- track_and_log('pull_repo', repository_ref, analytics_name='pull_repo_100x', analytics_sample=0.01,
- tag=manifest_ref)
- metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
+ repo = model.get_repository(namespace_name, repo_name)
+ if repo is not None:
+ track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01,
+ tag=manifest_ref)
+ metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
return Response(
- manifest_bytes.as_unicode(),
+ manifest.json,
status=200,
- headers={
- 'Content-Type': manifest_media_type,
- 'Docker-Content-Digest': manifest_digest,
- },
+ headers={'Content-Type': manifest.media_type, 'Docker-Content-Digest': manifest.digest},
)
@@ -83,194 +67,144 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
@require_repo_read
@anon_protect
def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
+ manifest = model.get_manifest_by_digest(namespace_name, repo_name, manifest_ref)
if manifest is None:
+ # Without a tag name to reference, we can't make an attempt to generate the manifest
raise ManifestUnknown()
- manifest_bytes, manifest_digest, manifest_media_type = _rewrite_schema_if_necessary(
- namespace_name, repo_name, '$digest', manifest)
- if manifest_digest is None:
- raise ManifestUnknown()
+ repo = model.get_repository(namespace_name, repo_name)
+ if repo is not None:
+ track_and_log('pull_repo', repo, manifest_digest=manifest_ref)
+ metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
- track_and_log('pull_repo', repository_ref, manifest_digest=manifest_ref)
- metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
-
- return Response(manifest_bytes.as_unicode(), status=200, headers={
- 'Content-Type': manifest_media_type,
- 'Docker-Content-Digest': manifest_digest,
- })
-
-
-def _rewrite_schema_if_necessary(namespace_name, repo_name, tag_name, manifest):
- # As per the Docker protocol, if the manifest is not schema version 1 and the manifest's
- # media type is not in the Accept header, we return a schema 1 version of the manifest for
- # the amd64+linux platform, if any, or None if none.
- # See: https://docs.docker.com/registry/spec/manifest-v2-2
- mimetypes = [mimetype for mimetype, _ in request.accept_mimetypes]
- if manifest.media_type in mimetypes:
- return manifest.internal_manifest_bytes, manifest.digest, manifest.media_type
-
- # Short-circuit check: If the mimetypes is empty or just `application/json`, verify we have
- # a schema 1 manifest and return it.
- if not mimetypes or mimetypes == ['application/json']:
- if manifest.media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
- return manifest.internal_manifest_bytes, manifest.digest, manifest.media_type
-
- logger.debug('Manifest `%s` not compatible against %s; checking for conversion', manifest.digest,
- request.accept_mimetypes)
- converted = registry_model.convert_manifest(manifest, namespace_name, repo_name, tag_name,
- mimetypes, storage)
- if converted is not None:
- return converted.bytes, converted.digest, converted.media_type
-
- # For back-compat, we always default to schema 1 if the manifest could not be converted.
- schema1 = registry_model.get_schema1_parsed_manifest(manifest, namespace_name, repo_name,
- tag_name, storage)
- if schema1 is None:
- return None, None, None
-
- return schema1.bytes, schema1.digest, schema1.media_type
+ return Response(manifest.json, status=200, headers={'Content-Type': manifest.media_type,
+ 'Docker-Content-Digest': manifest.digest})
def _reject_manifest2_schema2(func):
@wraps(func)
def wrapped(*args, **kwargs):
- namespace_name = kwargs['namespace_name']
- if registry_model.supports_schema2(namespace_name):
- return func(*args, **kwargs)
-
- if _doesnt_accept_schema_v1() or \
- request.content_type in DOCKER_SCHEMA2_CONTENT_TYPES | OCI_CONTENT_TYPES:
+ if request.content_type in DOCKER_SCHEMA2_CONTENT_TYPES:
raise ManifestInvalid(detail={'message': 'manifest schema version not supported'},
http_status_code=415)
return func(*args, **kwargs)
-
return wrapped
-def _doesnt_accept_schema_v1():
- # If the client doesn't specify anything, still give them Schema v1.
- return len(request.accept_mimetypes) != 0 and \
- DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE not in request.accept_mimetypes
-
-
@v2_bp.route(MANIFEST_TAGNAME_ROUTE, methods=['PUT'])
-@parse_repository_name()
@_reject_manifest2_schema2
+@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def write_manifest_by_tagname(namespace_name, repo_name, manifest_ref):
- parsed = _parse_manifest()
- return _write_manifest_and_log(namespace_name, repo_name, manifest_ref, parsed)
+ try:
+ manifest = DockerSchema1Manifest(request.data)
+ except ManifestException as me:
+ logger.exception("failed to parse manifest when writing by tagname")
+ raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me.message})
+
+ if manifest.tag != manifest_ref:
+ raise TagInvalid()
+
+ return _write_manifest_and_log(namespace_name, repo_name, manifest)
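
The corresponding push, continuing the previous sketch's host, repo, and token. The body must be a signed schema 1 manifest whose embedded tag matches the URL, or TagInvalid is raised above; signed_manifest_bytes is a hypothetical stand-in for the output of DockerSchema1ManifestBuilder.build().

    resp = requests.put(
        'https://%s/v2/%s/manifests/latest' % (host, repo),
        data=signed_manifest_bytes,  # hypothetical signed schema 1 manifest body
        headers={
            'Authorization': 'Bearer %s' % token,
            'Content-Type': 'application/vnd.docker.distribution.manifest.v1+prettyjws',
        })
    assert resp.status_code == 202  # Docker-Content-Digest and Location headers are set
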
@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['PUT'])
-@parse_repository_name()
@_reject_manifest2_schema2
+@parse_repository_name()
@process_registry_jwt_auth(scopes=['pull', 'push'])
@require_repo_write
@anon_protect
-@check_readonly
def write_manifest_by_digest(namespace_name, repo_name, manifest_ref):
- parsed = _parse_manifest()
- if parsed.digest != manifest_ref:
+ try:
+ manifest = DockerSchema1Manifest(request.data)
+ except ManifestException as me:
+ logger.exception("failed to parse manifest when writing by digest")
+ raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me.message})
+
+ if manifest.digest != manifest_ref:
raise ManifestInvalid(detail={'message': 'manifest digest mismatch'})
- if parsed.schema_version != 2:
- return _write_manifest_and_log(namespace_name, repo_name, parsed.tag, parsed)
-
- # If the manifest is schema version 2, then this cannot be a normal tag-based push, as the
- # manifest does not contain the tag and this call was not given a tag name. Instead, we write the
- # manifest with a temporary tag, as it is being pushed as part of a call for a manifest list.
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- expiration_sec = app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']
- manifest = registry_model.create_manifest_with_temp_tag(repository_ref, parsed, expiration_sec,
- storage)
- if manifest is None:
- raise ManifestInvalid()
-
- return Response(
- 'OK',
- status=202,
- headers={
- 'Docker-Content-Digest': manifest.digest,
- 'Location':
- url_for('v2.fetch_manifest_by_digest',
- repository='%s/%s' % (namespace_name, repo_name),
- manifest_ref=manifest.digest),
- },
- )
+ return _write_manifest_and_log(namespace_name, repo_name, manifest)
-def _parse_manifest():
- content_type = request.content_type or DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
- if content_type == 'application/json':
- # For back-compat.
- content_type = DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
+def _write_manifest(namespace_name, repo_name, manifest):
+ if (manifest.namespace == '' and
+ features.LIBRARY_SUPPORT and
+ namespace_name == app.config['LIBRARY_NAMESPACE']):
+ pass
+ elif manifest.namespace != namespace_name:
+ raise NameInvalid()
+ if manifest.repo_name != repo_name:
+ raise NameInvalid()
+
+ # Ensure that the repository exists.
+ repo = model.get_repository(namespace_name, repo_name)
+ if repo is None:
+ raise NameInvalid()
+
+ if not manifest.layers:
+ logger.info("manifest provided with no layers")
+ raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})
+
+ # Ensure all the blobs in the manifest exist.
+ storage_map = model.lookup_blobs_by_digest(namespace_name, repo_name, manifest.checksums)
+ for layer in manifest.layers:
+ digest_str = str(layer.digest)
+ if digest_str not in storage_map:
+ raise BlobUnknown(detail={'digest': digest_str})
+
+ # Lookup all the images and their parent images (if any) inside the manifest.
+ # This will let us know which v1 images we need to synthesize and which ones are invalid.
+ all_image_ids = list(manifest.parent_image_ids | manifest.image_ids)
+ images_map = model.get_docker_v1_metadata_by_image_id(namespace_name, repo_name, all_image_ids)
+
+ # Rewrite any v1 image IDs that do not match the checksum in the database.
try:
- return parse_manifest_from_bytes(Bytes.for_string_or_unicode(request.data), content_type)
+ rewritten_images = list(manifest.rewrite_invalid_image_ids(images_map))
+ for rewritten_image in rewritten_images:
+ model.synthesize_v1_image(
+ repo,
+ storage_map[rewritten_image.content_checksum],
+ rewritten_image.image_id,
+ rewritten_image.created,
+ rewritten_image.comment,
+ rewritten_image.command,
+ rewritten_image.compat_json,
+ rewritten_image.parent_image_id,
+ )
except ManifestException as me:
- logger.exception("failed to parse manifest when writing by tagname")
- raise ManifestInvalid(detail={'message': 'failed to parse manifest: %s' % me})
+ logger.exception("exception when rewriting v1 metadata")
+ raise ManifestInvalid(detail={'message': 'failed synthesizing v1 metadata: %s' % me.message})
+
+ # Store the manifest pointing to the tag.
+ leaf_layer_id = rewritten_images[-1].image_id
+ newly_created = model.save_manifest(namespace_name, repo_name, manifest.tag, leaf_layer_id,
+ manifest.digest, manifest.bytes)
+ if newly_created:
+ labels = []
+ for key, value in manifest.layers[-1].v1_metadata.labels.iteritems():
+ media_type = 'application/json' if is_json(value) else 'text/plain'
+ labels.append(Label(key=key, value=value, source_type='manifest', media_type=media_type))
+ model.create_manifest_labels(namespace_name, repo_name, manifest.digest, labels)
+
+ return repo, storage_map
-@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE'])
-@parse_repository_name()
-@process_registry_jwt_auth(scopes=['pull', 'push'])
-@require_repo_write
-@anon_protect
-@check_readonly
-def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
- """
- Delete the manifest specified by the digest.
-
- Note: there is no equivalent method for deleting by tag name because it is
- forbidden by the spec.
- """
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
-
- manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref)
- if manifest is None:
- raise ManifestUnknown()
-
- tags = registry_model.delete_tags_for_manifest(manifest)
- if not tags:
- raise ManifestUnknown()
-
- for tag in tags:
- track_and_log('delete_tag', repository_ref, tag=tag.name, digest=manifest_ref)
-
- return Response(status=202)
-
-
-def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl):
- repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name, tag_name,
- manifest_impl)
+def _write_manifest_and_log(namespace_name, repo_name, manifest):
+ repo, storage_map = _write_manifest(namespace_name, repo_name, manifest)
# Queue all blob manifests for replication.
if features.STORAGE_REPLICATION:
- blobs = registry_model.get_manifest_local_blobs(manifest)
- if blobs is None:
- logger.error('Could not lookup blobs for manifest `%s`', manifest.digest)
- else:
- with queue_replication_batch(namespace_name) as queue_storage_replication:
- for blob_digest in blobs:
- queue_storage_replication(blob_digest)
+ with queue_replication_batch(namespace_name) as queue_storage_replication:
+ for layer in manifest.layers:
+ digest_str = str(layer.digest)
+ queue_storage_replication(storage_map[digest_str])
- track_and_log('push_repo', repository_ref, tag=tag_name)
- spawn_notification(repository_ref, 'repo_push', {'updated_tags': [tag_name]})
+ track_and_log('push_repo', repo, tag=manifest.tag)
+ spawn_notification(repo, 'repo_push', {'updated_tags': [manifest.tag]})
metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, 'v2', True])
return Response(
@@ -278,47 +212,63 @@ def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl):
status=202,
headers={
'Docker-Content-Digest': manifest.digest,
- 'Location':
- url_for('v2.fetch_manifest_by_digest',
- repository='%s/%s' % (namespace_name, repo_name),
- manifest_ref=manifest.digest),
+ 'Location': url_for('v2.fetch_manifest_by_digest',
+ repository='%s/%s' % (namespace_name, repo_name),
+ manifest_ref=manifest.digest),
},
)
-def _write_manifest(namespace_name, repo_name, tag_name, manifest_impl):
- # NOTE: These extra checks are needed for schema version 1 because the manifests
- # contain the repo namespace, name and tag name.
- if manifest_impl.schema_version == 1:
- if (manifest_impl.namespace == '' and features.LIBRARY_SUPPORT and
- namespace_name == app.config['LIBRARY_NAMESPACE']):
- pass
- elif manifest_impl.namespace != namespace_name:
- raise NameInvalid()
+@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE'])
+@parse_repository_name()
+@process_registry_jwt_auth(scopes=['pull', 'push'])
+@require_repo_write
+@anon_protect
+def delete_manifest_by_digest(namespace_name, repo_name, manifest_ref):
+ """
+ Delete the manifest specified by the digest.
- if manifest_impl.repo_name != repo_name:
- raise NameInvalid()
+ Note: there is no equivalent method for deleting by tag name because it is
+ forbidden by the spec.
+ """
+ tags = model.delete_manifest_by_digest(namespace_name, repo_name, manifest_ref)
+ if not tags:
+ raise ManifestUnknown()
- try:
- if not manifest_impl.layers:
- raise ManifestInvalid(detail={'message': 'manifest does not reference any layers'})
- except ManifestException as me:
- raise ManifestInvalid(detail={'message': str(me)})
+ for tag in tags:
+ track_and_log('delete_tag', tag.repository, tag=tag.name, digest=manifest_ref)
- # Ensure that the repository exists.
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
- raise NameUnknown()
+ return Response(status=202)
- # Create the manifest(s) and retarget the tag to point to it.
- try:
- manifest, tag = registry_model.create_manifest_and_retarget_tag(repository_ref, manifest_impl,
- tag_name, storage,
- raise_on_error=True)
- except CreateManifestException as cme:
- raise ManifestInvalid(detail={'message': str(cme)})
- if manifest is None:
- raise ManifestInvalid()
+def _generate_and_store_manifest(namespace_name, repo_name, tag_name):
+ # Find the v1 metadata for this image and its parents.
+ v1_metadata = model.get_docker_v1_metadata_by_tag(namespace_name, repo_name, tag_name)
+ parents_v1_metadata = model.get_parents_docker_v1_metadata(namespace_name, repo_name,
+ v1_metadata.image_id)
- return repository_ref, manifest, tag
+ # If the manifest is being generated under the library namespace, then we make its namespace
+ # empty.
+ manifest_namespace = namespace_name
+ if features.LIBRARY_SUPPORT and namespace_name == app.config['LIBRARY_NAMESPACE']:
+ manifest_namespace = ''
+
+ # Create and populate the manifest builder
+ builder = DockerSchema1ManifestBuilder(manifest_namespace, repo_name, tag_name)
+
+ # Add the leaf layer
+ builder.add_layer(v1_metadata.content_checksum, v1_metadata.compat_json)
+
+ for parent_v1_metadata in parents_v1_metadata:
+ builder.add_layer(parent_v1_metadata.content_checksum, parent_v1_metadata.compat_json)
+
+ # Sign the manifest with our signing key.
+ manifest = builder.build(docker_v2_signing_key)
+
+ # Write the manifest to the DB.
+ model.create_manifest_and_update_tag(namespace_name, repo_name, tag_name, manifest.digest,
+ manifest.bytes)
+ return manifest
+
+def _determine_media_type(value):
+ return 'application/json' if is_json(value) else 'text/plain'
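
As a usage illustration of the builder driving _generate_and_store_manifest, mirroring the deleted corner-case test below; the namespace, repo, tag, and layer JSON are hypothetical.

    import hashlib
    from app import docker_v2_signing_key
    from image.docker.schema1 import DockerSchema1ManifestBuilder

    first_blob_sha = 'sha256:' + hashlib.sha256("FIRST").hexdigest()
    manifest = (DockerSchema1ManifestBuilder('devtable', 'simple', 'latest')
                .add_layer(first_blob_sha, '{"id": "first"}')  # leaf layer first
                .build(docker_v2_signing_key))                 # signs with the instance key
    print(manifest.digest)
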
diff --git a/endpoints/v2/tag.py b/endpoints/v2/tag.py
index 779a78351..6b1ce20ad 100644
--- a/endpoints/v2/tag.py
+++ b/endpoints/v2/tag.py
@@ -1,12 +1,11 @@
from flask import jsonify
-from app import model_cache
from auth.registry_jwt_auth import process_registry_jwt_auth
-from data.registry_model import registry_model
-from endpoints.decorators import anon_protect, parse_repository_name
+from endpoints.common import parse_repository_name
from endpoints.v2 import v2_bp, require_repo_read, paginate
from endpoints.v2.errors import NameUnknown
-
+from endpoints.decorators import anon_protect
+from data.interfaces.v2 import pre_oci_model as model
@v2_bp.route('/<repopath:repository>/tags/list', methods=['GET'])
@parse_repository_name()
@@ -14,19 +13,16 @@ from endpoints.v2.errors import NameUnknown
@require_repo_read
@anon_protect
@paginate()
-def list_all_tags(namespace_name, repo_name, start_id, limit, pagination_callback):
- repository_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repository_ref is None:
+def list_all_tags(namespace_name, repo_name, limit, offset, pagination_callback):
+ repo = model.get_repository(namespace_name, repo_name)
+ if repo is None:
raise NameUnknown()
- # NOTE: We add 1 to the limit because that's how pagination_callback knows if there are
- # additional tags.
- tags = registry_model.lookup_cached_active_repository_tags(model_cache, repository_ref, start_id,
- limit + 1)
+ tags = model.repository_tags(namespace_name, repo_name, limit, offset)
response = jsonify({
'name': '{0}/{1}'.format(namespace_name, repo_name),
- 'tags': [tag.name for tag in tags][0:limit],
+ 'tags': [tag.name for tag in tags],
})
- pagination_callback(tags, response)
+ pagination_callback(len(tags), response)
return response
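
A sketch of calling this endpoint, assuming the Docker-standard n/last query parameters are what the paginate() decorator consumes (an assumption; only the decorator's internals know for sure). Host and token are hypothetical.

    import requests

    token = '<bearer token with pull scope>'  # hypothetical
    resp = requests.get(
        'https://quay.example.com/v2/devtable/simple/tags/list',
        params={'n': 50},  # page size; 'last' would name the final tag of the prior page
        headers={'Authorization': 'Bearer %s' % token})
    print(resp.json()['tags'])       # up to 50 tag names
    print(resp.headers.get('Link'))  # next-page link, if more tags remain
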
diff --git a/endpoints/v2/test/test_blob.py b/endpoints/v2/test/test_blob.py
deleted file mode 100644
index cd3b0932d..000000000
--- a/endpoints/v2/test/test_blob.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import hashlib
-import pytest
-
-from mock import patch
-from flask import url_for
-from playhouse.test_utils import assert_query_count
-
-from app import instance_keys, app as realapp
-from auth.auth_context_type import ValidatedAuthContext
-from data import model
-from data.cache import InMemoryDataModelCache
-from data.database import ImageStorageLocation
-from endpoints.test.shared import conduct_call
-from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
-from test.fixtures import *
-
-@pytest.mark.parametrize('method, endpoint', [
- ('GET', 'download_blob'),
- ('HEAD', 'check_blob_exists'),
-])
-def test_blob_caching(method, endpoint, client, app):
- digest = 'sha256:' + hashlib.sha256("a").hexdigest()
- location = ImageStorageLocation.get(name='local_us')
- model.blob.store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 10000000)
-
- params = {
- 'repository': 'devtable/simple',
- 'digest': digest,
- }
-
- user = model.user.get_user('devtable')
- access = [{
- 'type': 'repository',
- 'name': 'devtable/simple',
- 'actions': ['pull'],
- }]
-
- context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
- token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
- instance_keys)
-
- headers = {
- 'Authorization': 'Bearer %s' % token,
- }
-
- # Run without caching to make sure the request works. This also preloads some of
- # our global model caches.
- conduct_call(client, 'v2.' + endpoint, url_for, method, params, expected_code=200,
- headers=headers)
-
- with patch('endpoints.v2.blob.model_cache', InMemoryDataModelCache()):
- # First request should make a DB query to retrieve the blob.
- conduct_call(client, 'v2.' + endpoint, url_for, method, params, expected_code=200,
- headers=headers)
-
- # Subsequent requests should use the cached blob.
- with assert_query_count(0):
- conduct_call(client, 'v2.' + endpoint, url_for, method, params, expected_code=200,
- headers=headers)
-
-@pytest.mark.parametrize('mount_digest, source_repo, username, expect_success', [
- # Unknown blob.
- ('sha256:unknown', 'devtable/simple', 'devtable', False),
-
- # Blob not in repo.
- ('sha256:' + hashlib.sha256("a").hexdigest(), 'devtable/complex', 'devtable', False),
-
- # Blob in repo.
- ('sha256:' + hashlib.sha256("b").hexdigest(), 'devtable/complex', 'devtable', True),
-
- # No access to repo.
- ('sha256:' + hashlib.sha256("b").hexdigest(), 'devtable/complex', 'public', False),
-
- # Public repo.
- ('sha256:' + hashlib.sha256("c").hexdigest(), 'public/publicrepo', 'devtable', True),
-])
-def test_blob_mounting(mount_digest, source_repo, username, expect_success, client, app):
- location = ImageStorageLocation.get(name='local_us')
-
- # Store and link some blobs.
- digest = 'sha256:' + hashlib.sha256("a").hexdigest()
- model.blob.store_blob_record_and_temp_link('devtable', 'simple', digest, location, 1, 10000000)
-
- digest = 'sha256:' + hashlib.sha256("b").hexdigest()
- model.blob.store_blob_record_and_temp_link('devtable', 'complex', digest, location, 1, 10000000)
-
- digest = 'sha256:' + hashlib.sha256("c").hexdigest()
- model.blob.store_blob_record_and_temp_link('public', 'publicrepo', digest, location, 1, 10000000)
-
- params = {
- 'repository': 'devtable/building',
- 'mount': mount_digest,
- 'from': source_repo,
- }
-
- user = model.user.get_user(username)
- access = [{
- 'type': 'repository',
- 'name': 'devtable/building',
- 'actions': ['pull', 'push'],
- }]
-
- if source_repo.find(username) == 0:
- access.append({
- 'type': 'repository',
- 'name': source_repo,
- 'actions': ['pull'],
- })
-
- context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
- token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
- instance_keys)
-
- headers = {
- 'Authorization': 'Bearer %s' % token,
- }
-
- expected_code = 201 if expect_success else 202
- conduct_call(client, 'v2.start_blob_upload', url_for, 'POST', params, expected_code=expected_code,
- headers=headers)
-
- if expect_success:
- # Ensure the blob now exists under the repo.
- model.blob.get_repo_blob_by_digest('devtable', 'building', mount_digest)
- else:
- with pytest.raises(model.blob.BlobDoesNotExist):
- model.blob.get_repo_blob_by_digest('devtable', 'building', mount_digest)
diff --git a/endpoints/v2/test/test_manifest.py b/endpoints/v2/test/test_manifest.py
deleted file mode 100644
index 960501052..000000000
--- a/endpoints/v2/test/test_manifest.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import hashlib
-import pytest
-import time
-
-from mock import patch
-
-from flask import url_for
-from playhouse.test_utils import count_queries
-
-from app import instance_keys, app as realapp
-from auth.auth_context_type import ValidatedAuthContext
-from data import model
-from endpoints.test.shared import conduct_call
-from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
-from test.fixtures import *
-
-def test_e2e_query_count_manifest_norewrite(client, app):
- tag_manifest = model.tag.load_tag_manifest('devtable', 'simple', 'latest')
-
- params = {
- 'repository': 'devtable/simple',
- 'manifest_ref': tag_manifest.digest,
- }
-
- user = model.user.get_user('devtable')
- access = [{
- 'type': 'repository',
- 'name': 'devtable/simple',
- 'actions': ['pull', 'push'],
- }]
-
- context, subject = build_context_and_subject(ValidatedAuthContext(user=user))
- token = generate_bearer_token(realapp.config['SERVER_HOSTNAME'], subject, context, access, 600,
- instance_keys)
-
- headers = {
- 'Authorization': 'Bearer %s' % token,
- }
-
- # Conduct a call to prime the instance key and other caches.
- conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params, expected_code=202,
- headers=headers, raw_body=tag_manifest.json_data)
-
- timecode = time.time()
- def get_time():
- return timecode + 10
-
- with patch('time.time', get_time):
- # Necessary in order to have the tag updates not occur in the same second, which is the
- # granularity supported currently.
- with count_queries() as counter:
- conduct_call(client, 'v2.write_manifest_by_digest', url_for, 'PUT', params, expected_code=202,
- headers=headers, raw_body=tag_manifest.json_data)
-
- assert counter.count <= 27
diff --git a/endpoints/v2/test/test_manifest_cornercases.py b/endpoints/v2/test/test_manifest_cornercases.py
deleted file mode 100644
index b08242343..000000000
--- a/endpoints/v2/test/test_manifest_cornercases.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import hashlib
-
-from contextlib import contextmanager
-
-from app import storage, docker_v2_signing_key
-from data import model, database
-from data.registry_model import registry_model
-from endpoints.v2.manifest import _write_manifest
-from image.docker.schema1 import DockerSchema1ManifestBuilder
-
-from test.fixtures import *
-
-
-ADMIN_ACCESS_USER = 'devtable'
-REPO = 'simple'
-FIRST_TAG = 'first'
-SECOND_TAG = 'second'
-THIRD_TAG = 'third'
-
-
-@contextmanager
-def set_tag_expiration_policy(namespace, expiration_s=0):
- namespace_user = model.user.get_user(namespace)
- model.user.change_user_tag_expiration(namespace_user, expiration_s)
- yield
-
-
-def _perform_cleanup():
- database.RepositoryTag.delete().where(database.RepositoryTag.hidden == True).execute()
- repo_object = model.repository.get_repository(ADMIN_ACCESS_USER, REPO)
- model.gc.garbage_collect_repo(repo_object)
-
-
-def test_missing_link(initialized_db):
- """ Tests for a corner case that could result in missing a link to a blob referenced by a
- manifest. The test exercises the case as follows:
-
- 1) Push a manifest of a single layer with a Docker ID `FIRST_ID`, pointing
- to blob `FIRST_BLOB`. The database should contain the tag referencing the layer, with
- no changed ID and the blob not being GCed.
-
- 2) Push a manifest of two layers:
-
- Layer 1: `FIRST_ID` with blob `SECOND_BLOB`: Will result in a new synthesized ID
- Layer 2: `SECOND_ID` with blob `THIRD_BLOB`: Will result in `SECOND_ID` pointing to the
- `THIRD_BLOB`, with a parent pointing to the new synthesized ID's layer.
-
- 3) Push a manifest of two layers:
-
- Layer 1: `THIRD_ID` with blob `FOURTH_BLOB`: Will result in a new `THIRD_ID` layer
- Layer 2: `FIRST_ID` with blob `THIRD_BLOB`: Since `FIRST_ID` already points to `SECOND_BLOB`,
- this will synthesize a new ID. With the current bug, the synthesized ID will match
- that of `SECOND_ID`, leaving `THIRD_ID` unlinked and therefore, after a GC, missing
- `FOURTH_BLOB`.
- """
- with set_tag_expiration_policy('devtable', 0):
- location_name = storage.preferred_locations[0]
- location = database.ImageStorageLocation.get(name=location_name)
-
- # Create first blob.
- first_blob_sha = 'sha256:' + hashlib.sha256("FIRST").hexdigest()
- model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, first_blob_sha, location, 0, 0, 0)
-
- # Push the first manifest.
- first_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, FIRST_TAG)
- .add_layer(first_blob_sha, '{"id": "first"}')
- .build(docker_v2_signing_key))
-
- _write_manifest(ADMIN_ACCESS_USER, REPO, FIRST_TAG, first_manifest)
-
- # Delete all temp tags and perform GC.
- _perform_cleanup()
-
- # Ensure that the first blob still exists, along with the first tag.
- assert model.blob.get_repo_blob_by_digest(ADMIN_ACCESS_USER, REPO, first_blob_sha) is not None
-
- repository_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, REPO)
- found_tag = registry_model.get_repo_tag(repository_ref, FIRST_TAG, include_legacy_image=True)
- assert found_tag is not None
- assert found_tag.legacy_image.docker_image_id == 'first'
-
- # Create the second and third blobs.
- second_blob_sha = 'sha256:' + hashlib.sha256("SECOND").hexdigest()
- third_blob_sha = 'sha256:' + hashlib.sha256("THIRD").hexdigest()
-
- model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, second_blob_sha, location, 0, 0, 0)
- model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, third_blob_sha, location, 0, 0, 0)
-
- # Push the second manifest.
- second_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, SECOND_TAG)
- .add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
- .add_layer(second_blob_sha, '{"id": "first"}')
- .build(docker_v2_signing_key))
-
- _write_manifest(ADMIN_ACCESS_USER, REPO, SECOND_TAG, second_manifest)
-
- # Delete all temp tags and perform GC.
- _perform_cleanup()
-
- # Ensure that the first, second and third blobs still exist, along with the second tag.
- assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
- assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
- assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None
-
- found_tag = registry_model.get_repo_tag(repository_ref, FIRST_TAG, include_legacy_image=True)
- assert found_tag is not None
- assert found_tag.legacy_image.docker_image_id == 'first'
-
- # Ensure the IDs have changed.
- found_tag = registry_model.get_repo_tag(repository_ref, SECOND_TAG, include_legacy_image=True)
- assert found_tag is not None
- assert found_tag.legacy_image.docker_image_id != 'second'
-
- # Create the fourth blob.
- fourth_blob_sha = 'sha256:' + hashlib.sha256("FOURTH").hexdigest()
- model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, fourth_blob_sha, location, 0, 0, 0)
-
- # Push the third manifest.
- third_manifest = (DockerSchema1ManifestBuilder(ADMIN_ACCESS_USER, REPO, THIRD_TAG)
- .add_layer(third_blob_sha, '{"id": "second", "parent": "first"}')
- .add_layer(fourth_blob_sha, '{"id": "first"}') # Note the change in BLOB from the second manifest.
- .build(docker_v2_signing_key))
-
- _write_manifest(ADMIN_ACCESS_USER, REPO, THIRD_TAG, third_manifest)
-
- # Delete all temp tags and perform GC.
- _perform_cleanup()
-
- # Ensure all blobs are present.
- assert registry_model.get_repo_blob_by_digest(repository_ref, first_blob_sha) is not None
- assert registry_model.get_repo_blob_by_digest(repository_ref, second_blob_sha) is not None
- assert registry_model.get_repo_blob_by_digest(repository_ref, third_blob_sha) is not None
- assert registry_model.get_repo_blob_by_digest(repository_ref, fourth_blob_sha) is not None
-
- # Ensure new synthesized IDs were created.
- second_tag = registry_model.get_repo_tag(repository_ref, SECOND_TAG, include_legacy_image=True)
- third_tag = registry_model.get_repo_tag(repository_ref, THIRD_TAG, include_legacy_image=True)
- assert second_tag.legacy_image.docker_image_id != third_tag.legacy_image.docker_image_id
diff --git a/endpoints/v2/test/test_v2_tuf.py b/endpoints/v2/test/test_v2_tuf.py
deleted file mode 100644
index efd0c0ce9..000000000
--- a/endpoints/v2/test/test_v2_tuf.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import pytest
-import flask
-
-from flask_principal import Identity, Principal
-from mock import Mock
-
-from auth import permissions
-from endpoints.v2.v2auth import _get_tuf_root
-from test import testconfig
-from util.security.registry_jwt import QUAY_TUF_ROOT, SIGNER_TUF_ROOT, DISABLED_TUF_ROOT
-
-
-
-def admin_identity(namespace, reponame):
- identity = Identity('admin')
- identity.provides.add(permissions._RepositoryNeed(namespace, reponame, 'admin'))
- identity.provides.add(permissions._OrganizationRepoNeed(namespace, 'admin'))
- return identity
-
-
-def write_identity(namespace, reponame):
- identity = Identity('writer')
- identity.provides.add(permissions._RepositoryNeed(namespace, reponame, 'write'))
- identity.provides.add(permissions._OrganizationRepoNeed(namespace, 'write'))
- return identity
-
-
-def read_identity(namespace, reponame):
- identity = Identity('reader')
- identity.provides.add(permissions._RepositoryNeed(namespace, reponame, 'read'))
- identity.provides.add(permissions._OrganizationRepoNeed(namespace, 'read'))
- return identity
-
-
-def app_with_principal():
- app = flask.Flask(__name__)
- app.config.from_object(testconfig.TestConfig())
- principal = Principal(app)
- return app, principal
-
-
-@pytest.mark.parametrize('identity,expected', [
- (Identity('anon'), QUAY_TUF_ROOT),
- (read_identity("namespace", "repo"), QUAY_TUF_ROOT),
- (read_identity("different", "repo"), QUAY_TUF_ROOT),
- (admin_identity("different", "repo"), QUAY_TUF_ROOT),
- (write_identity("different", "repo"), QUAY_TUF_ROOT),
- (admin_identity("namespace", "repo"), SIGNER_TUF_ROOT),
- (write_identity("namespace", "repo"), SIGNER_TUF_ROOT),
-])
-def test_get_tuf_root(identity, expected):
- app, principal = app_with_principal()
- with app.test_request_context('/'):
- principal.set_identity(identity)
- actual = _get_tuf_root(Mock(), "namespace", "repo")
- assert actual == expected, "should be %s, but was %s" % (expected, actual)
-
-
-@pytest.mark.parametrize('trust_enabled,tuf_root', [
- (True, QUAY_TUF_ROOT),
- (False, DISABLED_TUF_ROOT),
-])
-def test_trust_disabled(trust_enabled,tuf_root):
- app, principal = app_with_principal()
- with app.test_request_context('/'):
- principal.set_identity(read_identity("namespace", "repo"))
- actual = _get_tuf_root(Mock(trust_enabled=trust_enabled), "namespace", "repo")
- assert actual == tuf_root, "should be %s, but was %s" % (tuf_root, actual)
diff --git a/endpoints/v2/test/test_v2auth.py b/endpoints/v2/test/test_v2auth.py
deleted file mode 100644
index 60c8f34b1..000000000
--- a/endpoints/v2/test/test_v2auth.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import base64
-
-from flask import url_for
-
-from app import instance_keys, app as original_app
-from data.model.user import regenerate_robot_token, get_robot_and_metadata, get_user
-from endpoints.test.shared import conduct_call
-from util.security.registry_jwt import decode_bearer_token, CLAIM_TUF_ROOTS
-
-from test.fixtures import *
-
-
-def get_robot_password(username):
- parent_name, robot_shortname = username.split('+', 1)
- parent = get_user(parent_name)
- _, token, _ = get_robot_and_metadata(robot_shortname, parent)
- return token
-
-
-@pytest.mark.parametrize('scope, username, password, expected_code, expected_scopes', [
- # Invalid repository.
- ('repository:devtable/simple/foo/bar/baz:pull', 'devtable', 'password', 400, []),
-
- # Invalid scopes.
- ('some_invalid_scope', 'devtable', 'password', 400, []),
-
- # Invalid credentials.
- ('repository:devtable/simple:pull', 'devtable', 'invalid', 401, []),
-
- # Valid credentials.
- ('repository:devtable/simple:pull', 'devtable', 'password', 200,
- ['devtable/simple:pull']),
-
- ('repository:devtable/simple:push', 'devtable', 'password', 200,
- ['devtable/simple:push']),
-
- ('repository:devtable/simple:pull,push', 'devtable', 'password', 200,
- ['devtable/simple:push,pull']),
-
- ('repository:devtable/simple:pull,push,*', 'devtable', 'password', 200,
- ['devtable/simple:push,pull,*']),
-
- ('repository:buynlarge/orgrepo:pull,push,*', 'devtable', 'password', 200,
- ['buynlarge/orgrepo:push,pull,*']),
-
- ('', 'devtable', 'password', 200, []),
-
- # No credentials, non-public repo.
- ('repository:devtable/simple:pull', None, None, 200, ['devtable/simple:']),
-
- # No credentials, public repo.
- ('repository:public/publicrepo:pull', None, None, 200, ['public/publicrepo:pull']),
-
- # Reader only.
- ('repository:buynlarge/orgrepo:pull,push,*', 'reader', 'password', 200,
- ['buynlarge/orgrepo:pull']),
-
- # Unknown repository.
- ('repository:devtable/unknownrepo:pull,push', 'devtable', 'password', 200,
- ['devtable/unknownrepo:push,pull']),
-
- # Unknown repository in another namespace.
- ('repository:somenamespace/unknownrepo:pull,push', 'devtable', 'password', 200,
- ['somenamespace/unknownrepo:']),
-
- # Disabled namespace.
- (['repository:devtable/simple:pull,push', 'repository:disabled/complex:pull'],
- 'devtable', 'password', 405,
- []),
-
- # Multiple scopes.
- (['repository:devtable/simple:pull,push', 'repository:devtable/complex:pull'],
- 'devtable', 'password', 200,
- ['devtable/simple:push,pull', 'devtable/complex:pull']),
-
- # Multiple scopes with restricted behavior.
- (['repository:devtable/simple:pull,push', 'repository:public/publicrepo:pull,push'],
- 'devtable', 'password', 200,
- ['devtable/simple:push,pull', 'public/publicrepo:pull']),
-
- (['repository:devtable/simple:pull,push,*', 'repository:public/publicrepo:pull,push,*'],
- 'devtable', 'password', 200,
- ['devtable/simple:push,pull,*', 'public/publicrepo:pull']),
-
- # Read Only State
- ('repository:devtable/readonly:pull,push,*', 'devtable', 'password', 200,
- ['devtable/readonly:pull']),
-
- # Mirror State as a typical User
- ('repository:devtable/mirrored:pull,push,*', 'devtable', 'password', 200,
- ['devtable/mirrored:pull']),
-
- # Mirror State as the robot User should have write access
- ('repository:devtable/mirrored:pull,push,*', 'devtable+dtrobot', get_robot_password, 200,
- ['devtable/mirrored:push,pull']),
-
- # Organization repository, org admin
- ('repository:buynlarge/orgrepo:pull,push,*', 'devtable', 'password', 200,
- ['buynlarge/orgrepo:push,pull,*']),
-
- # Organization repository, org creator
- ('repository:buynlarge/orgrepo:pull,push,*', 'creator', 'password', 200,
- ['buynlarge/orgrepo:']),
-
- # Organization repository, org reader
- ('repository:buynlarge/orgrepo:pull,push,*', 'reader', 'password', 200,
- ['buynlarge/orgrepo:pull']),
-
- # Organization repository, freshuser
- ('repository:buynlarge/orgrepo:pull,push,*', 'freshuser', 'password', 200,
- ['buynlarge/orgrepo:']),
-])
-def test_generate_registry_jwt(scope, username, password, expected_code, expected_scopes,
- app, client):
- params = {
- 'service': original_app.config['SERVER_HOSTNAME'],
- 'scope': scope,
- }
-
- if callable(password):
- password = password(username)
-
- headers = {}
- if username and password:
- headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (username, password)))
-
- resp = conduct_call(client, 'v2.generate_registry_jwt', url_for, 'GET', params, {}, expected_code,
- headers=headers)
- if expected_code != 200:
- return
-
- token = resp.json['token']
- decoded = decode_bearer_token(token, instance_keys, original_app.config)
- assert decoded['iss'] == 'quay'
- assert decoded['aud'] == original_app.config['SERVER_HOSTNAME']
- assert decoded['sub'] == username if username else '(anonymous)'
-
- expected_access = []
- for scope in expected_scopes:
- name, actions_str = scope.split(':')
- actions = actions_str.split(',') if actions_str else []
-
- expected_access.append({
- 'type': 'repository',
- 'name': name,
- 'actions': actions,
- })
-
- assert decoded['access'] == expected_access
- assert len(decoded['context'][CLAIM_TUF_ROOTS]) == len(expected_scopes)
diff --git a/endpoints/v2/v2auth.py b/endpoints/v2/v2auth.py
index c3a6aa3ce..45f248961 100644
--- a/endpoints/v2/v2auth.py
+++ b/endpoints/v2/v2auth.py
@@ -1,43 +1,41 @@
import logging
import re
-from collections import namedtuple
-from cachetools.func import lru_cache
-from flask import request, jsonify
+from cachetools import lru_cache
+from flask import request, jsonify, abort
-import features
from app import app, userevents, instance_keys
-from auth.auth_context import get_authenticated_context, get_authenticated_user
-from auth.decorators import process_basic_auth
+from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token
from auth.permissions import (ModifyRepositoryPermission, ReadRepositoryPermission,
CreateRepositoryPermission, AdministerRepositoryPermission)
-from data import model
-from data.database import RepositoryState
-from data.registry_model import registry_model
-from data.registry_model.datatypes import RepositoryReference
-from data.model.repo_mirror import get_mirroring_robot
-from endpoints.decorators import anon_protect
+from auth.process import process_auth
from endpoints.v2 import v2_bp
-from endpoints.v2.errors import (InvalidLogin, NameInvalid, InvalidRequest, Unsupported,
- Unauthorized, NamespaceDisabled)
+from endpoints.decorators import anon_protect
+from data.interfaces.v2 import pre_oci_model as model
from util.cache import no_cache
from util.names import parse_namespace_repository, REPOSITORY_NAME_REGEX
-from util.security.registry_jwt import (generate_bearer_token, build_context_and_subject,
- QUAY_TUF_ROOT, SIGNER_TUF_ROOT, DISABLED_TUF_ROOT)
+from util.security.registry_jwt import generate_bearer_token, build_context_and_subject
+
logger = logging.getLogger(__name__)
-TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour
-SCOPE_REGEX_TEMPLATE = r'^repository:((?:{}\/)?((?:[\.a-zA-Z0-9_\-]+\/)*[\.a-zA-Z0-9_\-]+)):((?:push|pull|\*)(?:,(?:push|pull|\*))*)$'
-scopeResult = namedtuple('scopeResult', ['actions', 'namespace', 'repository', 'registry_and_repo',
- 'tuf_root'])
+TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour
+SCOPE_REGEX_TEMPLATE = r'^repository:((?:{}\/)?((?:[\.a-zA-Z0-9_\-]+\/)?[\.a-zA-Z0-9_\-]+)):((?:push|pull|\*)(?:,(?:push|pull|\*))*)$'
+
+
+@lru_cache(maxsize=1)
+def get_scope_regex():
+ hostname = re.escape(app.config['SERVER_HOSTNAME'])
+ scope_regex_string = SCOPE_REGEX_TEMPLATE.format(hostname)
+ return re.compile(scope_regex_string)
+
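
To make the template concrete, a quick illustration of how a scope string decomposes under this regex; the repository and actions are hypothetical values.

    m = get_scope_regex().match('repository:devtable/simple:pull,push')
    registry_and_repo = m.group(1)   # 'devtable/simple'
    namespace_and_repo = m.group(2)  # 'devtable/simple'
    actions = m.group(3).split(',')  # ['pull', 'push']
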
@v2_bp.route('/auth')
-@process_basic_auth
+@process_auth
@no_cache
@anon_protect
-def generate_registry_jwt(auth_result):
+def generate_registry_jwt():
"""
This endpoint will generate a JWT conforming to the Docker Registry v2 Auth Spec:
https://docs.docker.com/registry/spec/auth/token/
@@ -45,217 +43,121 @@ def generate_registry_jwt(auth_result):
audience_param = request.args.get('service')
logger.debug('Request audience: %s', audience_param)
- scope_params = request.args.getlist('scope') or []
- logger.debug('Scope request: %s', scope_params)
+ scope_param = request.args.get('scope') or ''
+ logger.debug('Scope request: %s', scope_param)
- auth_header = request.headers.get('authorization', '')
- auth_credentials_sent = bool(auth_header)
+ user = get_authenticated_user()
+ logger.debug('Authenticated user: %s', user)
- # Load the auth context and verify that we've directly received credentials.
- has_valid_auth_context = False
- if get_authenticated_context():
- has_valid_auth_context = not get_authenticated_context().is_anonymous
+ token = get_validated_token()
+ logger.debug('Authenticated token: %s', token)
- if auth_credentials_sent and not has_valid_auth_context:
+ oauthtoken = get_validated_oauth_token()
+ logger.debug('Authenticated OAuth token: %s', oauthtoken)
+
+ auth_credentials_sent = bool(request.headers.get('authorization', ''))
+ if auth_credentials_sent and not user and not token:
# The auth credentials sent for the user are invalid.
- raise InvalidLogin(auth_result.error_message)
+ logger.debug('Invalid auth credentials')
+ abort(401)
- if not has_valid_auth_context and len(scope_params) == 0:
- # In this case, we are doing an auth flow, and it's not an anonymous pull.
- logger.debug('No user and no token sent for empty scope list')
- raise Unauthorized()
-
- # Build the access list for the authenticated context.
access = []
- scope_results = []
- for scope_param in scope_params:
- scope_result = _authorize_or_downscope_request(scope_param, has_valid_auth_context)
- if scope_result is None:
- continue
-
- scope_results.append(scope_result)
- access.append({
- 'type': 'repository',
- 'name': scope_result.registry_and_repo,
- 'actions': scope_result.actions,
- })
-
- # Issue user events.
user_event_data = {
'action': 'login',
}
- # Set the user event data for when authed.
- if len(scope_results) > 0:
- if 'push' in scope_results[0].actions:
+ if len(scope_param) > 0:
+ match = get_scope_regex().match(scope_param)
+ if match is None:
+ logger.debug('Match: %s', match)
+ logger.debug('len: %s', len(scope_param))
+ logger.warning('Unable to decode repository and actions: %s', scope_param)
+ abort(400)
+
+ logger.debug('Match: %s', match.groups())
+
+ registry_and_repo = match.group(1)
+ namespace_and_repo = match.group(2)
+ actions = match.group(3).split(',')
+
+ lib_namespace = app.config['LIBRARY_NAMESPACE']
+ namespace, reponame = parse_namespace_repository(namespace_and_repo, lib_namespace)
+
+ # Ensure that we are never creating an invalid repository.
+ if not REPOSITORY_NAME_REGEX.match(reponame):
+ logger.debug('Found invalid repository name in auth flow: %s', reponame)
+ abort(400)
+
+ final_actions = []
+
+ if 'push' in actions:
+ # If there is no valid user or token, then the repository cannot be
+ # accessed.
+ if user is not None or token is not None:
+ # Lookup the repository. If it exists, make sure the entity has modify
+ # permission. Otherwise, make sure the entity has create permission.
+ repo = model.get_repository(namespace, reponame)
+ if repo:
+ if ModifyRepositoryPermission(namespace, reponame).can():
+ final_actions.append('push')
+ else:
+ logger.debug('No permission to modify repository %s/%s', namespace, reponame)
+ else:
+ if CreateRepositoryPermission(namespace).can() and user is not None:
+ logger.debug('Creating repository: %s/%s', namespace, reponame)
+ model.create_repository(namespace, reponame, user)
+ final_actions.append('push')
+ else:
+ logger.debug('No permission to create repository %s/%s', namespace, reponame)
+
+ if 'pull' in actions:
+ # Grant pull if the user can read the repo or it is public.
+ if (ReadRepositoryPermission(namespace, reponame).can() or
+ model.repository_is_public(namespace, reponame)):
+ final_actions.append('pull')
+ else:
+ logger.debug('No permission to pull repository %s/%s', namespace, reponame)
+
+ if '*' in actions:
+ # Grant * only if the user can administer the repository.
+ if (AdministerRepositoryPermission(namespace, reponame).can()):
+ final_actions.append('*')
+ else:
+ logger.debug("No permission to administer repository %s/%s", namespace, reponame)
+
+ # Add the access for the JWT.
+ access.append({
+ 'type': 'repository',
+ 'name': registry_and_repo,
+ 'actions': final_actions,
+ })
+
+ # Set the user event data for the auth.
+ if 'push' in final_actions:
user_action = 'push_start'
- elif 'pull' in scope_results[0].actions:
+ elif 'pull' in final_actions:
user_action = 'pull_start'
else:
user_action = 'login'
user_event_data = {
'action': user_action,
- 'namespace': scope_results[0].namespace,
- 'repository': scope_results[0].repository,
+ 'repository': reponame,
+ 'namespace': namespace,
}
+ elif user is None and token is None:
+ # In this case, we are doing an auth flow, and it's not an anonymous pull
+ logger.debug('No user and no token sent for empty scope list')
+ abort(401)
+
# Send the user event.
- if get_authenticated_user() is not None:
- event = userevents.get_event(get_authenticated_user().username)
+ if user is not None:
+ event = userevents.get_event(user.username)
event.publish_event_data('docker-cli', user_event_data)
# Build the signed JWT.
- tuf_roots = {'%s/%s' % (scope_result.namespace, scope_result.repository): scope_result.tuf_root
- for scope_result in scope_results}
- context, subject = build_context_and_subject(get_authenticated_context(), tuf_roots=tuf_roots)
+ context, subject = build_context_and_subject(user, token, oauthtoken)
token = generate_bearer_token(audience_param, subject, context, access,
TOKEN_VALIDITY_LIFETIME_S, instance_keys)
return jsonify({'token': token})
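
End to end, the exchange this endpoint implements looks like the following from a client, per the Docker Registry v2 auth spec linked above; the hostname and credentials are hypothetical. (Python 2 string handling, as in this codebase.)

    import base64
    import requests

    creds = base64.b64encode('devtable:password')  # hypothetical credentials
    resp = requests.get(
        'https://quay.example.com/v2/auth',
        params={
            'service': 'quay.example.com',
            'scope': 'repository:devtable/simple:pull',
        },
        headers={'Authorization': 'Basic %s' % creds})
    bearer = resp.json()['token']  # then sent as 'Authorization: Bearer <token>' to /v2/ routes
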
-
-
-@lru_cache(maxsize=1)
-def _get_scope_regex():
- hostname = re.escape(app.config['SERVER_HOSTNAME'])
- scope_regex_string = SCOPE_REGEX_TEMPLATE.format(hostname)
- return re.compile(scope_regex_string)
-
-
-def _get_tuf_root(repository_ref, namespace, reponame):
- if not features.SIGNING or repository_ref is None or not repository_ref.trust_enabled:
- return DISABLED_TUF_ROOT
-
- # Users with write access to a repository will see signer-rooted TUF metadata
- if ModifyRepositoryPermission(namespace, reponame).can():
- return SIGNER_TUF_ROOT
- return QUAY_TUF_ROOT
-
-
-def _authorize_or_downscope_request(scope_param, has_valid_auth_context):
- # TODO: The complexity of this function is difficult to follow and maintain. Refactor/Cleanup.
- if len(scope_param) == 0:
- if not has_valid_auth_context:
- # In this case, we are doing an auth flow, and it's not an anonymous pull.
- logger.debug('No user and no token sent for empty scope list')
- raise Unauthorized()
-
- return None
-
- match = _get_scope_regex().match(scope_param)
- if match is None:
- logger.debug('Match: %s', match)
- logger.debug('len: %s', len(scope_param))
- logger.warning('Unable to decode repository and actions: %s', scope_param)
- raise InvalidRequest('Unable to decode repository and actions: %s' % scope_param)
-
- logger.debug('Match: %s', match.groups())
-
- registry_and_repo = match.group(1)
- namespace_and_repo = match.group(2)
- requested_actions = match.group(3).split(',')
-
- lib_namespace = app.config['LIBRARY_NAMESPACE']
- namespace, reponame = parse_namespace_repository(namespace_and_repo, lib_namespace)
-
- # Ensure that we are never creating an invalid repository.
- if not REPOSITORY_NAME_REGEX.match(reponame):
- logger.debug('Found invalid repository name in auth flow: %s', reponame)
- if len(namespace_and_repo.split('/')) > 1:
- msg = 'Nested repositories are not supported. Found: %s' % namespace_and_repo
- raise NameInvalid(message=msg)
-
- raise NameInvalid(message='Invalid repository name: %s' % namespace_and_repo)
-
- # Ensure the namespace is enabled.
- if registry_model.is_existing_disabled_namespace(namespace):
- msg = 'Namespace %s has been disabled. Please contact a system administrator.' % namespace
- raise NamespaceDisabled(message=msg)
-
- final_actions = []
-
- repository_ref = registry_model.lookup_repository(namespace, reponame)
- repo_is_public = repository_ref is not None and repository_ref.is_public
- invalid_repo_message = ''
- if repository_ref is not None and repository_ref.kind != 'image':
- invalid_repo_message = ((
- 'This repository is for managing %s ' + 'and not container images.') % repository_ref.kind)
-
- if 'push' in requested_actions:
- # Check if there is a valid user or token, as otherwise the repository cannot be
- # accessed.
- if has_valid_auth_context:
- user = get_authenticated_user()
-
- # Lookup the repository. If it exists, make sure the entity has modify
- # permission. Otherwise, make sure the entity has create permission.
- if repository_ref:
- if ModifyRepositoryPermission(namespace, reponame).can():
- if repository_ref is not None and repository_ref.kind != 'image':
- raise Unsupported(message=invalid_repo_message)
-
- # Check for different repository states.
- if repository_ref.state == RepositoryState.NORMAL:
- # In NORMAL mode, if the user has permission, then they can push.
- final_actions.append('push')
- elif repository_ref.state == RepositoryState.MIRROR:
- # In MIRROR mode, only the mirroring robot can push.
- mirror = model.repo_mirror.get_mirror(repository_ref.id)
- robot = mirror.internal_robot if mirror is not None else None
- if robot is not None and user is not None and robot == user:
- assert robot.robot
- final_actions.append('push')
- else:
- logger.debug('Repository %s/%s push requested for non-mirror robot %s: %s', namespace,
- reponame, robot, user)
- elif repository_ref.state == RepositoryState.READ_ONLY:
- # No pushing allowed in read-only state.
- pass
- else:
- logger.warning('Unknown state for repository %s: %s', repository_ref, repository_ref.state)
- else:
- logger.debug('No permission to modify repository %s/%s', namespace, reponame)
- else:
- # TODO: Push-to-create functionality should be configurable
- if CreateRepositoryPermission(namespace).can() and user is not None:
- logger.debug('Creating repository: %s/%s', namespace, reponame)
- repository_ref = RepositoryReference.for_repo_obj(
- model.repository.create_repository(namespace, reponame, user))
- final_actions.append('push')
- else:
- logger.debug('No permission to create repository %s/%s', namespace, reponame)
-
- if 'pull' in requested_actions:
- # Grant pull if the user can read the repo or it is public.
- if ReadRepositoryPermission(namespace, reponame).can() or repo_is_public:
- if repository_ref is not None and repository_ref.kind != 'image':
- raise Unsupported(message=invalid_repo_message)
-
- final_actions.append('pull')
- else:
- logger.debug('No permission to pull repository %s/%s', namespace, reponame)
-
- if '*' in requested_actions:
- # Grant * user is admin
- if AdministerRepositoryPermission(namespace, reponame).can():
- if repository_ref is not None and repository_ref.kind != 'image':
- raise Unsupported(message=invalid_repo_message)
-
- if repository_ref and repository_ref.state in (RepositoryState.MIRROR,
- RepositoryState.READ_ONLY):
- logger.debug('No permission to administer repository %s/%s', namespace, reponame)
- else:
- assert repository_ref.state == RepositoryState.NORMAL
- final_actions.append('*')
- else:
- logger.debug("No permission to administer repository %s/%s", namespace, reponame)
-
- # Final sanity checks.
- if 'push' in final_actions:
- assert repository_ref.state != RepositoryState.READ_ONLY
-
- if '*' in final_actions:
- assert repository_ref.state == RepositoryState.NORMAL
-
- return scopeResult(actions=final_actions, namespace=namespace, repository=reponame,
- registry_and_repo=registry_and_repo,
- tuf_root=_get_tuf_root(repository_ref, namespace, reponame))
diff --git a/endpoints/verbs/__init__.py b/endpoints/verbs/__init__.py
index 1a7898ab8..ff2c28f76 100644
--- a/endpoints/verbs/__init__.py
+++ b/endpoints/verbs/__init__.py
@@ -1,51 +1,37 @@
-import hashlib
-import json
import logging
-import uuid
+import hashlib
from flask import redirect, Blueprint, abort, send_file, make_response, request
import features
-from app import app, signer, storage, metric_queue, config_provider, ip_resolver, instance_keys
+from app import app, signer, storage, metric_queue, license_validator
from auth.auth_context import get_authenticated_user
-from auth.decorators import process_auth
from auth.permissions import ReadRepositoryPermission
+from auth.process import process_auth
from data import database
-from data import model
-from data.registry_model import registry_model
-from endpoints.decorators import (anon_protect, anon_allowed, route_show_if, parse_repository_name,
- check_region_blacklisted)
+from data.interfaces.verbs import pre_oci_model as model
+from endpoints.common import route_show_if, parse_repository_name
+from endpoints.decorators import anon_protect
+from endpoints.trackhelper import track_and_log
from endpoints.v2.blob import BLOB_DIGEST_ROUTE
from image.appc import AppCImageFormatter
-from image.docker import ManifestException
from image.docker.squashed import SquashedDockerImageFormatter
from storage import Storage
-from util.audit import track_and_log, wrap_repository
-from util.http import exact_abort
from util.registry.filelike import wrap_with_handler
from util.registry.queuefile import QueueFile
from util.registry.queueprocess import QueueProcess
-from util.registry.tarlayerformat import TarLayerFormatterReporter
from util.registry.torrent import (make_torrent, per_user_torrent_filename, public_torrent_filename,
- PieceHasher, TorrentConfiguration)
+ PieceHasher)
+
logger = logging.getLogger(__name__)
verbs = Blueprint('verbs', __name__)
-
-LAYER_MIMETYPE = 'binary/octet-stream'
+license_validator.enforce_license_before_request(verbs)
-class VerbReporter(TarLayerFormatterReporter):
- def __init__(self, kind):
- self.kind = kind
-
- def report_pass(self, pass_count):
- metric_queue.verb_action_passes.Inc(labelvalues=[self.kind, pass_count])
-
-
-def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, reporter):
+def _open_stream(formatter, repo_image, tag, derived_image_id, handlers):
"""
This method generates a stream of data which will be replicated and read from the queue files.
This method runs in a separate process.
@@ -53,27 +39,24 @@ def _open_stream(formatter, tag, schema1_manifest, derived_image_id, handlers, r
# For performance reasons, we load the full image list here, cache it, then disconnect from
# the database.
with database.UseThenDisconnect(app.config):
- layers = registry_model.list_parsed_manifest_layers(tag.repository, schema1_manifest, storage,
- include_placements=True)
+ image_list = list(model.get_manifest_layers_with_blobs(repo_image))
- def image_stream_getter(store, blob):
- def get_stream_for_storage():
- current_image_stream = store.stream_read_file(blob.placements, blob.storage_path)
- logger.debug('Returning blob %s: %s', blob.digest, blob.storage_path)
- return current_image_stream
- return get_stream_for_storage
+ def get_next_image():
+ for current_image in image_list:
+ yield current_image
- def tar_stream_getter_iterator():
+ def get_next_layer():
# Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
- store = Storage(app, metric_queue, config_provider=config_provider, ip_resolver=ip_resolver)
+ store = Storage(app, metric_queue)
+ for current_image in image_list:
+ current_image_path = model.get_blob_path(current_image.blob)
+ current_image_stream = store.stream_read_file(current_image.blob.locations,
+ current_image_path)
- # Note: We reverse because we have to start at the leaf layer and move upward,
- # as per the spec for the formatters.
- for layer in reversed(layers):
- yield image_stream_getter(store, layer.blob)
+ logger.debug('Returning image layer %s: %s', current_image.image_id, current_image_path)
+ yield current_image_stream
- stream = formatter.build_stream(tag, schema1_manifest, derived_image_id, layers,
- tar_stream_getter_iterator, reporter=reporter)
+ stream = formatter.build_stream(repo_image, tag, derived_image_id, get_next_image, get_next_layer)
for handler_fn in handlers:
stream = wrap_with_handler(stream, handler_fn)
@@ -88,40 +71,32 @@ def _sign_derived_image(verb, derived_image, queue_file):
try:
signature = signer.detached_sign(queue_file)
except:
- logger.exception('Exception when signing %s derived image %s', verb, derived_image)
+ logger.exception('Exception when signing %s derived image %s', verb, derived_image.ref)
return
# Setup the database (since this is a new process) and then disconnect immediately
# once the operation completes.
if not queue_file.raised_exception:
with database.UseThenDisconnect(app.config):
- registry_model.set_derived_image_signature(derived_image, signer.name, signature)
+ model.set_derived_image_signature(derived_image, signer.name, signature)
def _write_derived_image_to_storage(verb, derived_image, queue_file):
""" Read from the generated stream and write it back to the storage engine. This method runs in a
separate process.
"""
-
def handle_exception(ex):
- logger.debug('Exception when building %s derived image %s: %s', verb, derived_image, ex)
+ logger.debug('Exception when building %s derived image %s: %s', verb, derived_image.ref, ex)
with database.UseThenDisconnect(app.config):
- registry_model.delete_derived_image(derived_image)
+ model.delete_derived_image(derived_image)
queue_file.add_exception_handler(handle_exception)
# Re-Initialize the storage engine because some may not respond well to forking (e.g. S3)
- store = Storage(app, metric_queue, config_provider=config_provider, ip_resolver=ip_resolver)
-
- try:
- store.stream_write(derived_image.blob.placements, derived_image.blob.storage_path, queue_file)
- except IOError as ex:
- logger.debug('Exception when writing %s derived image %s: %s', verb, derived_image, ex)
-
- with database.UseThenDisconnect(app.config):
- registry_model.delete_derived_image(derived_image)
-
+ store = Storage(app, metric_queue)
+ image_path = model.get_blob_path(derived_image.blob)
+ store.stream_write(derived_image.blob.locations, image_path, queue_file)
queue_file.close()
@@ -130,23 +105,21 @@ def _torrent_for_blob(blob, is_public):
with an error if the state is not valid (e.g. non-public, non-user request).
"""
# Make sure the storage has a size.
- if not blob.compressed_size:
+ if not blob.size:
abort(404)
# Lookup the torrent information for the storage.
- torrent_info = registry_model.get_torrent_info(blob)
+ torrent_info = model.get_torrent_info(blob)
if torrent_info is None:
abort(404)
# Lookup the webseed path for the storage.
- webseed = storage.get_direct_download_url(blob.placements, blob.storage_path,
+ path = model.get_blob_path(blob)
+ webseed = storage.get_direct_download_url(blob.locations, path,
expires_in=app.config['BITTORRENT_WEBSEED_LIFETIME'])
if webseed is None:
# We cannot support webseeds for storages that cannot provide direct downloads.
- exact_abort(501, 'Storage engine does not support seeding.')
-
- # Load the config for building torrents.
- torrent_config = TorrentConfiguration.from_app_config(instance_keys, app.config)
+ abort(make_response('Storage engine does not support seeding.', 501))
# Build the filename for the torrent.
if is_public:
@@ -156,20 +129,19 @@ def _torrent_for_blob(blob, is_public):
if not user:
abort(403)
- name = per_user_torrent_filename(torrent_config, user.uuid, blob.uuid)
+ name = per_user_torrent_filename(user.uuid, blob.uuid)
# Return the torrent file.
- torrent_file = make_torrent(torrent_config, name, webseed, blob.compressed_size,
- torrent_info.piece_length, torrent_info.pieces)
+ torrent_file = make_torrent(name, webseed, blob.size, torrent_info.piece_length,
+ torrent_info.pieces)
- headers = {
- 'Content-Type': 'application/x-bittorrent',
- 'Content-Disposition': 'attachment; filename={0}.torrent'.format(name)}
+ headers = {'Content-Type': 'application/x-bittorrent',
+ 'Content-Disposition': 'attachment; filename={0}.torrent'.format(name)}
return make_response(torrent_file, 200, headers)
-def _torrent_repo_verb(repository, tag, manifest, verb, **kwargs):
+def _torrent_repo_verb(repo_image, tag, verb, **kwargs):
""" Handles returning a torrent for the given verb on the given image and tag. """
if not features.BITTORRENT:
# Torrent feature is not enabled.
@@ -177,89 +149,45 @@ def _torrent_repo_verb(repository, tag, manifest, verb, **kwargs):
# Lookup an *existing* derived storage for the verb. If the verb's image storage doesn't exist,
# we cannot create it here, so we 406.
- derived_image = registry_model.lookup_derived_image(manifest, verb, storage,
- varying_metadata={'tag': tag.name},
- include_placements=True)
+ derived_image = model.lookup_derived_image(repo_image, verb, varying_metadata={'tag': tag})
if derived_image is None:
abort(406)
# Return the torrent.
- torrent = _torrent_for_blob(derived_image.blob, model.repository.is_repository_public(repository))
+ public_repo = model.repository_is_public(repo_image.repository.namespace_name,
+ repo_image.repository.name)
+ torrent = _torrent_for_blob(derived_image.blob, public_repo)
# Log the action.
- track_and_log('repo_verb', wrap_repository(repository), tag=tag.name, verb=verb, torrent=True,
- **kwargs)
+ track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, torrent=True, **kwargs)
return torrent
-def _verify_repo_verb(_, namespace, repo_name, tag_name, verb, checker=None):
- permission = ReadRepositoryPermission(namespace, repo_name)
- repo = model.repository.get_repository(namespace, repo_name)
- repo_is_public = repo is not None and model.repository.is_repository_public(repo)
- if not permission.can() and not repo_is_public:
- logger.debug('No permission to read repository %s/%s for user %s with verb %s', namespace,
- repo_name, get_authenticated_user(), verb)
+def _verify_repo_verb(_, namespace, repository, tag, verb, checker=None):
+ permission = ReadRepositoryPermission(namespace, repository)
+ if not permission.can() and not model.repository_is_public(namespace, repository):
abort(403)
- if repo is not None and repo.kind.name != 'image':
- logger.debug('Repository %s/%s for user %s is not an image repo', namespace, repo_name,
- get_authenticated_user())
- abort(405)
-
- # Make sure the repo's namespace isn't disabled.
- if not registry_model.is_namespace_enabled(namespace):
- abort(400)
-
# Lookup the requested tag.
- repo_ref = registry_model.lookup_repository(namespace, repo_name)
- if repo_ref is None:
- abort(404)
-
- tag = registry_model.get_repo_tag(repo_ref, tag_name)
- if tag is None:
- logger.debug('Tag %s does not exist in repository %s/%s for user %s', tag, namespace, repo_name,
- get_authenticated_user())
- abort(404)
-
- # Get its associated manifest.
- manifest = registry_model.get_manifest_for_tag(tag, backfill_if_necessary=True)
- if manifest is None:
- logger.debug('Could not get manifest on %s/%s:%s::%s', namespace, repo_name, tag.name, verb)
- abort(404)
-
- # Retrieve the schema1-compatible version of the manifest.
- try:
- schema1_manifest = registry_model.get_schema1_parsed_manifest(manifest, namespace,
- repo_name, tag.name,
- storage)
- except ManifestException:
- logger.exception('Could not get manifest on %s/%s:%s::%s', namespace, repo_name, tag.name, verb)
- abort(400)
-
- if schema1_manifest is None:
+ tag_image = model.get_tag_image(namespace, repository, tag)
+ if tag_image is None:
abort(404)
# If there is a data checker, call it first.
if checker is not None:
- if not checker(tag, schema1_manifest):
- logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repo_name, tag.name, verb)
+ if not checker(tag_image):
+ logger.debug('Check mismatch on %s/%s:%s, verb %s', namespace, repository, tag, verb)
abort(404)
- # Preload the tag's repository information, so it gets cached.
- assert tag.repository.namespace_name
- assert tag.repository.name
-
- return tag, manifest, schema1_manifest
+ return tag_image
-def _repo_verb_signature(namespace, repository, tag_name, verb, checker=None, **kwargs):
- # Verify that the tag exists and that we have access to it.
- tag, manifest, _ = _verify_repo_verb(storage, namespace, repository, tag_name, verb, checker)
-
- # Find the derived image storage for the verb.
- derived_image = registry_model.lookup_derived_image(manifest, verb, storage,
- varying_metadata={'tag': tag.name})
+def _repo_verb_signature(namespace, repository, tag, verb, checker=None, **kwargs):
+ # Verify that the image exists and that we have access to it.
+ repo_image = _verify_repo_verb(storage, namespace, repository, tag, verb, checker)
+ # Find the derived image storage for the verb.
+ derived_image = model.lookup_derived_image(repo_image, verb, varying_metadata={'tag': tag})
if derived_image is None or derived_image.blob.uploading:
return make_response('', 202)
@@ -268,7 +196,7 @@ def _repo_verb_signature(namespace, repository, tag_name, verb, checker=None, **
abort(404)
# Lookup the signature for the verb.
- signature_value = registry_model.get_derived_image_signature(derived_image, signer.name)
+ signature_value = model.get_derived_image_signature(derived_image, signer.name)
if signature_value is None:
abort(404)
@@ -276,74 +204,44 @@ def _repo_verb_signature(namespace, repository, tag_name, verb, checker=None, **
return make_response(signature_value)
-@check_region_blacklisted()
-def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, checker=None,
- **kwargs):
+def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker=None, **kwargs):
# Verify that the image exists and that we have access to it.
- logger.debug('Verifying repo verb %s for repository %s/%s with user %s with mimetype %s',
- verb, namespace, repository, get_authenticated_user(), request.accept_mimetypes.best)
- tag, manifest, schema1_manifest = _verify_repo_verb(storage, namespace, repository,
- tag_name, verb, checker)
-
- # Load the repository for later.
- repo = model.repository.get_repository(namespace, repository)
- if repo is None:
- abort(404)
+ repo_image = _verify_repo_verb(storage, namespace, repository, tag, verb, checker)
# Check for torrent. If found, we return a torrent for the repo verb image (if the derived
# image already exists).
if request.accept_mimetypes.best == 'application/x-bittorrent':
metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb + '+torrent', True])
- return _torrent_repo_verb(repo, tag, manifest, verb, **kwargs)
+ return _torrent_repo_verb(repo_image, tag, verb, **kwargs)
# Log the action.
- track_and_log('repo_verb', wrap_repository(repo), tag=tag.name, verb=verb, **kwargs)
+ track_and_log('repo_verb', repo_image.repository, tag=tag, verb=verb, **kwargs)
metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb, True])
- is_readonly = app.config.get('REGISTRY_STATE', 'normal') == 'readonly'
-
# Lookup/create the derived image for the verb and repo image.
- if is_readonly:
- derived_image = registry_model.lookup_derived_image(
- manifest, verb, storage,
- varying_metadata={'tag': tag.name},
- include_placements=True)
- else:
- derived_image = registry_model.lookup_or_create_derived_image(
- manifest, verb, storage.preferred_locations[0], storage,
- varying_metadata={'tag': tag.name},
- include_placements=True)
- if derived_image is None:
- logger.error('Could not create or lookup a derived image for manifest %s', manifest)
- abort(400)
-
- if derived_image is not None and not derived_image.blob.uploading:
- logger.debug('Derived %s image %s exists in storage', verb, derived_image)
+ derived_image = model.lookup_or_create_derived_image(repo_image, verb,
+ storage.preferred_locations[0],
+ varying_metadata={'tag': tag})
+ if not derived_image.blob.uploading:
+ logger.debug('Derived %s image %s exists in storage', verb, derived_image.ref)
+ derived_layer_path = model.get_blob_path(derived_image.blob)
is_head_request = request.method == 'HEAD'
-
- metric_queue.pull_byte_count.Inc(derived_image.blob.compressed_size, labelvalues=[verb])
-
- download_url = storage.get_direct_download_url(derived_image.blob.placements,
- derived_image.blob.storage_path,
+ download_url = storage.get_direct_download_url(derived_image.blob.locations, derived_layer_path,
head=is_head_request)
if download_url:
- logger.debug('Redirecting to download URL for derived %s image %s', verb, derived_image)
+ logger.debug('Redirecting to download URL for derived %s image %s', verb, derived_image.ref)
return redirect(download_url)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
- logger.debug('Sending cached derived %s image %s', verb, derived_image)
- return send_file(
- storage.stream_read_file(derived_image.blob.placements, derived_image.blob.storage_path),
- mimetype=LAYER_MIMETYPE)
+ logger.debug('Sending cached derived %s image %s', verb, derived_image.ref)
+ return send_file(storage.stream_read_file(derived_image.blob.locations, derived_layer_path))
- logger.debug('Building and returning derived %s image', verb)
+ logger.debug('Building and returning derived %s image %s', verb, derived_image.ref)
- # Close the database connection before any process forking occurs. This is important because
- # the Postgres driver does not react kindly to forking, so we need to make sure it is closed
- # so that each process will get its own unique connection.
- database.close_db_filter(None)
+ # Calculate a derived image ID.
+ derived_image_id = hashlib.sha256(repo_image.image_id + ':' + verb).hexdigest()
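+ # Hashing the image ID together with the verb yields a deterministic ID, so repeated
+ # requests for the same image/verb pair resolve to the same derived image.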
def _cleanup():
# Close any existing DB connection once the process has exited.
@@ -352,68 +250,48 @@ def _repo_verb(namespace, repository, tag_name, verb, formatter, sign=False, che
hasher = PieceHasher(app.config['BITTORRENT_PIECE_SIZE'])
def _store_metadata_and_cleanup():
- if is_readonly:
- return
-
with database.UseThenDisconnect(app.config):
- registry_model.set_torrent_info(derived_image.blob, app.config['BITTORRENT_PIECE_SIZE'],
- hasher.final_piece_hashes())
- registry_model.set_derived_image_size(derived_image, hasher.hashed_bytes)
+ model.set_torrent_info(derived_image.blob, app.config['BITTORRENT_PIECE_SIZE'],
+ hasher.final_piece_hashes())
+ model.set_blob_size(derived_image.blob, hasher.hashed_bytes)
# Create a queue process to generate the data. The queue files will read from the process
# and send the results to the client and storage.
- unique_id = (derived_image.unique_id
- if derived_image is not None
- else hashlib.sha256('%s:%s' % (verb, uuid.uuid4())).hexdigest())
handlers = [hasher.update]
- reporter = VerbReporter(verb)
- args = (formatter, tag, schema1_manifest, unique_id, handlers, reporter)
- queue_process = QueueProcess(
- _open_stream,
- 8 * 1024,
- 10 * 1024 * 1024, # 8K/10M chunk/max
- args,
- finished=_store_metadata_and_cleanup)
+ args = (formatter, repo_image, tag, derived_image_id, handlers)
+ queue_process = QueueProcess(_open_stream,
+ 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max
+ args, finished=_store_metadata_and_cleanup)
client_queue_file = QueueFile(queue_process.create_queue(), 'client')
+ storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
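+ # Each QueueFile receives its own copy of the generated stream: one copy feeds the
+ # client response while the other is written back to storage below.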
- if not is_readonly:
- storage_queue_file = QueueFile(queue_process.create_queue(), 'storage')
-
- # If signing is required, add a QueueFile for signing the image as we stream it out.
- signing_queue_file = None
- if sign and signer.name:
- signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
+ # If signing is required, add a QueueFile for signing the image as we stream it out.
+ signing_queue_file = None
+ if sign and signer.name:
+ signing_queue_file = QueueFile(queue_process.create_queue(), 'signing')
# Start building.
queue_process.run()
# Start the storage saving.
- if not is_readonly:
- storage_args = (verb, derived_image, storage_queue_file)
- QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)
+ storage_args = (verb, derived_image, storage_queue_file)
+ QueueProcess.run_process(_write_derived_image_to_storage, storage_args, finished=_cleanup)
- if sign and signer.name:
- signing_args = (verb, derived_image, signing_queue_file)
- QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)
+ if sign and signer.name:
+ signing_args = (verb, derived_image, signing_queue_file)
+ QueueProcess.run_process(_sign_derived_image, signing_args, finished=_cleanup)
# Close the database handle here for this process before we send the long download.
database.close_db_filter(None)
# Return the client's data.
- return send_file(client_queue_file, mimetype=LAYER_MIMETYPE)
+ return send_file(client_queue_file)
def os_arch_checker(os, arch):
- def checker(tag, manifest):
- try:
- image_json = json.loads(manifest.leaf_layer.raw_v1_metadata)
- except ValueError:
- logger.exception('Could not parse leaf layer JSON for manifest %s', manifest)
- return False
- except TypeError:
- logger.exception('Could not parse leaf layer JSON for manifest %s', manifest)
- return False
+ def checker(repo_image):
+ image_json = repo_image.compat_metadata
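+ # compat_metadata is the image's parsed v1-compatibility metadata, which carries
+ # the 'os' and 'architecture' fields verified below.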
# Verify the architecture and os.
operating_system = image_json.get('os', 'linux')
@@ -447,13 +325,11 @@ def get_aci_signature(server, namespace, repository, tag, os, arch):
@route_show_if(features.ACI_CONVERSION)
@anon_protect
-@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=[
- 'GET', 'HEAD'])
+@verbs.route('/aci/<server>/<namespace>/<repository>/<tag>/aci/<os>/<arch>/', methods=['GET', 'HEAD'])
@process_auth
def get_aci_image(server, namespace, repository, tag, os, arch):
- return _repo_verb(namespace, repository, tag, 'aci',
- AppCImageFormatter(), sign=True, checker=os_arch_checker(os, arch), os=os,
- arch=arch)
+ return _repo_verb(namespace, repository, tag, 'aci', AppCImageFormatter(),
+ sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch)
@anon_protect
@@ -468,36 +344,20 @@ def get_squashed_tag(namespace, repository, tag):
@verbs.route('/torrent{0}'.format(BLOB_DIGEST_ROUTE), methods=['GET'])
@process_auth
@parse_repository_name()
-@check_region_blacklisted(namespace_name_kwarg='namespace_name')
def get_tag_torrent(namespace_name, repo_name, digest):
- repo = model.repository.get_repository(namespace_name, repo_name)
- repo_is_public = repo is not None and model.repository.is_repository_public(repo)
-
permission = ReadRepositoryPermission(namespace_name, repo_name)
- if not permission.can() and not repo_is_public:
+ public_repo = model.repository_is_public(namespace_name, repo_name)
+ if not permission.can() and not public_repo:
abort(403)
user = get_authenticated_user()
- if user is None and not repo_is_public:
+ if user is None and not public_repo:
# We cannot generate a private torrent cluster without a user uuid (e.g. token auth)
abort(403)
- if repo is not None and repo.kind.name != 'image':
- abort(405)
-
- repo_ref = registry_model.lookup_repository(namespace_name, repo_name)
- if repo_ref is None:
- abort(404)
-
- blob = registry_model.get_repo_blob_by_digest(repo_ref, digest, include_placements=True)
+ blob = model.get_repo_blob_by_digest(namespace_name, repo_name, digest)
if blob is None:
abort(404)
metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'torrent', True])
- return _torrent_for_blob(blob, repo_is_public)
-
-
-@verbs.route('/_internal_ping')
-@anon_allowed
-def internal_ping():
- return make_response('true', 200)
+ return _torrent_for_blob(blob, public_repo)
diff --git a/endpoints/verbs/test/test_security.py b/endpoints/verbs/test/test_security.py
deleted file mode 100644
index eeb79c567..000000000
--- a/endpoints/verbs/test/test_security.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import pytest
-
-from flask import url_for
-from endpoints.test.shared import conduct_call, gen_basic_auth
-from test.fixtures import *
-
-NO_ACCESS_USER = 'freshuser'
-READ_ACCESS_USER = 'reader'
-ADMIN_ACCESS_USER = 'devtable'
-CREATOR_ACCESS_USER = 'creator'
-
-PUBLIC_REPO = 'public/publicrepo'
-PRIVATE_REPO = 'devtable/shared'
-ORG_REPO = 'buynlarge/orgrepo'
-ANOTHER_ORG_REPO = 'buynlarge/anotherorgrepo'
-
-ACI_ARGS = {
- 'server': 'someserver',
- 'tag': 'fake',
- 'os': 'linux',
- 'arch': 'x64',}
-
-
-@pytest.mark.parametrize('user', [
- (0, None),
- (1, NO_ACCESS_USER),
- (2, READ_ACCESS_USER),
- (3, CREATOR_ACCESS_USER),
- (4, ADMIN_ACCESS_USER),])
-@pytest.mark.parametrize(
- 'endpoint,method,repository,single_repo_path,params,expected_statuses',
- [
- ('get_aci_signature', 'GET', PUBLIC_REPO, False, ACI_ARGS, (404, 404, 404, 404, 404)),
- ('get_aci_signature', 'GET', PRIVATE_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)),
- ('get_aci_signature', 'GET', ORG_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)),
- ('get_aci_signature', 'GET', ANOTHER_ORG_REPO, False, ACI_ARGS, (403, 403, 403, 403, 404)),
-
- # get_aci_image
- ('get_aci_image', 'GET', PUBLIC_REPO, False, ACI_ARGS, (404, 404, 404, 404, 404)),
- ('get_aci_image', 'GET', PRIVATE_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)),
- ('get_aci_image', 'GET', ORG_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)),
- ('get_aci_image', 'GET', ANOTHER_ORG_REPO, False, ACI_ARGS, (403, 403, 403, 403, 404)),
-
- # get_squashed_tag
- ('get_squashed_tag', 'GET', PUBLIC_REPO, False, dict(tag='fake'), (404, 404, 404, 404, 404)),
- ('get_squashed_tag', 'GET', PRIVATE_REPO, False, dict(tag='fake'), (403, 403, 404, 403, 404)),
- ('get_squashed_tag', 'GET', ORG_REPO, False, dict(tag='fake'), (403, 403, 404, 403, 404)),
- ('get_squashed_tag', 'GET', ANOTHER_ORG_REPO, False, dict(tag='fake'), (403, 403, 403, 403,
- 404)),
-
- # get_tag_torrent
- ('get_tag_torrent', 'GET', PUBLIC_REPO, True, dict(digest='sha256:1234'), (404, 404, 404, 404,
- 404)),
- ('get_tag_torrent', 'GET', PRIVATE_REPO, True, dict(digest='sha256:1234'), (403, 403, 404, 403,
- 404)),
- ('get_tag_torrent', 'GET', ORG_REPO, True, dict(digest='sha256:1234'), (403, 403, 404, 403,
- 404)),
- ('get_tag_torrent', 'GET', ANOTHER_ORG_REPO, True, dict(digest='sha256:1234'), (403, 403, 403,
- 403, 404)),])
-def test_verbs_security(user, endpoint, method, repository, single_repo_path, params,
- expected_statuses, app, client):
- headers = {}
- if user[1] is not None:
- headers['Authorization'] = gen_basic_auth(user[1], 'password')
-
- if single_repo_path:
- params['repository'] = repository
- else:
- (namespace, repo_name) = repository.split('/')
- params['namespace'] = namespace
- params['repository'] = repo_name
-
- conduct_call(client, 'verbs.' + endpoint, url_for, method, params,
- expected_code=expected_statuses[user[0]], headers=headers)
diff --git a/endpoints/web.py b/endpoints/web.py
index df1f775b9..f3a6f7ce7 100644
--- a/endpoints/web.py
+++ b/endpoints/web.py
@@ -1,48 +1,40 @@
-import os
import json
import logging
from datetime import timedelta, datetime
-from cachetools.func import lru_cache
+from cachetools import lru_cache
from flask import (abort, redirect, request, url_for, make_response, Response, render_template,
- Blueprint, jsonify, send_file, session)
+ Blueprint, jsonify, send_file)
from flask_login import current_user
import features
from app import (app, billing as stripe, build_logs, avatar, signer, log_archive, config_provider,
- get_app_url, instance_keys, user_analytics, storage)
+ get_app_url, instance_keys, user_analytics)
from auth import scopes
from auth.auth_context import get_authenticated_user
-from auth.basic import has_basic_auth
-from auth.decorators import require_session_login, process_oauth, process_auth_or_cookie
from auth.permissions import (AdministerOrganizationPermission, ReadRepositoryPermission,
SuperUserPermission, AdministerRepositoryPermission,
ModifyRepositoryPermission, OrganizationMemberPermission)
+from auth.process import require_session_login, process_oauth, has_basic_auth, process_auth_or_cookie
from buildtrigger.basehandler import BuildTriggerHandler
from buildtrigger.bitbuckethandler import BitbucketBuildTrigger
from buildtrigger.customhandler import CustomBuildTrigger
from buildtrigger.triggerutil import TriggerProviderException
from data import model
-from data.database import db, RepositoryTag, TagToRepositoryTag
+from data.database import db
from endpoints.api.discovery import swagger_route_data
-from endpoints.common import common_login, render_page_template
+from endpoints.common import (common_login, render_page_template, route_show_if, param_required,
+ parse_repository_name)
from endpoints.csrf import csrf_protect, generate_csrf_token, verify_csrf
-from endpoints.decorators import (anon_protect, anon_allowed, route_show_if, parse_repository_name,
- param_required)
+from endpoints.decorators import anon_protect, anon_allowed
from health.healthcheck import get_healthchecker
from util.cache import no_cache
from util.headers import parse_basic_auth
from util.invoice import renderInvoiceToPdf
-from util.saas.useranalytics import build_error_callback
+from util.systemlogs import build_logs_archive
from util.useremails import send_email_changed
-from util.registry.gzipinputstream import GzipInputStream
-from util.request import get_request_ip
-from _init import ROOT_DIR
-
-
-PGP_KEY_MIMETYPE = 'application/pgp-keys'
@lru_cache(maxsize=1)
@@ -67,10 +59,6 @@ STATUS_TAGS = app.config['STATUS_TAGS']
def index(path, **kwargs):
return render_page_template_with_routedata('index.html', **kwargs)
-@web.route('/_internal_ping')
-@anon_allowed
-def internal_ping():
- return make_response('true', 200)
@web.route('/500', methods=['GET'])
def internal_error_display():
@@ -83,25 +71,13 @@ def not_found_error_display(e = None):
resp.status_code = 404
return resp
-@web.route('/opensearch.xml')
-def opensearch():
- template = render_template('opensearch.xml',
- baseurl=get_app_url(),
- registry_title=app.config.get('REGISTRY_TITLE', 'Quay'))
- resp = make_response(template)
- resp.headers['Content-Type'] = 'application/xml'
- return resp
-
-
@web.route('/organization/<path:path>', methods=['GET'])
-@web.route('/organization/<path:path>/', methods=['GET'])
@no_cache
def org_view(path):
return index('')
@web.route('/user/<path:path>', methods=['GET'])
-@web.route('/user/<path:path>/', methods=['GET'])
@no_cache
def user_view(path):
return index('')
@@ -115,8 +91,7 @@ def aci_signing_key():
if not signer.name:
abort(404)
- return send_file(signer.open_public_key_file(), mimetype=PGP_KEY_MIMETYPE)
-
+ return send_file(signer.open_public_key_file())
@web.route('/plans/')
@no_cache
@@ -125,12 +100,6 @@ def plans():
return index('')
-@web.route('/search')
-@no_cache
-def search():
- return index('')
-
-
@web.route('/guide/')
@no_cache
def guide():
@@ -171,27 +140,6 @@ def setup():
return index('')
-@web.route('/upgradeprogress/')
-@no_cache
-@route_show_if(not features.BILLING)
-@route_show_if(app.config.get('V3_UPGRADE_MODE') == 'background')
-def upgrade_progress():
- total_tags = RepositoryTag.select().where(RepositoryTag.hidden == False).count()
- if total_tags == 0:
- return jsonify({
- 'progress': 1.0,
- 'tags_remaining': 0,
- 'total_tags': 0,
- })
-
- upgraded_tags = TagToRepositoryTag.select().count()
- return jsonify({
- 'progress': float(upgraded_tags) / total_tags,
- 'tags_remaining': total_tags - upgraded_tags,
- 'total_tags': total_tags,
- })
-
-
@web.route('/signin/')
@no_cache
def signin(redirect=None):
@@ -235,18 +183,9 @@ def confirm_invite():
def repository(path):
return index('')
-
-@web.route('/repository/<path:path>/trigger/<trigger>', methods=['GET'])
+@web.route('/starred/')
@no_cache
-def buildtrigger(path, trigger):
- return index('')
-
-
-@route_show_if(features.APP_REGISTRY)
-@web.route('/application/', defaults={'path': ''})
-@web.route('/application/<path:path>', methods=['GET'])
-@no_cache
-def application(path):
+def starred():
return index('')
@@ -260,7 +199,7 @@ def security():
@no_cache
@route_show_if(features.BILLING)
def enterprise():
- return redirect('/plans?tab=enterprise')
+ return index('')
@web.route('/__exp/<expname>')
@@ -288,9 +227,9 @@ def privacy():
return index('')
+# TODO(jschorr): Remove this mirrored endpoint once we migrate ELB.
@web.route('/health', methods=['GET'])
@web.route('/health/instance', methods=['GET'])
-@process_auth_or_cookie
@no_cache
def instance_health():
checker = get_healthchecker(app, config_provider, instance_keys)
@@ -300,9 +239,9 @@ def instance_health():
return response
+# TODO(jschorr): Remove this mirrored endpoint once we migrate pingdom.
@web.route('/status', methods=['GET'])
@web.route('/health/endtoend', methods=['GET'])
-@process_auth_or_cookie
@no_cache
def endtoend_health():
checker = get_healthchecker(app, config_provider, instance_keys)
@@ -312,20 +251,8 @@ def endtoend_health():
return response
-@web.route('/health/warning', methods=['GET'])
-@process_auth_or_cookie
-@no_cache
-def warning_health():
- checker = get_healthchecker(app, config_provider, instance_keys)
- (data, status_code) = checker.check_warning()
- response = jsonify(dict(data=data, status_code=status_code))
- response.status_code = status_code
- return response
-
-
@web.route('/health/dbrevision', methods=['GET'])
@route_show_if(features.BILLING) # Since this is only used in production.
-@process_auth_or_cookie
@no_cache
def dbrevision_health():
# Find the revision from the database.
@@ -333,7 +260,7 @@ def dbrevision_health():
db_revision = result[0]
# Find the local revision from the file system.
- with open(os.path.join(ROOT_DIR, 'ALEMBIC_HEAD'), 'r') as f:
+ with open('ALEMBIC_HEAD', 'r') as f:
local_revision = f.readline().split(' ')[0]
data = {
@@ -348,23 +275,6 @@ def dbrevision_health():
return response
-@web.route('/health/enabledebug/<secret>', methods=['GET'])
-@no_cache
-def enable_health_debug(secret):
- if not secret:
- abort(404)
-
- if not app.config.get('ENABLE_HEALTH_DEBUG_SECRET'):
- abort(404)
-
- if app.config.get('ENABLE_HEALTH_DEBUG_SECRET') != secret:
- abort(404)
-
- session['health_debug'] = True
- return make_response('Health check debug information enabled')
-
-
-
@web.route('/robots.txt', methods=['GET'])
def robots():
robots_txt = make_response(render_template('robots.txt', baseurl=get_app_url()))
@@ -372,27 +282,30 @@ def robots():
return robots_txt
+@web.route('/sitemap.xml', methods=['GET'])
+def sitemap():
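+ # List the 50 most popular public repositories from the past week for crawlers.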
+ popular_repo_tuples = model.repository.list_popular_public_repos(50, timedelta(weeks=1))
+ xml = make_response(render_template('sitemap.xml', public_repos=popular_repo_tuples,
+ baseurl=get_app_url()))
+ xml.headers['Content-Type'] = 'application/xml'
+ return xml
+
+
@web.route('/buildlogs/<build_uuid>', methods=['GET'])
@route_show_if(features.BUILD_SUPPORT)
-@process_auth_or_cookie
+@require_session_login
def buildlogs(build_uuid):
found_build = model.build.get_repository_build(build_uuid)
if not found_build:
abort(403)
repo = found_build.repository
- has_permission = ModifyRepositoryPermission(repo.namespace_user.username, repo.name).can()
- if features.READER_BUILD_LOGS and not has_permission:
- if (ReadRepositoryPermission(repo.namespace_user.username, repo.name).can() or
- model.repository.repository_is_public(repo.namespace_user.username, repo.name)):
- has_permission = True
-
- if not has_permission:
+ if not ModifyRepositoryPermission(repo.namespace_user.username, repo.name).can():
abort(403)
# If the logs have been archived, just return a URL of the completed archive
if found_build.logs_archived:
- return redirect(log_archive.get_file_url(found_build.uuid, get_request_ip()))
+ return redirect(log_archive.get_file_url(found_build.uuid))
_, logs = build_logs.get_log_entries(found_build.uuid, 0)
response = jsonify({
@@ -403,63 +316,6 @@ def buildlogs(build_uuid):
return response
-@web.route('/exportedlogs/<file_id>', methods=['GET'])
-def exportedlogs(file_id):
- # Only enable this endpoint if local storage is available.
- has_local_storage = False
- for storage_type, _ in app.config.get('DISTRIBUTED_STORAGE_CONFIG', {}).values():
- if storage_type == 'LocalStorage':
- has_local_storage = True
- break
-
- if not has_local_storage:
- abort(404)
-
- JSON_MIMETYPE = 'application/json'
- exported_logs_storage_path = app.config.get('EXPORT_ACTION_LOGS_STORAGE_PATH',
- 'exportedactionlogs')
- export_storage_path = os.path.join(exported_logs_storage_path, file_id)
- if not storage.exists(storage.preferred_locations, export_storage_path):
- abort(404)
-
- try:
- return send_file(storage.stream_read_file(storage.preferred_locations, export_storage_path),
- mimetype=JSON_MIMETYPE)
- except IOError:
- logger.exception('Could not read exported logs')
- abort(403)
-
-
-@web.route('/logarchive/<file_id>', methods=['GET'])
-@route_show_if(features.BUILD_SUPPORT)
-@process_auth_or_cookie
-def logarchive(file_id):
- JSON_MIMETYPE = 'application/json'
- try:
- found_build = model.build.get_repository_build(file_id)
- except model.InvalidRepositoryBuildException as ex:
- logger.exception(ex, extra={'build_uuid': file_id})
- abort(403)
-
- repo = found_build.repository
- has_permission = ModifyRepositoryPermission(repo.namespace_user.username, repo.name).can()
- if features.READER_BUILD_LOGS and not has_permission:
- if (ReadRepositoryPermission(repo.namespace_user.username, repo.name).can() or
- model.repository.repository_is_public(repo.namespace_user.username, repo.name)):
- has_permission = True
-
- if not has_permission:
- abort(403)
-
- try:
- path = log_archive.get_file_id_path(file_id)
- data_stream = log_archive._storage.stream_read_file(log_archive._locations, path)
- return send_file(GzipInputStream(data_stream), mimetype=JSON_MIMETYPE)
- except IOError:
- logger.exception('Could not read archived logs')
- abort(403)
-
-
@web.route('/receipt', methods=['GET'])
@route_show_if(features.BILLING)
@require_session_login
@@ -532,14 +388,9 @@ def confirm_email():
if new_email:
send_email_changed(user.username, old_email, new_email)
- change_email_future = user_analytics.change_email(old_email, new_email)
- change_email_future.add_done_callback(build_error_callback('Change email failed'))
-
- success, _ = common_login(user.uuid)
- if not success:
- return index('', error_info=dict(reason='confirmerror',
- error_message='Could not perform login'))
+ user_analytics.change_email(old_email, new_email)
+ common_login(user)
if model.user.has_user_prompts(user):
return redirect(url_for('web.updateuser'))
elif new_email:
@@ -556,11 +407,7 @@ def confirm_recovery():
user = model.user.validate_reset_code(code)
if user is not None:
- success, _ = common_login(user.uuid)
- if not success:
- message = 'Could not perform login.'
- return render_page_template_with_routedata('message.html', message=message)
-
+ common_login(user)
return redirect(url_for('web.user_view', path=user.username, tab='settings', action='password'))
else:
message = 'Invalid recovery code: This code is invalid or may have already been used.'
@@ -572,17 +419,17 @@ def confirm_recovery():
@anon_protect
def build_status_badge(namespace_name, repo_name):
token = request.args.get('token', None)
- repo = model.repository.get_repository(namespace_name, repo_name)
- if repo and repo.kind.name != 'image':
- abort(404)
-
is_public = model.repository.repository_is_public(namespace_name, repo_name)
if not is_public:
+ repo = model.repository.get_repository(namespace_name, repo_name)
if not repo or token != repo.badge_token:
abort(404)
- is_empty = model.repository.is_empty(namespace_name, repo_name)
+ # Lookup the tags for the repository.
+ tags = model.tag.list_repository_tags(namespace_name, repo_name)
+ is_empty = len(list(tags)) == 0
recent_build = model.build.get_recent_repository_build(namespace_name, repo_name)
+
if not is_empty and (not recent_build or recent_build.phase == 'complete'):
status_name = 'ready'
elif recent_build and recent_build.phase == 'error':
@@ -745,6 +592,24 @@ def exchange_code_for_token():
return provider.get_token(grant_type, client_id, client_secret, redirect_uri, code, scope=scope)
+@web.route('/systemlogsarchive', methods=['GET'])
+@process_oauth
+@route_show_if(features.SUPER_USERS)
+@no_cache
+def download_logs_archive():
+ # Note: We cannot use the @csrf_protect decorator here because this is a GET method. That
+ # said, this information is sensitive enough that we want the extra protection.
+ verify_csrf()
+
+ if SuperUserPermission().can():
+ archive_data = build_logs_archive(app)
+ return Response(archive_data,
+ mimetype="application/octet-stream",
+ headers={"Content-Disposition": "attachment;filename=erlogs.tar.gz"})
+
+ abort(403)
+
+
@web.route('/bitbucket/setup/<path:repository>', methods=['GET'])
@require_session_login
@parse_repository_name()
@@ -756,8 +621,6 @@ def attach_bitbucket_trigger(namespace_name, repo_name):
if not repo:
msg = 'Invalid repository: %s/%s' % (namespace_name, repo_name)
abort(404, message=msg)
- elif repo.kind.name != 'image':
- abort(501)
trigger = model.build.create_build_trigger(repo, BitbucketBuildTrigger.service_name(), None,
current_user.db_user())
@@ -791,14 +654,14 @@ def attach_custom_build_trigger(namespace_name, repo_name):
if not repo:
msg = 'Invalid repository: %s/%s' % (namespace_name, repo_name)
abort(404, message=msg)
- elif repo.kind.name != 'image':
- abort(501)
trigger = model.build.create_build_trigger(repo, CustomBuildTrigger.service_name(),
None, current_user.db_user())
repo_path = '%s/%s' % (namespace_name, repo_name)
- full_url = url_for('web.buildtrigger', path=repo_path, trigger=trigger.uuid)
+ full_url = '%s%s%s' % (url_for('web.repository', path=repo_path), '?tab=builds&newtrigger=',
+ trigger.uuid)
+
logger.debug('Redirecting to full url: %s', full_url)
return redirect(full_url)
@@ -806,7 +669,6 @@ def attach_custom_build_trigger(namespace_name, repo_name):
@web.route('/<repopath:repository>')
-@web.route('/<repopath:repository>/')
@no_cache
@process_oauth
@parse_repository_name(include_tag=True)
@@ -820,14 +682,11 @@ def redirect_to_repository(namespace_name, repo_name, tag_name):
# Redirect to the repository page if the user can see the repository.
is_public = model.repository.repository_is_public(namespace_name, repo_name)
permission = ReadRepositoryPermission(namespace_name, repo_name)
- repo = model.repository.get_repository(namespace_name, repo_name)
+ repo_exists = bool(model.repository.get_repository(namespace_name, repo_name))
- if repo and (permission.can() or is_public):
+ if repo_exists and (permission.can() or is_public):
repo_path = '/'.join([namespace_name, repo_name])
- if repo.kind.name == 'application':
- return redirect(url_for('web.application', path=repo_path))
- else:
- return redirect(url_for('web.repository', path=repo_path, tab="tags", tag=tag_name))
+ return redirect(url_for('web.repository', path=repo_path, tab="tags", tag=tag_name))
namespace_exists = bool(model.user.get_user_or_org(namespace_name))
namespace_permission = OrganizationMemberPermission(namespace_name).can()
@@ -848,7 +707,7 @@ def redirect_to_repository(namespace_name, repo_name, tag_name):
'repo_name': repo_name,
}
- if not namespace_exists or (namespace_permission and repo is None):
+ if not namespace_exists or (namespace_permission and not repo_exists):
resp = index('', error_code=404, error_info=json.dumps(error_info))
resp.status_code = 404
return resp
@@ -859,15 +718,10 @@ def redirect_to_repository(namespace_name, repo_name, tag_name):
@web.route('/<namespace>')
-@web.route('/<namespace>/')
@no_cache
@process_oauth
@anon_protect
def redirect_to_namespace(namespace):
- okay, _ = model.user.validate_username(namespace)
- if not okay:
- abort(404)
-
user_or_org = model.user.get_user_or_org(namespace)
if not user_or_org:
abort(404)
diff --git a/endpoints/webhooks.py b/endpoints/webhooks.py
index 41c28233f..79a3e58d1 100644
--- a/endpoints/webhooks.py
+++ b/endpoints/webhooks.py
@@ -2,19 +2,17 @@ import logging
from flask import request, make_response, Blueprint
-from app import billing as stripe, app
+from app import billing as stripe
from data import model
-from data.database import RepositoryState
-from auth.decorators import process_auth
from auth.permissions import ModifyRepositoryPermission
+from auth.process import process_auth
from util.invoice import renderInvoiceToHtml
from util.useremails import send_invoice_email, send_subscription_change, send_payment_failed
from util.http import abort
from buildtrigger.basehandler import BuildTriggerHandler
from buildtrigger.triggerutil import (ValidationRequestException, SkipRequestException,
InvalidPayloadException)
-from endpoints.building import (start_build, MaximumBuildsQueuedException,
- BuildTriggerDisabledException)
+from endpoints.building import start_build, MaximumBuildsQueuedException
logger = logging.getLogger(__name__)
@@ -28,37 +26,28 @@ def stripe_webhook():
logger.debug('Stripe webhook call: %s', request_data)
customer_id = request_data.get('data', {}).get('object', {}).get('customer', None)
- namespace = model.user.get_user_or_org_by_customer_id(customer_id) if customer_id else None
+ user = model.user.get_user_or_org_by_customer_id(customer_id) if customer_id else None
event_type = request_data['type'] if 'type' in request_data else None
if event_type == 'charge.succeeded':
invoice_id = request_data['data']['object']['invoice']
- namespace = model.user.get_user_or_org_by_customer_id(customer_id) if customer_id else None
- if namespace:
- # Increase the namespace's build allowance, since we had a successful charge.
- build_maximum = app.config.get('BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT')
- if build_maximum is not None:
- model.user.increase_maximum_build_count(namespace, build_maximum)
-
- if namespace.invoice_email:
- # Lookup the invoice.
- invoice = stripe.Invoice.retrieve(invoice_id)
- if invoice:
- invoice_html = renderInvoiceToHtml(invoice, namespace)
- send_invoice_email(namespace.invoice_email_address or namespace.email, invoice_html)
+ if user and user.invoice_email:
+ # Lookup the invoice.
+ invoice = stripe.Invoice.retrieve(invoice_id)
+ if invoice:
+ invoice_html = renderInvoiceToHtml(invoice, user)
+ send_invoice_email(user.invoice_email_address or user.email, invoice_html)
elif event_type.startswith('customer.subscription.'):
- cust_email = namespace.email if namespace is not None else 'unknown@domain.com'
- quay_username = namespace.username if namespace is not None else 'unknown'
+ cust_email = user.email if user is not None else 'unknown@domain.com'
+ quay_username = user.username if user is not None else 'unknown'
change_type = ''
if event_type.endswith('.deleted'):
plan_id = request_data['data']['object']['plan']['id']
- requested = bool(request_data.get('request'))
- if requested:
- change_type = 'canceled %s' % plan_id
- send_subscription_change(change_type, customer_id, cust_email, quay_username)
+ change_type = 'canceled %s' % plan_id
+ send_subscription_change(change_type, customer_id, cust_email, quay_username)
elif event_type.endswith('.created'):
plan_id = request_data['data']['object']['plan']['id']
change_type = 'subscribed %s' % plan_id
@@ -72,8 +61,8 @@ def stripe_webhook():
send_subscription_change(change_type, customer_id, cust_email, quay_username)
elif event_type == 'invoice.payment_failed':
- if namespace:
- send_payment_failed(namespace.email, namespace.username)
+ if user:
+ send_payment_failed(user.email, user.username)
return make_response('Okay')
@@ -91,22 +80,12 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
# doesn't leak anything
abort(404)
- # Ensure we are not currently in read-only mode.
- if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
- abort(503, 'System is currently in read-only mode')
-
- # Ensure the trigger has permission.
namespace = trigger.repository.namespace_user.username
repository = trigger.repository.name
- if ModifyRepositoryPermission(namespace, repository).can():
+ permission = ModifyRepositoryPermission(namespace, repository)
+ if permission.can():
handler = BuildTriggerHandler.get_handler(trigger)
- if trigger.repository.kind.name != 'image':
- abort(501, 'Build triggers cannot be invoked on application repositories')
-
- if trigger.repository.state != RepositoryState.NORMAL:
- abort(503, 'Repository is currently in read only or mirror mode')
-
logger.debug('Passing webhook request to handler %s', handler)
try:
prepared = handler.handle_trigger_request(request)
@@ -121,7 +100,7 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
except InvalidPayloadException as ipe:
logger.exception('Invalid payload')
# The payload was malformed
- abort(400, message=str(ipe))
+ abort(400, message=ipe.message)
pull_robot_name = model.build.get_pull_robot_name(trigger)
repo = model.repository.get_repository(namespace, repository)
@@ -129,9 +108,6 @@ def build_trigger_webhook(trigger_uuid, **kwargs):
start_build(repo, prepared, pull_robot_name=pull_robot_name)
except MaximumBuildsQueuedException:
abort(429, message='Maximum queued build rate exceeded.')
- except BuildTriggerDisabledException:
- logger.debug('Build trigger %s is disabled', trigger_uuid)
- abort(400, message='This build trigger is currently disabled. Please re-enable to continue.')
return make_response('Okay')
diff --git a/endpoints/wellknown.py b/endpoints/wellknown.py
index bafc2aab4..c1a86d18d 100644
--- a/endpoints/wellknown.py
+++ b/endpoints/wellknown.py
@@ -2,9 +2,7 @@ import json
import logging
from app import get_app_url
-from auth.decorators import require_session_login
-from auth.auth_context import get_authenticated_user
-from flask import Blueprint, make_response, redirect
+from flask import Blueprint, make_response
logger = logging.getLogger(__name__)
wellknown = Blueprint('wellknown', __name__)
@@ -12,12 +10,8 @@ wellknown = Blueprint('wellknown', __name__)
@wellknown.route('/app-capabilities', methods=['GET'])
def app_capabilities():
view_image_tmpl = '%s/{namespace}/{reponame}:{tag}' % get_app_url()
-
- image_security = '%s/api/v1/repository/{namespace}/{reponame}/image/{imageid}/security'
- image_security_tmpl = image_security % get_app_url()
-
- manifest_security = '%s/api/v1/repository/{namespace}/{reponame}/manifest/{digest}/security'
- manifest_security_tmpl = manifest_security % get_app_url()
+ image_security_tmpl = ('%s/api/v1/repository/{namespace}/{reponame}/image/{imageid}/security' %
+ get_app_url())
metadata = {
'appName': 'io.quay',
@@ -29,19 +23,9 @@ def app_capabilities():
'io.quay.image-security': {
'rest-api-template': image_security_tmpl,
},
-
- 'io.quay.manifest-security': {
- 'rest-api-template': manifest_security_tmpl,
- },
},
}
resp = make_response(json.dumps(metadata))
resp.headers['Content-Type'] = 'application/json'
return resp
-
-
-@wellknown.route('/change-password', methods=['GET'])
-@require_session_login
-def change_password():
- return redirect('/user/%s?tab=settings' % get_authenticated_user().username)
diff --git a/events/build_cancelled.html b/events/build_cancelled.html
deleted file mode 100644
index 91c00b519..000000000
--- a/events/build_cancelled.html
+++ /dev/null
@@ -1,2 +0,0 @@
-{% extends "build_event.html" %}
-{% block eventkind %}canceled{% endblock %}
diff --git a/events/repo_mirror_sync_failed.html b/events/repo_mirror_sync_failed.html
deleted file mode 100644
index 87fb64444..000000000
--- a/events/repo_mirror_sync_failed.html
+++ /dev/null
@@ -1,7 +0,0 @@
-{% if notification_data.performer_data.entity_name %}
-{{ notification_data.performer_data.entity_name | user_reference }}
-{% endif %}
-
-{% if event_data.message %}
- {{ event_data.message }}
-{% endif %}
diff --git a/events/repo_mirror_sync_started.html b/events/repo_mirror_sync_started.html
deleted file mode 100644
index 87fb64444..000000000
--- a/events/repo_mirror_sync_started.html
+++ /dev/null
@@ -1,7 +0,0 @@
-{% if notification_data.performer_data.entity_name %}
-{{ notification_data.performer_data.entity_name | user_reference }}
-{% endif %}
-
-{% if event_data.message %}
- {{ event_data.message }}
-{% endif %}
diff --git a/events/repo_mirror_sync_success.html b/events/repo_mirror_sync_success.html
deleted file mode 100644
index 87fb64444..000000000
--- a/events/repo_mirror_sync_success.html
+++ /dev/null
@@ -1,7 +0,0 @@
-{% if notification_data.performer_data.entity_name %}
-{{ notification_data.performer_data.entity_name | user_reference }}
-{% endif %}
-
-{% if event_data.message %}
- {{ event_data.message }}
-{% endif %}
diff --git a/events/vulnerability_found.html b/events/vulnerability_found.html
index 89cfd28a7..fd75df641 100644
--- a/events/vulnerability_found.html
+++ b/events/vulnerability_found.html
@@ -1,8 +1,4 @@
-{% if event_data.vulnerabilities %}
-{{ event_data.vulnerabilities|length }} vulnerabilities were detected in tags
-{% else %}
A {{ event_data.vulnerability.priority }} vulnerability ({{ event_data.vulnerability.id }}) was detected in tags
-{% endif %}
{{ 'tags' | icon_image }}
{% for tag in event_data.tags[0:3] %}{%if loop.index > 1 %}, {% endif %}{{ (event_data.repository, tag) | repository_tag_reference }}{% endfor %} {% if event_data.tags|length > 3 %}(and {{ event_data.tags|length - 3 }} more) {% endif %} in
- repository {{ event_data.repository | repository_reference }}
\ No newline at end of file
+ repository {{ event_data.repository | repository_reference }}
diff --git a/external_libraries.py b/external_libraries.py
index fd5b2a31f..d99431283 100644
--- a/external_libraries.py
+++ b/external_libraries.py
@@ -1,17 +1,8 @@
-import logging
-import logging.config
import urllib2
import re
import os
-import hashlib
-from _init import STATIC_FONTS_DIR, STATIC_WEBFONTS_DIR, STATIC_LDN_DIR
-from util.log import logfile_path
-
-
-LOCAL_PATH = '/static/ldn/'
-
-MAX_RETRY_COUNT = 3
+LOCAL_DIRECTORY = '/static/ldn/'
EXTERNAL_JS = [
'code.jquery.com/jquery.js',
@@ -27,28 +18,23 @@ EXTERNAL_JS = [
'cdn.jsdelivr.net/g/bootbox@4.1.0,underscorejs@1.5.2,restangular@1.2.0,d3js@3.3.3',
'cdn.ravenjs.com/3.1.0/angular/raven.min.js',
'cdn.jsdelivr.net/cal-heatmap/3.3.10/cal-heatmap.min.js',
- 'cdnjs.cloudflare.com/ajax/libs/angular-recaptcha/4.1.3/angular-recaptcha.min.js',
- 'cdnjs.cloudflare.com/ajax/libs/ng-tags-input/3.1.1/ng-tags-input.min.js',
- 'cdnjs.cloudflare.com/ajax/libs/corejs-typeahead/1.1.1/typeahead.bundle.min.js',
+ 'cdnjs.cloudflare.com/ajax/libs/angular-recaptcha/3.2.1/angular-recaptcha.min.js',
]
EXTERNAL_CSS = [
- 'use.fontawesome.com/releases/v5.0.4/css/all.css',
- 'netdna.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.css',
+ 'netdna.bootstrapcdn.com/font-awesome/4.6.0/css/font-awesome.css',
'netdna.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css',
'fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700',
's3.amazonaws.com/cdn.core-os.net/icons/core-icons.css',
'cdnjs.cloudflare.com/ajax/libs/bootstrap-datetimepicker/4.17.37/css/bootstrap-datetimepicker.min.css',
'cdn.jsdelivr.net/cal-heatmap/3.3.10/cal-heatmap.css',
- 'cdnjs.cloudflare.com/ajax/libs/ng-tags-input/3.1.1/ng-tags-input.min.css',
]
EXTERNAL_FONTS = [
- 'netdna.bootstrapcdn.com/font-awesome/4.7.0/fonts/fontawesome-webfont.eot?v=4.7.0',
- 'netdna.bootstrapcdn.com/font-awesome/4.7.0/fonts/fontawesome-webfont.woff?v=4.7.0',
- 'netdna.bootstrapcdn.com/font-awesome/4.7.0/fonts/fontawesome-webfont.woff2?v=4.7.0',
- 'netdna.bootstrapcdn.com/font-awesome/4.7.0/fonts/fontawesome-webfont.ttf?v=4.7.0',
- 'netdna.bootstrapcdn.com/font-awesome/4.7.0/fonts/fontawesome-webfont.svg?v=4.7.0',
+ 'netdna.bootstrapcdn.com/font-awesome/4.6.0/fonts/fontawesome-webfont.eot?v=4.6.0',
+ 'netdna.bootstrapcdn.com/font-awesome/4.6.0/fonts/fontawesome-webfont.woff?v=4.6.0',
+ 'netdna.bootstrapcdn.com/font-awesome/4.6.0/fonts/fontawesome-webfont.ttf?v=4.6.0',
+ 'netdna.bootstrapcdn.com/font-awesome/4.6.0/fonts/fontawesome-webfont.svg?v=4.6.0',
'netdna.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.eot',
'netdna.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff2',
@@ -57,20 +43,6 @@ EXTERNAL_FONTS = [
'netdna.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.svg',
]
-EXTERNAL_WEBFONTS = [
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-regular-400.ttf',
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-regular-400.woff',
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-regular-400.woff2',
-
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-solid-900.ttf',
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-solid-900.woff',
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-solid-900.woff2',
-
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-brands-400.ttf',
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-brands-400.woff',
- 'use.fontawesome.com/releases/v5.0.4/webfonts/fa-brands-400.woff2',
-]
-
EXTERNAL_CSS_FONTS = [
's3.amazonaws.com/cdn.core-os.net/icons/core-icons.eot',
's3.amazonaws.com/cdn.core-os.net/icons/core-icons.woff',
@@ -79,71 +51,55 @@ EXTERNAL_CSS_FONTS = [
]
-logger = logging.getLogger(__name__)
-
-
def get_external_javascript(local=False):
if local:
- return [LOCAL_PATH + format_local_name(src) for src in EXTERNAL_JS]
+ return [LOCAL_DIRECTORY + format_local_name(src) for src in EXTERNAL_JS]
return ['//' + src for src in EXTERNAL_JS]
-def get_external_css(local=False, exclude=None):
- exclude = exclude or []
+def get_external_css(local=False):
if local:
- return [LOCAL_PATH + format_local_name(src) for src in EXTERNAL_CSS if src not in exclude]
+ return [LOCAL_DIRECTORY + format_local_name(src) for src in EXTERNAL_CSS]
- return ['//' + src for src in EXTERNAL_CSS if src not in exclude]
+ return ['//' + src for src in EXTERNAL_CSS]
def format_local_name(url):
filename = url.split('/')[-1]
filename = re.sub(r'[+,?@=:]', '', filename)
-
- url_hash = hashlib.sha256(url).hexdigest()[0:12]
- filename += '-' + url_hash
-
if not filename.endswith('.css') and not filename.endswith('.js'):
- if filename.find('css') >= 0:
- filename = filename + '.css'
- else:
- filename = filename + '.js'
+ if filename.find('css') >= 0:
+ filename = filename + '.css'
+ else:
+ filename = filename + '.js'
return filename
-def _download_url(url):
- for index in range(0, MAX_RETRY_COUNT):
- try:
- response = urllib2.urlopen(url)
- return response.read()
- except urllib2.URLError:
- logger.exception('Got exception when trying to download URL %s (try #%s)', url, index + 1)
-
- raise Exception('Aborted due to maximum retries reached')
-
-
if __name__ == '__main__':
- logging.config.fileConfig(logfile_path(debug=False), disable_existing_loggers=False)
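+ # Fetch each external JS/CSS asset over HTTPS and mirror it into the local
+ # delivery directory ('/static/ldn/') so pages can be served without the CDNs.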
+ for url in EXTERNAL_JS + EXTERNAL_CSS:
+ print 'Downloading %s' % url
+ response = urllib2.urlopen('https://' + url)
+ contents = response.read()
- resources = [
- (STATIC_LDN_DIR, EXTERNAL_JS + EXTERNAL_CSS, True),
- (STATIC_LDN_DIR, EXTERNAL_CSS_FONTS, False),
- (STATIC_FONTS_DIR, EXTERNAL_FONTS, False),
- (STATIC_WEBFONTS_DIR, EXTERNAL_WEBFONTS, False),
- ]
+ filename = format_local_name(url)
+ print 'Writing %s' % filename
+ with open(LOCAL_DIRECTORY + filename, 'w') as f:
+ f.write(contents)
- for local_directory, urls, requires_hashing in resources:
- for url in urls:
- if requires_hashing:
- filename = format_local_name(url)
- else:
- filename = os.path.basename(url).split('?')[0]
+ for url in EXTERNAL_CSS_FONTS:
+ print 'Downloading %s' % url
+ response = urllib2.urlopen('https://' + url)
- path = os.path.join(local_directory, filename)
- print 'Downloading %s to %s' % (url, path)
- contents = _download_url('https://' + url)
+ filename = os.path.basename(url).split('?')[0]
+ with open('static/ldn/' + filename, "wb") as local_file:
+ local_file.write(response.read())
- with open(path, "wb") as local_file:
- local_file.write(contents)
+ for url in EXTERNAL_FONTS:
+ print 'Downloading %s' % url
+ response = urllib2.urlopen('https://' + url)
+
+ filename = os.path.basename(url).split('?')[0]
+ with open('static/fonts/' + filename, "wb") as local_file:
+ local_file.write(response.read())
diff --git a/features/__init__.py b/features/__init__.py
index ca8c2880a..b5822681e 100644
--- a/features/__init__.py
+++ b/features/__init__.py
@@ -27,3 +27,5 @@ class FeatureNameValue(object):
def __nonzero__(self):
return self.value.__nonzero__()
+
+
diff --git a/grunt/Gruntfile.js b/grunt/Gruntfile.js
new file mode 100644
index 000000000..ef7a4a6f9
--- /dev/null
+++ b/grunt/Gruntfile.js
@@ -0,0 +1,95 @@
+module.exports = function(grunt) {
+
+ // Project configuration.
+ grunt.initConfig({
+ pkg: grunt.file.readJSON('package.json'),
+ concat: {
+ options: {
+ process: function(src, filepath) {
+ var unwraps = ['/js/'];
+
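+ // Files whose path matches an entry in 'unwraps' (anything under /js/) are emitted
+ // as-is; every other source is wrapped in an IIFE so its top-level variables stay
+ // out of the global scope.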
+ var shouldWrap = true;
+ for (var i = 0; i < unwraps.length; ++i) {
+ if (filepath.indexOf(unwraps[i]) >= 0) {
+ shouldWrap = false;
+ break;
+ }
+ }
+
+ if (shouldWrap) {
+ return '// Source: ' + filepath + '\n' +
+ '(function() {\n' + src + '\n})();\n';
+ } else {
+ return '// Source: ' + filepath + '\n' + src + '\n\n';
+ }
+ },
+ },
+ build: {
+ src: ['../static/lib/**/*.js', '../static/js/**/*.js', '../static/dist/template-cache.js',
+ '!../static/js/**/*.spec.js'],
+ dest: '../static/dist/<%= pkg.name %>.js'
+ }
+ },
+
+ cssmin: {
+ '../static/dist/<%= pkg.name %>.css': ['../static/lib/**/*.css', '../static/css/**/*.css']
+ },
+
+ uglify: {
+ options: {
+ mangle: false,
+ sourceMap: false
+ },
+ js_min: {
+ files: {
+ '../static/dist/<%= pkg.name %>.min.js': ['../static/dist/<%= pkg.name %>.js']
+ }
+ }
+ },
+
+ ngtemplates: {
+ options: {
+ url: function(path) {
+ return '/' + path.substr(3); // remove the ../
+ },
+ htmlmin: {
+ collapseBooleanAttributes: false,
+ collapseWhitespace: true,
+ removeAttributeQuotes: true,
+ removeComments: true, // Only if you don't use comment directives!
+ removeEmptyAttributes: true,
+ removeRedundantAttributes: true,
+ removeScriptTypeAttributes: true,
+ removeStyleLinkTypeAttributes: true,
+ keepClosingSlash: true // For inline SVG
+ }
+ },
+ quay: {
+      src: ['../static/partials/*.html', '../static/directives/*.html',
+            '../static/directives/config/*.html', '../static/tutorial/*.html'],
+ dest: '../static/dist/template-cache.js'
+ }
+ },
+
+ cachebuster: {
+ build: {
+ options: {
+ format: 'json',
+ basedir: '../static/'
+ },
+ src: [ '../static/dist/template-cache.js', '../static/dist/<%= pkg.name %>.min.js',
+ '../static/dist/<%= pkg.name %>.css' ],
+ dest: '../static/dist/cachebusters.json'
+ }
+ }
+ });
+
+ grunt.loadNpmTasks('grunt-contrib-uglify');
+ grunt.loadNpmTasks('grunt-contrib-concat');
+ grunt.loadNpmTasks('grunt-contrib-cssmin');
+ grunt.loadNpmTasks('grunt-angular-templates');
+ grunt.loadNpmTasks('grunt-cachebuster');
+
+ // Default task(s).
+ grunt.registerTask('default', ['ngtemplates', 'concat', 'cssmin', 'uglify', 'cachebuster']);
+};
diff --git a/grunt/package.json b/grunt/package.json
new file mode 100644
index 000000000..0ea53569b
--- /dev/null
+++ b/grunt/package.json
@@ -0,0 +1,12 @@
+{
+ "name": "quay-frontend",
+ "version": "0.1.0",
+ "devDependencies": {
+ "grunt": "~0.4.4",
+ "grunt-contrib-concat": "~0.4.0",
+ "grunt-contrib-cssmin": "~0.9.0",
+ "grunt-angular-templates": "~0.5.4",
+ "grunt-contrib-uglify": "~0.4.0",
+ "grunt-cachebuster": "~0.1.5"
+ }
+}
diff --git a/health/healthcheck.py b/health/healthcheck.py
index 4210b4e0c..4208a0a62 100644
--- a/health/healthcheck.py
+++ b/health/healthcheck.py
@@ -1,15 +1,9 @@
-import logging
-import socket
-
import boto.rds2
-
-from auth.permissions import SuperUserPermission
-from flask import session
-from health.services import check_all_services, check_warning_services
+import logging
+from health.services import check_all_services
logger = logging.getLogger(__name__)
-
def get_healthchecker(app, config_provider, instance_keys):
""" Returns a HealthCheck instance for the given app. """
return HealthCheck.get_checker(app, config_provider, instance_keys)
@@ -22,20 +16,12 @@ class HealthCheck(object):
self.instance_keys = instance_keys
self.instance_skips = instance_skips or []
- def check_warning(self):
- """
- Conducts a check on the warnings, returning a dict representing the HealthCheck
- output and a number indicating the health check response code.
- """
- service_statuses = check_warning_services(self.app, [])
- return self.get_instance_health(service_statuses)
-
def check_instance(self):
"""
Conducts a check on this specific instance, returning a dict representing the HealthCheck
output and a number indicating the health check response code.
"""
- service_statuses = check_all_services(self.app, self.instance_skips, for_instance=True)
+ service_statuses = check_all_services(self.app, self.instance_skips)
return self.get_instance_health(service_statuses)
def check_endtoend(self):
@@ -43,7 +29,7 @@ class HealthCheck(object):
Conducts a check on all services, returning a dict representing the HealthCheck
output and a number indicating the health check response code.
"""
- service_statuses = check_all_services(self.app, [], for_instance=False)
+ service_statuses = check_all_services(self.app, [])
return self.calculate_overall_health(service_statuses)
def get_instance_health(self, service_statuses):
@@ -59,50 +45,24 @@ class HealthCheck(object):
is_healthy = True
notes = notes or []
- service_statuses_bools = {}
- service_status_expanded = {}
-
for service_name in service_statuses:
- status, message = service_statuses[service_name]
-
- service_statuses_bools[service_name] = status
- service_status_expanded[service_name] = {
- 'status': status,
- }
-
- if not status:
- service_status_expanded[service_name]['failure'] = message
- elif message:
- service_status_expanded[service_name]['message'] = message
-
if skip and service_name in skip:
notes.append('%s skipped in compute health' % service_name)
continue
- is_healthy = is_healthy and status
+ is_healthy = is_healthy and service_statuses[service_name]
data = {
- 'services': service_statuses_bools,
- }
-
- expanded_data = {
- 'services_expanded': service_status_expanded,
+ 'services': service_statuses,
'notes': notes,
'is_testing': self.app.config['TESTING'],
'config_provider': self.config_provider.provider_id,
'local_service_key_id': self.instance_keys.local_key_id,
- 'hostname': socket.gethostname(),
}
- add_debug_information = SuperUserPermission().can() or session.get('health_debug', False)
- if add_debug_information:
- data.update(expanded_data)
-
- if not is_healthy:
- logger.warning('[FAILED HEALTH CHECK] %s', expanded_data)
-
return (data, 200 if is_healthy else 503)
+
@classmethod
def get_checker(cls, app, config_provider, instance_keys):
name = app.config['HEALTH_CHECKER'][0]
@@ -117,8 +77,8 @@ class HealthCheck(object):
class LocalHealthCheck(HealthCheck):
def __init__(self, app, config_provider, instance_keys):
- super(LocalHealthCheck, self).__init__(app, config_provider, instance_keys, [
- 'redis', 'storage'])
+ super(LocalHealthCheck, self).__init__(app, config_provider, instance_keys,
+ ['redis', 'storage'])
@classmethod
def check_names(cls):
@@ -128,10 +88,8 @@ class LocalHealthCheck(HealthCheck):
class RDSAwareHealthCheck(HealthCheck):
def __init__(self, app, config_provider, instance_keys, access_key, secret_key,
db_instance='quay', region='us-east-1'):
- # Note: We skip the redis check because if redis is down, we don't want ELB taking the
-    # machines out of service. Redis is not considered a high availability-required service.
- super(RDSAwareHealthCheck, self).__init__(app, config_provider, instance_keys, [
- 'redis', 'storage'])
+ super(RDSAwareHealthCheck, self).__init__(app, config_provider, instance_keys,
+ ['redis', 'storage'])
self.access_key = access_key
self.secret_key = secret_key
@@ -143,25 +101,27 @@ class RDSAwareHealthCheck(HealthCheck):
return ['RDSAwareHealthCheck', 'ProductionHealthCheck']
def get_instance_health(self, service_statuses):
+ # Note: We skip the redis check because if redis is down, we don't want ELB taking the
+    # machines out of service. Redis is not considered a high availability-required service.
skip = []
notes = []
# If the database is marked as unhealthy, check the status of RDS directly. If RDS is
# reporting as available, then the problem is with this instance. Otherwise, the problem is
# with RDS, and so we skip the DB status so we can keep this machine as 'healthy'.
- if 'database' in service_statuses:
- db_healthy = service_statuses['database']
- if not db_healthy:
- rds_status = self._get_rds_status()
- notes.append('DB reports unhealthy; RDS status: %s' % rds_status)
+ db_healthy = service_statuses['database']
+ if not db_healthy:
+ rds_status = self._get_rds_status()
+ notes.append('DB reports unhealthy; RDS status: %s' % rds_status)
- # If the RDS is in any state but available, then we skip the DB check since it will
- # fail and bring down the instance.
- if rds_status != 'available':
- skip.append('database')
+ # If the RDS is in any state but available, then we skip the DB check since it will
+ # fail and bring down the instance.
+ if rds_status != 'available':
+ skip.append('database')
return self.calculate_overall_health(service_statuses, skip=skip, notes=notes)
+
def _get_rds_status(self):
""" Returns the status of the RDS instance as reported by AWS. """
try:
@@ -170,8 +130,7 @@ class RDSAwareHealthCheck(HealthCheck):
response = region.describe_db_instances()['DescribeDBInstancesResponse']
result = response['DescribeDBInstancesResult']
- instances = [
- i for i in result['DBInstances'] if i['DBInstanceIdentifier'] == self.db_instance]
+ instances = [i for i in result['DBInstances'] if i['DBInstanceIdentifier'] == self.db_instance]
if not instances:
return 'error'
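
The RDS-aware check above encodes a small decision rule: a database failure only counts against this instance when RDS itself still reports 'available'; otherwise the outage is global and the database check is skipped so the load balancer keeps the machine in rotation. A minimal sketch of that rule (the function name and boolean inputs are illustrative, not the class's API):

def should_skip_db_check(db_healthy, rds_status):
  # Skip the database in the health computation only when the local check
  # failed *and* RDS reports a non-available state, i.e. the problem is
  # shared by every instance rather than specific to this one.
  return (not db_healthy) and rds_status != 'available'

assert should_skip_db_check(True, 'available') is False
assert should_skip_db_check(False, 'available') is False  # instance-local problem
assert should_skip_db_check(False, 'rebooting') is True   # global RDS problem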
diff --git a/health/models_interface.py b/health/models_interface.py
deleted file mode 100644
index ff49a4dde..000000000
--- a/health/models_interface.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-
-@add_metaclass(ABCMeta)
-class HealthCheckDataInterface(object):
- """
- Interface that represents all data store interactions required by health checks.
- """
-
- @abstractmethod
- def check_health(self, app_config):
- """ Returns True if the connection to the database is healthy and False otherwise. """
- pass
diff --git a/health/models_pre_oci.py b/health/models_pre_oci.py
deleted file mode 100644
index 9f50b55eb..000000000
--- a/health/models_pre_oci.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from data.model import health
-from health.models_interface import HealthCheckDataInterface
-
-
-class PreOCIModel(HealthCheckDataInterface):
- def check_health(self, app_config):
- return health.check_health(app_config)
-
-
-pre_oci_model = PreOCIModel()
diff --git a/health/services.py b/health/services.py
index 368c09b2f..66a8b4033 100644
--- a/health/services.py
+++ b/health/services.py
@@ -1,197 +1,65 @@
import logging
-import os
-import tempfile
+from data.model import health
+from app import build_logs, storage
-import psutil
-
-from app import build_logs, storage, authentication, instance_keys
-from health.models_pre_oci import pre_oci_model as model
logger = logging.getLogger(__name__)
-def _compute_internal_endpoint(app, endpoint):
- # Compute the URL for checking the endpoint. We append a port if and only if the
+
+def _check_registry_gunicorn(app):
+ """ Returns the status of the registry gunicorn workers. """
+ # Compute the URL for checking the registry endpoint. We append a port if and only if the
# hostname contains one.
+ client = app.config['HTTPCLIENT']
hostname_parts = app.config['SERVER_HOSTNAME'].split(':')
port = ''
- if hostname_parts[0] == 'localhost':
- if len(hostname_parts) == 2:
- port = ':' + hostname_parts[1]
+ if len(hostname_parts) == 2:
+ port = ':' + hostname_parts[1]
scheme = app.config['PREFERRED_URL_SCHEME']
if app.config.get('EXTERNAL_TLS_TERMINATION', False):
scheme = 'http'
- if port == '':
- if scheme == 'http':
- port = ':8080'
- else:
- port = ':8443'
-
- return '%s://localhost%s/%s' % (scheme, port, endpoint)
-
-
-def _check_gunicorn(endpoint):
- def fn(app):
- """ Returns the status of the gunicorn workers. """
- client = app.config['HTTPCLIENT']
- registry_url = _compute_internal_endpoint(app, endpoint)
- try:
- status_code = client.get(registry_url, verify=False, timeout=2).status_code
- okay = status_code == 200
- message = ('Got non-200 response for worker: %s' % status_code) if not okay else None
- return (okay, message)
- except Exception as ex:
- logger.exception('Exception when checking worker health: %s', registry_url)
- return (False, 'Exception when checking worker health: %s' % registry_url)
-
- return fn
-
-
-def _check_jwt_proxy(app):
- """ Returns the status of JWT proxy in the container. """
- client = app.config['HTTPCLIENT']
- registry_url = _compute_internal_endpoint(app, 'secscan')
+ registry_url = '%s://localhost%s/v1/_internal_ping' % (scheme, port)
try:
- status_code = client.get(registry_url, verify=False, timeout=2).status_code
- okay = status_code == 403
- return (okay, ('Got non-403 response for JWT proxy: %s' % status_code) if not okay else None)
- except Exception as ex:
- logger.exception('Exception when checking jwtproxy health: %s', registry_url)
- return (False, 'Exception when checking jwtproxy health: %s' % registry_url)
+ return client.get(registry_url, verify=False, timeout=2).status_code == 200
+ except Exception:
+ logger.exception('Exception when checking registry health: %s', registry_url)
+ return False
def _check_database(app):
""" Returns the status of the database, as accessed from this instance. """
- return model.check_health(app.config)
-
+ return health.check_health(app.config)
def _check_redis(app):
""" Returns the status of Redis, as accessed from this instance. """
return build_logs.check_health()
-
def _check_storage(app):
""" Returns the status of storage, as accessed from this instance. """
- if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
- return (True, 'Storage check disabled for readonly mode')
-
try:
storage.validate(storage.preferred_locations, app.config['HTTPCLIENT'])
- return (True, None)
+ return True
except Exception as ex:
logger.exception('Storage check failed with exception %s', ex)
- return (False, 'Storage check failed with exception %s' % ex.message)
+ return False
-def _check_auth(app):
- """ Returns the status of the auth engine, as accessed from this instance. """
- return authentication.ping()
-
-
-def _check_service_key(app):
- """ Returns the status of the service key for this instance. If the key has disappeared or
- has expired, then will return False.
- """
- if not app.config.get('SETUP_COMPLETE', False):
- return (True, 'Stack not fully setup; skipping check')
-
- try:
- kid = instance_keys.local_key_id
- except IOError as ex:
- # Key has not been created yet.
- return (True, 'Stack not fully setup; skipping check')
-
- try:
- key_is_valid = bool(instance_keys.get_service_key_public_key(kid))
- message = 'Could not find valid instance service key %s' % kid if not key_is_valid else None
- return (key_is_valid, message)
- except Exception as ex:
- logger.exception('Got exception when trying to retrieve the instance key')
-
- # NOTE: We return *True* here if there was an exception when retrieving the key, as it means
- # the database is down, which will be handled by the database health check.
- return (True, 'Failed to get instance key due to a database issue; skipping check')
-
-
-
-def _disk_within_threshold(path, threshold):
- usage = psutil.disk_usage(path)
- return (1.0 - (usage.percent / 100.0)) >= threshold
-
-
-def _check_disk_space(for_warning):
- def _check_disk_space(app):
- """ Returns the status of the disk space for this instance. If the available disk space is below
-    a certain threshold, this will return False.
- """
- if not app.config.get('SETUP_COMPLETE', False):
- return (True, 'Stack not fully setup; skipping check')
-
- config_key = ('DISKSPACE_HEALTH_WARNING_THRESHOLD'
- if for_warning else 'DISKSPACE_HEALTH_THRESHOLD')
- default_threshold = 0.1 if for_warning else 0.01
-
- # Check the directory in which we're running.
- currentfile = os.path.abspath(__file__)
- if not _disk_within_threshold(currentfile, app.config.get(config_key, default_threshold)):
- stats = psutil.disk_usage(currentfile)
- logger.debug('Disk space on main volume: %s', stats)
- return (False, 'Disk space has gone below threshold on main volume: %s' % stats.percent)
-
- # Check the temp directory as well.
- tempdir = tempfile.gettempdir()
- if tempdir is not None:
- if not _disk_within_threshold(tempdir, app.config.get(config_key, default_threshold)):
- stats = psutil.disk_usage(tempdir)
- logger.debug('Disk space on temp volume: %s', stats)
- return (False, 'Disk space has gone below threshold on temp volume: %s' % stats.percent)
-
- return (True, '')
-
- return _check_disk_space
-
-
-_INSTANCE_SERVICES = {
- 'registry_gunicorn': _check_gunicorn('v1/_internal_ping'),
- 'web_gunicorn': _check_gunicorn('_internal_ping'),
- 'verbs_gunicorn': _check_gunicorn('c1/_internal_ping'),
- 'service_key': _check_service_key,
- 'disk_space': _check_disk_space(for_warning=False),
- 'jwtproxy': _check_jwt_proxy,
-}
-
-_GLOBAL_SERVICES = {
+_SERVICES = {
+ 'registry_gunicorn': _check_registry_gunicorn,
'database': _check_database,
'redis': _check_redis,
'storage': _check_storage,
- 'auth': _check_auth,
}
-_WARNING_SERVICES = {
- 'disk_space_warning': _check_disk_space(for_warning=True),
-}
-
-def check_all_services(app, skip, for_instance=False):
+def check_all_services(app, skip):
""" Returns a dictionary containing the status of all the services defined. """
- if for_instance:
- services = dict(_INSTANCE_SERVICES)
- services.update(_GLOBAL_SERVICES)
- else:
- services = _GLOBAL_SERVICES
-
- return _check_services(app, skip, services)
-
-def check_warning_services(app, skip):
- """ Returns a dictionary containing the status of all the warning services defined. """
- return _check_services(app, skip, _WARNING_SERVICES)
-
-def _check_services(app, skip, services):
status = {}
- for name in services:
+ for name in _SERVICES:
if name in skip:
continue
- status[name] = services[name](app)
+ status[name] = _SERVICES[name](app)
return status
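
After this simplification each service check returns a bare boolean and check_all_services maps service name to result, honoring the skip list; overall health is then the conjunction of the remaining values. A runnable sketch with stubbed-out checks (the lambdas stand in for the real _check_* functions):

_SERVICES = {
  'database': lambda app: True,
  'redis': lambda app: False,
  'storage': lambda app: True,
}

def check_all_services(app, skip):
  status = {}
  for name in _SERVICES:
    if name in skip:
      continue
    status[name] = _SERVICES[name](app)
  return status

statuses = check_all_services(None, skip=['redis'])
assert statuses == {'database': True, 'storage': True}
assert all(statuses.values())  # healthy only if every remaining check passed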
diff --git a/image/appc/__init__.py b/image/appc/__init__.py
index a30f63416..851c4698c 100644
--- a/image/appc/__init__.py
+++ b/image/appc/__init__.py
@@ -18,10 +18,10 @@ class AppCImageFormatter(TarImageFormatter):
  Image formatter which produces a tarball according to the AppC specification.
"""
- def stream_generator(self, tag, parsed_manifest, synthetic_image_id, layer_iterator,
- tar_stream_getter_iterator, reporter=None):
+ def stream_generator(self, repo_image, tag, synthetic_image_id, get_image_iterator,
+ get_layer_iterator):
image_mtime = 0
- created = parsed_manifest.created_datetime
+ created = next(get_image_iterator()).v1_metadata.created
if created is not None:
image_mtime = calendar.timegm(created.utctimetuple())
@@ -30,18 +30,17 @@ class AppCImageFormatter(TarImageFormatter):
# rootfs - The root file system
# Yield the manifest.
- aci_manifest = json.dumps(DockerV1ToACIManifestTranslator.build_manifest(
+ manifest = json.dumps(DockerV1ToACIManifestTranslator.build_manifest(
+ repo_image,
tag,
- parsed_manifest,
synthetic_image_id
))
- yield self.tar_file('manifest', aci_manifest, mtime=image_mtime)
+ yield self.tar_file('manifest', manifest, mtime=image_mtime)
    # Yield the merged layer data.
yield self.tar_folder('rootfs', mtime=image_mtime)
- layer_merger = StreamLayerMerger(tar_stream_getter_iterator, path_prefix='rootfs/',
- reporter=reporter)
+ layer_merger = StreamLayerMerger(get_layer_iterator, path_prefix='rootfs/')
for entry in layer_merger.get_generator():
yield entry
@@ -157,8 +156,8 @@ class DockerV1ToACIManifestTranslator(object):
volume_name = DockerV1ToACIManifestTranslator._ac_name(docker_volume_path)
return "volume-%s" % volume_name
- volume_list = docker_config['Volumes'] or docker_config['volumes'] or {}
- for docker_volume_path in volume_list.iterkeys():
+ volume_list = docker_config['Volumes'] or docker_config['volumes'] or []
+ for docker_volume_path in volume_list:
if not docker_volume_path:
continue
@@ -170,16 +169,16 @@ class DockerV1ToACIManifestTranslator(object):
return volumes
@staticmethod
- def build_manifest(tag, manifest, synthetic_image_id):
+ def build_manifest(repo_image, tag, synthetic_image_id):
""" Builds an ACI manifest of an existing repository image. """
- docker_layer_data = JSONPathDict(json.loads(manifest.leaf_layer.raw_v1_metadata))
+ docker_layer_data = JSONPathDict(repo_image.compat_metadata)
config = docker_layer_data['config'] or JSONPathDict({})
- namespace = tag.repository.namespace_name
- repo_name = tag.repository.name
+ namespace = repo_image.repository.namespace_name
+ repo_name = repo_image.repository.name
source_url = "%s://%s/%s/%s:%s" % (app.config['PREFERRED_URL_SCHEME'],
app.config['SERVER_HOSTNAME'],
- namespace, repo_name, tag.name)
+ namespace, repo_name, tag)
# ACI requires that the execution command be absolutely referenced. Therefore, if we find
# a relative command, we give it as an argument to /bin/sh to resolve and execute for us.
@@ -188,7 +187,7 @@ class DockerV1ToACIManifestTranslator(object):
if exec_path and not exec_path[0].startswith('/'):
exec_path = ['/bin/sh', '-c', '""%s""' % ' '.join(exec_path)]
- # TODO: ACI doesn't support : in the name, so remove any ports.
+ # TODO(jschorr): ACI doesn't support : in the name, so remove any ports.
hostname = app.config['SERVER_HOSTNAME']
hostname = hostname.split(':', 1)[0]
@@ -209,7 +208,7 @@ class DockerV1ToACIManifestTranslator(object):
"labels": [
{
"name": "version",
- "value": tag.name,
+ "value": tag,
},
{
"name": "arch",
diff --git a/image/appc/test/test_appc.py b/image/appc/test/test_appc.py
index d078fbe9f..45696f033 100644
--- a/image/appc/test/test_appc.py
+++ b/image/appc/test/test_appc.py
@@ -1,7 +1,7 @@
import pytest
from image.appc import DockerV1ToACIManifestTranslator
-from util.dict_wrappers import JSONPathDict
+from data.interfaces.verbs import RepositoryReference, ImageWithBlob
EXAMPLE_MANIFEST_OBJ = {
@@ -73,14 +73,26 @@ EXAMPLE_MANIFEST_OBJ = {
"throwaway": True
}
-@pytest.mark.parametrize("vcfg,expected", [
- ({'Volumes': None}, []),
- ({'Volumes': {}}, []),
- ({'Volumes': {'/bin': {}}}, [{'name': 'volume-bin', 'path': '/bin', 'readOnly': False}]),
- ({'volumes': None}, []),
- ({'volumes': {}}, []),
- ({'volumes': {'/bin': {}}}, [{'name': 'volume-bin', 'path': '/bin', 'readOnly': False}]),
-])
-def test_volume_version_easy(vcfg, expected):
- output = DockerV1ToACIManifestTranslator._build_volumes(JSONPathDict(vcfg))
- assert output == expected
+
+@pytest.fixture
+def repo_image():
+ repo_ref = RepositoryReference(1, 'simple', 'devtable')
+ return ImageWithBlob(1, None, EXAMPLE_MANIFEST_OBJ, repo_ref, 1, None)
+
+
+def test_port_conversion(repo_image):
+ output = DockerV1ToACIManifestTranslator.build_manifest(repo_image, 'v3.0.15', 'abcdef')
+ ports = output['app']['ports']
+ ports.sort()
+ assert {'name':'port-2379', 'port':2379, 'protocol':'tcp'} == ports[0]
+ assert {'name':'port-2380', 'port':2380, 'protocol':'tcp'} == ports[1]
+
+
+def test_legacy_port_conversion(repo_image):
+ del repo_image.compat_metadata['config']['ExposedPorts']
+ repo_image.compat_metadata['config']['ports'] = ['8080', '8081']
+ output = DockerV1ToACIManifestTranslator.build_manifest(repo_image, 'v3.0.15', 'abcdef')
+ ports = output['app']['ports']
+ ports.sort()
+ assert {'name':'port-8080', 'port':8080, 'protocol':'tcp'} == ports[0]
+ assert {'name':'port-8081', 'port':8081, 'protocol':'tcp'} == ports[1]
diff --git a/image/common.py b/image/common.py
index 63b00e676..8d9bfefbc 100644
--- a/image/common.py
+++ b/image/common.py
@@ -7,17 +7,17 @@ class TarImageFormatter(object):
Base class for classes which produce a tar containing image and layer data.
"""
- def build_stream(self, tag, manifest, synthetic_image_id, layer_iterator,
- tar_stream_getter_iterator, reporter=None):
+ def build_stream(self, repo_image, tag, synthetic_image_id, get_image_iterator,
+ get_layer_iterator):
"""
Builds and streams a synthetic .tar.gz that represents the formatted tar created by this class's
implementation.
"""
- return GzipWrap(self.stream_generator(tag, manifest, synthetic_image_id, layer_iterator,
- tar_stream_getter_iterator, reporter=reporter))
+ return GzipWrap(self.stream_generator(repo_image, tag, synthetic_image_id, get_image_iterator,
+ get_layer_iterator))
- def stream_generator(self, tag, manifest, synthetic_image_id, layer_iterator,
- tar_stream_getter_iterator, reporter=None):
+ def stream_generator(self, repo_image, tag, synthetic_image_id, get_image_iterator,
+ get_layer_iterator):
raise NotImplementedError
def tar_file(self, name, contents, mtime=None):
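
Concrete formatters only need to supply stream_generator; build_stream then wraps whatever tar entries it yields in a gzip stream. A self-contained sketch of the pattern using the standard library (gzip.GzipFile stands in for the project's GzipWrap, and the single-entry generator is illustrative):

import gzip
import io
import tarfile

class EchoFormatter(object):
  def stream_generator(self):
    # Yield raw tar bytes, as a real formatter would for each entry.
    buf = io.BytesIO()
    tar = tarfile.open(fileobj=buf, mode='w')
    data = b'hello'
    info = tarfile.TarInfo(name='manifest')
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))
    tar.close()
    yield buf.getvalue()

  def build_stream(self):
    # Gzip the generated tar stream, mirroring GzipWrap(self.stream_generator()).
    out = io.BytesIO()
    with gzip.GzipFile(fileobj=out, mode='wb') as gz:
      for chunk in self.stream_generator():
        gz.write(chunk)
    return out.getvalue()

assert EchoFormatter().build_stream()[:2] == b'\x1f\x8b'  # gzip magic bytes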
diff --git a/image/docker/interfaces.py b/image/docker/interfaces.py
deleted file mode 100644
index 85a17fd06..000000000
--- a/image/docker/interfaces.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from abc import ABCMeta, abstractproperty, abstractmethod
-from six import add_metaclass
-
-@add_metaclass(ABCMeta)
-class ManifestInterface(object):
- """ Defines the interface for the various manifests types supported. """
- @abstractproperty
- def is_manifest_list(self):
- """ Returns whether this manifest is a list. """
-
- @abstractproperty
- def schema_version(self):
- """ The version of the schema. """
-
- @abstractproperty
- def digest(self):
- """ The digest of the manifest, including type prefix. """
- pass
-
- @abstractproperty
- def media_type(self):
- """ The media type of the schema. """
- pass
-
- @abstractproperty
- def manifest_dict(self):
- """ Returns the manifest as a dictionary ready to be serialized to JSON. """
- pass
-
- @abstractproperty
- def bytes(self):
- """ Returns the bytes of the manifest. """
- pass
-
- @abstractproperty
- def layers_compressed_size(self):
- """ Returns the total compressed size of all the layers in this manifest. Returns None if this
- cannot be computed locally.
- """
-
- @abstractmethod
- def validate(self, content_retriever):
- """ Performs validation of required assertions about the manifest. Raises a ManifestException
- on failure.
- """
- pass
-
- @abstractmethod
- def get_layers(self, content_retriever):
- """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
- does not support layers. The layer must be of type ManifestImageLayer. """
- pass
-
- @abstractmethod
- def get_leaf_layer_v1_image_id(self, content_retriever):
- """ Returns the Docker V1 image ID for the leaf (top) layer, if any, or None if
- not applicable. """
- pass
-
- @abstractmethod
- def get_legacy_image_ids(self, content_retriever):
- """ Returns the Docker V1 image IDs for the layers of this manifest or None if not applicable.
- """
- pass
-
- @abstractproperty
- def blob_digests(self):
- """ Returns an iterator over all the blob digests referenced by this manifest,
- from base to leaf. The blob digests are strings with prefixes. For manifests that reference
- config as a blob, the blob will be included here as the last entry.
- """
-
- @abstractmethod
- def get_blob_digests_for_translation(self):
- """ Returns the blob digests for translation of this manifest into another manifest. This
- method will ignore missing IDs in layers, unlike `blob_digests`.
- """
-
- @abstractproperty
- def local_blob_digests(self):
- """ Returns an iterator over all the *non-remote* blob digests referenced by this manifest,
- from base to leaf. The blob digests are strings with prefixes. For manifests that reference
- config as a blob, the blob will be included here as the last entry.
- """
-
- @abstractmethod
- def child_manifests(self, content_retriever):
- """ Returns an iterator of all manifests that live under this manifest, if any or None if not
- applicable.
- """
-
- @abstractmethod
- def get_manifest_labels(self, content_retriever):
- """ Returns a dictionary of all the labels defined inside this manifest or None if this kind
- of manifest does not support labels. """
- pass
-
- @abstractmethod
- def get_requires_empty_layer_blob(self, content_retriever):
- """ Whether this schema requires the special empty layer blob. """
- pass
-
- @abstractmethod
- def unsigned(self):
- """ Returns an unsigned version of this manifest. """
-
- @abstractproperty
- def has_legacy_image(self):
- """ Returns True if this manifest has a legacy V1 image, or False if not. """
-
- @abstractmethod
- def generate_legacy_layers(self, images_map, content_retriever):
- """
- Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata, starting
- at the base layer and working towards the leaf.
-
- If Docker gives us a layer with a v1 image ID that already points to existing
- content, but the checksums don't match, then we need to rewrite the image ID
- to something new in order to ensure consistency.
-
- Returns None if there are no legacy images associated with the manifest.
- """
-
- @abstractmethod
- def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
- """ Returns a schema1 version of the manifest. If this is a mainfest list, should return the
- manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
- If none, returns None.
- """
-
- @abstractmethod
- def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
- content_retriever):
- """ Returns a version of this schema that has a media type found in the given media type set.
- If not possible, or an error occurs, returns None.
- """
-
-
-@add_metaclass(ABCMeta)
-class ContentRetriever(object):
- """ Defines the interface for retrieval of various content referenced by a manifest. """
- @abstractmethod
- def get_manifest_bytes_with_digest(self, digest):
- """ Returns the bytes of the manifest with the given digest or None if none found. """
-
- @abstractmethod
- def get_blob_bytes_with_digest(self, digest):
- """ Returns the bytes of the blob with the given digest or None if none found. """
diff --git a/image/docker/schema1.py b/image/docker/schema1.py
index ef5c7e459..d93cdc62b 100644
--- a/image/docker/schema1.py
+++ b/image/docker/schema1.py
@@ -11,20 +11,13 @@ import logging
from collections import namedtuple, OrderedDict
from datetime import datetime
-import dateutil.parser
-
-from jsonschema import validate as validate_schema, ValidationError
-
-from jwkest.jws import SIGNER_ALGS, keyrep, BadSignature
+from jwkest.jws import SIGNER_ALGS, keyrep
from jwt.utils import base64url_encode, base64url_decode
from digest import digest_tools
from image.docker import ManifestException
-from image.docker.types import ManifestImageLayer
-from image.docker.interfaces import ManifestInterface
from image.docker.v1 import DockerV1Metadata
-from image.docker.schemautil import to_canonical_json
-from util.bytes import Bytes
+
logger = logging.getLogger(__name__)
@@ -59,6 +52,7 @@ _ISO_DATETIME_FORMAT_ZULU = '%Y-%m-%dT%H:%M:%SZ'
# The algorithm we use to sign the JWS.
_JWS_SIGNING_ALGORITHM = 'RS256'
+
class MalformedSchema1Manifest(ManifestException):
"""
Raised when a manifest fails an assertion that should be true according to the Docker Manifest
@@ -74,8 +68,7 @@ class InvalidSchema1Signature(ManifestException):
pass
-class Schema1Layer(namedtuple('Schema1Layer', ['digest', 'v1_metadata', 'raw_v1_metadata',
- 'compressed_size', 'is_remote', 'urls'])):
+class Schema1Layer(namedtuple('Schema1Layer', ['digest', 'v1_metadata', 'raw_v1_metadata'])):
"""
Represents all of the data about an individual layer in a given Manifest.
This is the union of the fsLayers (digest) and the history entries (v1_compatibility).
@@ -83,101 +76,24 @@ class Schema1Layer(namedtuple('Schema1Layer', ['digest', 'v1_metadata', 'raw_v1_
class Schema1V1Metadata(namedtuple('Schema1V1Metadata', ['image_id', 'parent_image_id', 'created',
- 'comment', 'command', 'author',
- 'labels'])):
+ 'comment', 'command', 'labels'])):
"""
Represents the necessary data extracted from the v1 compatibility string in a given layer of a
Manifest.
"""
-class DockerSchema1Manifest(ManifestInterface):
- METASCHEMA = {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA1_SIGNATURES_KEY: {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA1_PROTECTED_KEY: {
- 'type': 'string',
- },
- DOCKER_SCHEMA1_HEADER_KEY: {
- 'type': 'object',
- 'properties': {
- 'alg': {
- 'type': 'string',
- },
- 'jwk': {
- 'type': 'object',
- },
- },
- 'required': ['alg', 'jwk'],
- },
- DOCKER_SCHEMA1_SIGNATURE_KEY: {
- 'type': 'string',
- },
- },
- 'required': [DOCKER_SCHEMA1_PROTECTED_KEY, DOCKER_SCHEMA1_HEADER_KEY,
- DOCKER_SCHEMA1_SIGNATURE_KEY],
- },
- },
- DOCKER_SCHEMA1_REPO_TAG_KEY: {
- 'type': 'string',
- },
- DOCKER_SCHEMA1_REPO_NAME_KEY: {
- 'type': 'string',
- },
- DOCKER_SCHEMA1_HISTORY_KEY: {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA1_V1_COMPAT_KEY: {
- 'type': 'string',
- },
- },
- 'required': [DOCKER_SCHEMA1_V1_COMPAT_KEY],
- },
- },
- DOCKER_SCHEMA1_FS_LAYERS_KEY: {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA1_BLOB_SUM_KEY: {
- 'type': 'string',
- },
- },
- 'required': [DOCKER_SCHEMA1_BLOB_SUM_KEY],
- },
- },
- },
- 'required': [DOCKER_SCHEMA1_REPO_TAG_KEY,
- DOCKER_SCHEMA1_REPO_NAME_KEY, DOCKER_SCHEMA1_FS_LAYERS_KEY,
- DOCKER_SCHEMA1_HISTORY_KEY],
- }
-
+class DockerSchema1Manifest(object):
def __init__(self, manifest_bytes, validate=True):
- assert isinstance(manifest_bytes, Bytes)
-
self._layers = None
self._bytes = manifest_bytes
try:
- self._parsed = json.loads(manifest_bytes.as_encoded_str())
+ self._parsed = json.loads(manifest_bytes)
except ValueError as ve:
raise MalformedSchema1Manifest('malformed manifest data: %s' % ve)
- try:
- validate_schema(self._parsed, DockerSchema1Manifest.METASCHEMA)
- except ValidationError as ve:
- raise MalformedSchema1Manifest('manifest data does not match schema: %s' % ve)
-
- self._signatures = self._parsed.get(DOCKER_SCHEMA1_SIGNATURES_KEY)
- self._architecture = self._parsed.get(DOCKER_SCHEMA1_ARCH_KEY)
-
+ self._signatures = self._parsed[DOCKER_SCHEMA1_SIGNATURES_KEY]
self._tag = self._parsed[DOCKER_SCHEMA1_REPO_TAG_KEY]
repo_name = self._parsed[DOCKER_SCHEMA1_REPO_NAME_KEY]
@@ -194,56 +110,24 @@ class DockerSchema1Manifest(ManifestInterface):
self._validate()
def _validate(self):
- if not self._signatures:
- return
-
- payload_str = self._payload
for signature in self._signatures:
- bytes_to_verify = '{0}.{1}'.format(signature['protected'], base64url_encode(payload_str))
+ bytes_to_verify = '{0}.{1}'.format(signature['protected'],
+ base64url_encode(self.payload))
signer = SIGNER_ALGS[signature['header']['alg']]
key = keyrep(signature['header']['jwk'])
gk = key.get_key()
sig = base64url_decode(signature['signature'].encode('utf-8'))
-
- try:
- verified = signer.verify(bytes_to_verify, sig, gk)
- except BadSignature:
- raise InvalidSchema1Signature()
-
+ verified = signer.verify(bytes_to_verify, sig, gk)
if not verified:
raise InvalidSchema1Signature()
- def validate(self, content_retriever):
- """ Performs validation of required assertions about the manifest. Raises a ManifestException
- on failure.
- """
- # Already validated.
-
- @property
- def is_signed(self):
- """ Returns whether the schema is signed. """
- return bool(self._signatures)
-
- @property
- def architecture(self):
- return self._architecture
-
- @property
- def is_manifest_list(self):
- return False
-
- @property
- def schema_version(self):
- return 1
-
@property
def content_type(self):
- return (DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
- if self._signatures else DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE)
+ return DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
@property
def media_type(self):
- return self.content_type
+ return DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
@property
def signatures(self):
@@ -261,6 +145,10 @@ class DockerSchema1Manifest(ManifestInterface):
def tag(self):
return self._tag
+ @property
+ def json(self):
+ return self._bytes
+
@property
def bytes(self):
return self._bytes
@@ -269,17 +157,9 @@ class DockerSchema1Manifest(ManifestInterface):
def manifest_json(self):
return self._parsed
- @property
- def manifest_dict(self):
- return self._parsed
-
- @property
- def layers_compressed_size(self):
- return None
-
@property
def digest(self):
- return digest_tools.sha256_digest(self._payload)
+ return digest_tools.sha256_digest(self.payload)
@property
def image_ids(self):
@@ -298,94 +178,13 @@ class DockerSchema1Manifest(ManifestInterface):
def leaf_layer(self):
return self.layers[-1]
- @property
- def created_datetime(self):
- created_datetime_str = self.leaf_layer.v1_metadata.created
- if created_datetime_str is None:
- return None
-
- try:
- return dateutil.parser.parse(created_datetime_str).replace(tzinfo=None)
- except:
- # parse raises different exceptions, so we cannot use a specific kind of handler here.
- return None
-
@property
def layers(self):
if self._layers is None:
self._layers = list(self._generate_layers())
return self._layers
- def get_layers(self, content_retriever):
- """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
- does not support layers. """
- for layer in self.layers:
- created_datetime = None
- try:
- created_datetime = dateutil.parser.parse(layer.v1_metadata.created).replace(tzinfo=None)
- except:
- pass
-
- yield ManifestImageLayer(layer_id=layer.v1_metadata.image_id,
- compressed_size=layer.compressed_size,
- is_remote=False,
- urls=None,
- command=layer.v1_metadata.command,
- comment=layer.v1_metadata.comment,
- author=layer.v1_metadata.author,
- blob_digest=layer.digest,
- created_datetime=created_datetime,
- internal_layer=layer)
-
- @property
- def blob_digests(self):
- return [str(layer.digest) for layer in self.layers]
-
- @property
- def local_blob_digests(self):
- return self.blob_digests
-
- def get_blob_digests_for_translation(self):
- """ Returns the blob digests for translation of this manifest into another manifest. This
- method will ignore missing IDs in layers, unlike `blob_digests`.
- """
- layers = self._generate_layers(allow_missing_ids=True)
- return [str(layer.digest) for layer in layers]
-
- def child_manifests(self, content_retriever):
- return None
-
- def get_manifest_labels(self, content_retriever):
- return self.layers[-1].v1_metadata.labels
-
- def get_requires_empty_layer_blob(self, content_retriever):
- return False
-
- def _unsigned_builder(self):
- builder = DockerSchema1ManifestBuilder(self._namespace, self._repo_name, self._tag,
- self._architecture)
- for layer in reversed(self.layers):
- builder.add_layer(str(layer.digest), layer.raw_v1_metadata)
-
- return builder
-
- def unsigned(self):
- if self.media_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
- return self
-
- # Create an unsigned version of the manifest.
- return self._unsigned_builder().build()
-
- def with_tag_name(self, tag_name, json_web_key=None):
- """ Returns a copy of this manifest, with the tag changed to the given tag name. """
- builder = DockerSchema1ManifestBuilder(self._namespace, self._repo_name, tag_name,
- self._architecture)
- for layer in reversed(self.layers):
- builder.add_layer(str(layer.digest), layer.raw_v1_metadata)
-
- return builder.build(json_web_key)
-
- def _generate_layers(self, allow_missing_ids=False):
+ def _generate_layers(self):
"""
Returns a generator of objects that have the blobSum and v1Compatibility keys in them,
starting from the base image and working toward the leaf node.
@@ -401,82 +200,27 @@ class DockerSchema1Manifest(ManifestInterface):
metadata_string = history_obj[DOCKER_SCHEMA1_V1_COMPAT_KEY]
- try:
- v1_metadata = json.loads(metadata_string)
- except (ValueError, TypeError):
- raise MalformedSchema1Manifest('Could not parse metadata string: %s' % metadata_string)
+ v1_metadata = json.loads(metadata_string)
+ command_list = v1_metadata.get('container_config', {}).get('Cmd', None)
+ command = json.dumps(command_list) if command_list else None
- container_config = v1_metadata.get('container_config') or {}
- command_list = container_config.get('Cmd', None)
- command = to_canonical_json(command_list) if command_list else None
-
- if not allow_missing_ids and not 'id' in v1_metadata:
+    if 'id' not in v1_metadata:
raise MalformedSchema1Manifest('id field missing from v1Compatibility JSON')
labels = v1_metadata.get('config', {}).get('Labels', {}) or {}
- extracted = Schema1V1Metadata(image_id=v1_metadata.get('id'),
- parent_image_id=v1_metadata.get('parent'),
- created=v1_metadata.get('created'),
- comment=v1_metadata.get('comment'),
- author=v1_metadata.get('author'),
- command=command,
- labels=labels)
-
- compressed_size = v1_metadata.get('Size')
- yield Schema1Layer(image_digest, extracted, metadata_string, compressed_size, False, None)
+ extracted = Schema1V1Metadata(v1_metadata['id'], v1_metadata.get('parent'),
+ v1_metadata.get('created'), v1_metadata.get('comment'),
+ command, labels)
+ yield Schema1Layer(image_digest, extracted, metadata_string)
@property
- def _payload(self):
- if self._signatures is None:
- return self._bytes.as_encoded_str()
-
- byte_data = self._bytes.as_encoded_str()
+ def payload(self):
protected = str(self._signatures[0][DOCKER_SCHEMA1_PROTECTED_KEY])
parsed_protected = json.loads(base64url_decode(protected))
- signed_content_head = byte_data[:parsed_protected[DOCKER_SCHEMA1_FORMAT_LENGTH_KEY]]
+ signed_content_head = self._bytes[:parsed_protected[DOCKER_SCHEMA1_FORMAT_LENGTH_KEY]]
signed_content_tail = base64url_decode(str(parsed_protected[DOCKER_SCHEMA1_FORMAT_TAIL_KEY]))
return signed_content_head + signed_content_tail
- def generate_legacy_layers(self, images_map, content_retriever):
- return self.rewrite_invalid_image_ids(images_map)
-
- def get_legacy_image_ids(self, content_retriever):
- return self.legacy_image_ids
-
- @property
- def legacy_image_ids(self):
- return {mdata.v1_metadata.image_id for mdata in self.layers}
-
- @property
- def has_legacy_image(self):
- return True
-
- @property
- def leaf_layer_v1_image_id(self):
- return self.layers[-1].v1_metadata.image_id
-
- def get_leaf_layer_v1_image_id(self, content_retriever):
- return self.layers[-1].v1_metadata.image_id
-
- def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
- """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
- If none, returns None.
- """
- # Note: schema1 *technically* supports non-amd64 architectures, but in practice these were never
- # used, so to ensure full backwards compatibility, we just always return the schema.
- return self
-
- def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
- content_retriever):
- if self.media_type in allowed_mediatypes:
- return self
-
- unsigned = self.unsigned()
- if unsigned.media_type in allowed_mediatypes:
- return unsigned
-
- return None
-
def rewrite_invalid_image_ids(self, images_map):
"""
Rewrites Docker v1 image IDs and returns a generator of DockerV1Metadata.
@@ -530,7 +274,6 @@ class DockerSchema1Manifest(ManifestInterface):
image_id=working_image_id,
created=extracted_v1_metadata.created,
comment=extracted_v1_metadata.comment,
- author=extracted_v1_metadata.author,
command=extracted_v1_metadata.command,
compat_json=v1_metadata_json,
parent_image_id=parent_image_id,
@@ -559,67 +302,20 @@ class DockerSchema1ManifestBuilder(object):
self._fs_layer_digests = []
self._history = []
- self._namespace_name = namespace_name
- self._repo_name = repo_name
- self._tag = tag
- self._architecture = architecture
def add_layer(self, layer_digest, v1_json_metadata):
self._fs_layer_digests.append({
DOCKER_SCHEMA1_BLOB_SUM_KEY: layer_digest,
})
self._history.append({
- DOCKER_SCHEMA1_V1_COMPAT_KEY: v1_json_metadata or '{}',
+ DOCKER_SCHEMA1_V1_COMPAT_KEY: v1_json_metadata,
})
return self
- def with_metadata_removed(self):
- """ Returns a copy of the builder where every layer but the leaf layer has
- its metadata stripped down to the bare essentials.
+
+ def build(self, json_web_key):
"""
- builder = DockerSchema1ManifestBuilder(self._namespace_name, self._repo_name, self._tag,
- self._architecture)
-
- for index, fs_layer in enumerate(self._fs_layer_digests):
- try:
- metadata = json.loads(self._history[index][DOCKER_SCHEMA1_V1_COMPAT_KEY])
- except (ValueError, TypeError):
- logger.exception('Could not parse existing builder')
- raise MalformedSchema1Manifest
-
- fixed_metadata = {}
- if index == 0: # Leaf layer is at index 0 in schema 1.
- fixed_metadata = metadata
- else:
- # Remove all container config from the metadata.
- fixed_metadata['id'] = metadata['id']
- if 'parent' in metadata:
- fixed_metadata['parent'] = metadata['parent']
-
- if 'created' in metadata:
- fixed_metadata['created'] = metadata['created']
-
- if 'author' in metadata:
- fixed_metadata['author'] = metadata['author']
-
- if 'comment' in metadata:
- fixed_metadata['comment'] = metadata['comment']
-
- if 'Size' in metadata:
- fixed_metadata['Size'] = metadata['Size']
-
- if 'Cmd' in metadata.get('container_config', {}):
- fixed_metadata['container_config'] = {
- 'Cmd': metadata['container_config']['Cmd'],
- }
-
- builder.add_layer(fs_layer[DOCKER_SCHEMA1_BLOB_SUM_KEY], json.dumps(fixed_metadata))
-
- return builder
-
- def build(self, json_web_key=None, ensure_ascii=True):
- """
- Builds a DockerSchema1Manifest object, with optional signature.
+ Builds a DockerSchema1Manifest object complete with signature.
"""
payload = OrderedDict(self._base_payload)
payload.update({
@@ -627,11 +323,8 @@ class DockerSchema1ManifestBuilder(object):
DOCKER_SCHEMA1_FS_LAYERS_KEY: self._fs_layer_digests,
})
- payload_str = json.dumps(payload, indent=3, ensure_ascii=ensure_ascii)
- if json_web_key is None:
- return DockerSchema1Manifest(Bytes.for_string_or_unicode(payload_str))
+ payload_str = json.dumps(payload, indent=3)
- payload_str = Bytes.for_string_or_unicode(payload_str).as_encoded_str()
split_point = payload_str.rfind('\n}')
protected_payload = {
@@ -639,7 +332,7 @@ class DockerSchema1ManifestBuilder(object):
'formatLength': split_point,
'time': datetime.utcnow().strftime(_ISO_DATETIME_FORMAT_ZULU),
}
- protected = base64url_encode(json.dumps(protected_payload, ensure_ascii=ensure_ascii))
+ protected = base64url_encode(json.dumps(protected_payload))
logger.debug('Generated protected block: %s', protected)
bytes_to_sign = '{0}.{1}'.format(protected, base64url_encode(payload_str))
@@ -659,10 +352,10 @@ class DockerSchema1ManifestBuilder(object):
}
logger.debug('Encoded signature block: %s', json.dumps(signature_block))
+
payload.update({DOCKER_SCHEMA1_SIGNATURES_KEY: [signature_block]})
- json_str = json.dumps(payload, indent=3, ensure_ascii=ensure_ascii)
- return DockerSchema1Manifest(Bytes.for_string_or_unicode(json_str))
+ return DockerSchema1Manifest(json.dumps(payload, indent=3))
def _updated_v1_metadata(v1_metadata_json, updated_id_map):
@@ -680,4 +373,4 @@ def _updated_v1_metadata(v1_metadata_json, updated_id_map):
if existing_image in updated_id_map:
parsed['container_config']['image'] = updated_id_map[existing_image]
- return to_canonical_json(parsed)
+ return json.dumps(parsed)
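
The payload property above reconstructs exactly the bytes that were signed: the serialized manifest up to the recorded formatLength, plus the base64url-decoded formatTail that originally followed it before the signatures block was spliced in. A worked sketch of that round trip (toy values; the base64url helper is inlined rather than imported from jwt.utils):

import base64

def b64url_decode(data):
  return base64.urlsafe_b64decode(data + '=' * (-len(data) % 4))

manifest_text = '{\n   "name": "devtable/simple"\n}'
# The signer records how much of the manifest precedes the signatures block
# (formatLength) and what came after it (formatTail).
protected = {
  'formatLength': manifest_text.rfind('\n}'),
  'formatTail': base64.urlsafe_b64encode('\n}'.encode('utf-8')).decode('utf-8'),
}

head = manifest_text[:protected['formatLength']]
tail = b64url_decode(protected['formatTail']).decode('utf-8')
assert head + tail == manifest_text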
diff --git a/image/docker/schema2.py b/image/docker/schema2.py
new file mode 100644
index 000000000..504f5df80
--- /dev/null
+++ b/image/docker/schema2.py
@@ -0,0 +1,11 @@
+"""
+schema2 implements pure data transformations according to the Docker Manifest v2.2 Specification.
+
+https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
+"""
+
+# Content Types
+DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v2+json'
+DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.list.v2+json'
+DOCKER_SCHEMA2_CONTENT_TYPES = {DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
+ DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE}
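
These content-type constants are typically consulted when negotiating with a client's Accept header, e.g. to decide whether a schema2 manifest can be returned as-is. A small sketch of that negotiation (the helper name is illustrative, not part of this module):

DOCKER_SCHEMA2_CONTENT_TYPES = {
  'application/vnd.docker.distribution.manifest.v2+json',
  'application/vnd.docker.distribution.manifest.list.v2+json',
}

def accepts_schema2(accept_header):
  # True if any media type the client accepts is a schema2 type.
  offered = set(part.split(';')[0].strip() for part in accept_header.split(','))
  return bool(offered & DOCKER_SCHEMA2_CONTENT_TYPES)

assert accepts_schema2('application/vnd.docker.distribution.manifest.v2+json')
assert not accepts_schema2('application/vnd.docker.distribution.manifest.v1+prettyjws')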
diff --git a/image/docker/schema2/__init__.py b/image/docker/schema2/__init__.py
deleted file mode 100644
index 8477596ae..000000000
--- a/image/docker/schema2/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-schema2 implements pure data transformations according to the Docker Manifest v2.2 Specification.
-
-https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
-"""
-
-# Content Types
-DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v2+json'
-DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.list.v2+json'
-
-DOCKER_SCHEMA2_LAYER_CONTENT_TYPE = 'application/vnd.docker.image.rootfs.diff.tar.gzip'
-DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE = 'application/vnd.docker.image.rootfs.foreign.diff.tar.gzip'
-
-DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE = 'application/vnd.docker.container.image.v1+json'
-
-OCI_MANIFEST_CONTENT_TYPE = 'application/vnd.oci.image.manifest.v1+json'
-OCI_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.oci.image.index.v1+json'
-
-DOCKER_SCHEMA2_CONTENT_TYPES = {DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE}
-OCI_CONTENT_TYPES = {OCI_MANIFEST_CONTENT_TYPE, OCI_MANIFESTLIST_CONTENT_TYPE}
-
-# The magical digest to be used for "empty" layers.
-# https://github.com/docker/distribution/blob/749f6afb4572201e3c37325d0ffedb6f32be8950/manifest/schema1/config_builder.go#L22
-EMPTY_LAYER_BLOB_DIGEST = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4'
-EMPTY_LAYER_SIZE = 32
-EMPTY_LAYER_BYTES = "".join(map(chr, [
- 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
- 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
-]))
diff --git a/image/docker/schema2/config.py b/image/docker/schema2/config.py
deleted file mode 100644
index c5b14862f..000000000
--- a/image/docker/schema2/config.py
+++ /dev/null
@@ -1,275 +0,0 @@
-"""
-Implements validation and conversion for the Schema2 config JSON.
-
-Example:
-{
- "architecture": "amd64",
- "config": {
- "Hostname": "",
- "Domainname": "",
- "User": "",
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "HTTP_PROXY=http:\/\/localhost:8080",
- "http_proxy=http:\/\/localhost:8080",
- "PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
- ],
- "Cmd": [
- "sh"
- ],
- "Image": "",
- "Volumes": null,
- "WorkingDir": "",
- "Entrypoint": null,
- "OnBuild": null,
- "Labels": {
-
- }
- },
- "container": "b7a43694b435c8e9932615643f61f975a9213e453b15cd6c2a386f144a2d2de9",
- "container_config": {
- "Hostname": "b7a43694b435",
- "Domainname": "",
- "User": "",
- "AttachStdin": true,
- "AttachStdout": true,
- "AttachStderr": true,
- "Tty": true,
- "OpenStdin": true,
- "StdinOnce": true,
- "Env": [
- "HTTP_PROXY=http:\/\/localhost:8080",
- "http_proxy=http:\/\/localhost:8080",
- "PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
- ],
- "Cmd": [
- "sh"
- ],
- "Image": "somenamespace\/somerepo",
- "Volumes": null,
- "WorkingDir": "",
- "Entrypoint": null,
- "OnBuild": null,
- "Labels": {
-
- }
- },
- "created": "2018-04-16T10:41:19.079522722Z",
- "docker_version": "17.09.0-ce",
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "\/bin\/sh -c #(nop) ADD file:9e4ca21cbd24dc05b454b6be21c7c639216ae66559b21ba24af0d665c62620dc in \/ "
- },
- {
- "created": "2018-04-03T18:37:09.613317719Z",
- "created_by": "\/bin\/sh -c #(nop) CMD [\"sh\"]",
- "empty_layer": true
- },
- {
- "created": "2018-04-16T10:37:44.418262777Z",
- "created_by": "sh"
- },
- {
- "created": "2018-04-16T10:41:19.079522722Z",
- "created_by": "sh"
- }
- ],
- "os": "linux",
- "rootfs": {
- "type": "layers",
- "diff_ids": [
- "sha256:3e596351c689c8827a3c9635bc1083cff17fa4a174f84f0584bd0ae6f384195b",
- "sha256:4552be273c71275a88de0b8c8853dcac18cb74d5790f5383d9b38d4ac55062d5",
- "sha256:1319c76152ca37fbeb7fb71e0ffa7239bc19ffbe3b95c00417ece39d89d06e6e"
- ]
- }
-}
-"""
-
-import copy
-import json
-import hashlib
-
-from collections import namedtuple
-from jsonschema import validate as validate_schema, ValidationError
-from dateutil.parser import parse as parse_date
-
-from digest import digest_tools
-from image.docker import ManifestException
-from util.bytes import Bytes
-
-
-DOCKER_SCHEMA2_CONFIG_HISTORY_KEY = "history"
-DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY = "rootfs"
-DOCKER_SCHEMA2_CONFIG_CREATED_KEY = "created"
-DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY = "created_by"
-DOCKER_SCHEMA2_CONFIG_COMMENT_KEY = "comment"
-DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY = "author"
-DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY = "empty_layer"
-DOCKER_SCHEMA2_CONFIG_TYPE_KEY = "type"
-
-
-LayerHistory = namedtuple('LayerHistory', ['created', 'created_datetime', 'command', 'is_empty',
- 'author', 'comment', 'raw_entry'])
-
-
-class MalformedSchema2Config(ManifestException):
- """
- Raised when a config fails an assertion that should be true according to the Docker Manifest
- v2.2 Config Specification.
- """
- pass
-
-
-class DockerSchema2Config(object):
- METASCHEMA = {
- 'type': 'object',
- 'description': 'The container configuration found in a schema 2 manifest',
- 'required': [DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY],
- 'properties': {
- DOCKER_SCHEMA2_CONFIG_HISTORY_KEY: {
- 'type': 'array',
- 'description': 'The history used to create the container image',
- 'items': {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY: {
- 'type': 'boolean',
- 'description': 'If present, this layer is empty',
- },
- DOCKER_SCHEMA2_CONFIG_CREATED_KEY: {
- 'type': 'string',
- 'description': 'The date/time that the layer was created',
- 'format': 'date-time',
- 'x-example': '2018-04-03T18:37:09.284840891Z',
- },
- DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY: {
- 'type': 'string',
- 'description': 'The command used to create the layer',
- 'x-example': '\/bin\/sh -c #(nop) ADD file:somesha in /',
- },
- DOCKER_SCHEMA2_CONFIG_COMMENT_KEY: {
- 'type': 'string',
- 'description': 'Comment describing the layer',
- },
- DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY: {
- 'type': 'string',
- 'description': 'The author of the layer',
- },
- },
- 'additionalProperties': True,
- },
- },
- DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY: {
- 'type': 'object',
- 'description': 'Describes the root filesystem for this image',
- 'properties': {
- DOCKER_SCHEMA2_CONFIG_TYPE_KEY: {
- 'type': 'string',
- 'description': 'The type of the root file system entries',
- },
- },
- 'required': [DOCKER_SCHEMA2_CONFIG_TYPE_KEY],
- 'additionalProperties': True,
- },
- },
- 'additionalProperties': True,
- }
-
- def __init__(self, config_bytes):
- assert isinstance(config_bytes, Bytes)
-
- self._config_bytes = config_bytes
-
- try:
- self._parsed = json.loads(config_bytes.as_unicode())
- except ValueError as ve:
- raise MalformedSchema2Config('malformed config data: %s' % ve)
-
- try:
- validate_schema(self._parsed, DockerSchema2Config.METASCHEMA)
- except ValidationError as ve:
- raise MalformedSchema2Config('config data does not match schema: %s' % ve)
-
- @property
- def digest(self):
- """ Returns the digest of this config object. """
- return digest_tools.sha256_digest(self._config_bytes.as_encoded_str())
-
- @property
- def size(self):
- """ Returns the size of this config object. """
- return len(self._config_bytes.as_encoded_str())
-
- @property
- def bytes(self):
- """ Returns the bytes of this config object. """
- return self._config_bytes
-
- @property
- def labels(self):
- """ Returns a dictionary of all the labels defined in this configuration. """
- return self._parsed.get('config', {}).get('Labels', {}) or {}
-
- @property
- def has_empty_layer(self):
- """ Returns whether this config contains an empty layer. """
- for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
- if history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False):
- return True
-
- return False
-
- @property
- def history(self):
- """ Returns the history of the image, started at the base layer. """
- for history_entry in self._parsed[DOCKER_SCHEMA2_CONFIG_HISTORY_KEY]:
- created_datetime = parse_date(history_entry[DOCKER_SCHEMA2_CONFIG_CREATED_KEY])
- yield LayerHistory(created_datetime=created_datetime,
- created=history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_KEY),
- command=history_entry.get(DOCKER_SCHEMA2_CONFIG_CREATED_BY_KEY),
- author=history_entry.get(DOCKER_SCHEMA2_CONFIG_AUTHOR_KEY),
- comment=history_entry.get(DOCKER_SCHEMA2_CONFIG_COMMENT_KEY),
- is_empty=history_entry.get(DOCKER_SCHEMA2_CONFIG_EMPTY_LAYER_KEY, False),
- raw_entry=history_entry)
-
- def build_v1_compatibility(self, history, v1_id, v1_parent_id, is_leaf, compressed_size=None):
- """ Builds the V1 compatibility block for the given layer.
- """
- # If the layer is the leaf, it gets the full config (minus 2 fields). Otherwise, it gets only
- # IDs.
- v1_compatibility = copy.deepcopy(self._parsed) if is_leaf else {}
- v1_compatibility['id'] = v1_id
- if v1_parent_id is not None:
- v1_compatibility['parent'] = v1_parent_id
-
- if 'created' not in v1_compatibility and history.created:
- v1_compatibility['created'] = history.created
-
- if 'author' not in v1_compatibility and history.author:
- v1_compatibility['author'] = history.author
-
- if 'comment' not in v1_compatibility and history.comment:
- v1_compatibility['comment'] = history.comment
-
- if 'throwaway' not in v1_compatibility and history.is_empty:
- v1_compatibility['throwaway'] = True
-
- if 'container_config' not in v1_compatibility:
- v1_compatibility['container_config'] = {
- 'Cmd': [history.command],
- }
-
- if compressed_size is not None:
- v1_compatibility['Size'] = compressed_size
-
- # The history and rootfs keys are schema2-config specific.
- v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_HISTORY_KEY, None)
- v1_compatibility.pop(DOCKER_SCHEMA2_CONFIG_ROOTFS_KEY, None)
- return v1_compatibility
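
For context on the deleted build_v1_compatibility above: non-leaf layers receive only identity fields plus the creating command, while the leaf inherits the full config minus the schema2-only history/rootfs keys. A simplified sketch of the non-leaf case (dict-based history instead of the LayerHistory namedtuple, covering only the fields exercised here):

def build_v1_compatibility(history, v1_id, v1_parent_id):
  # Minimal v1Compatibility block for a non-leaf layer.
  v1 = {'id': v1_id}
  if v1_parent_id is not None:
    v1['parent'] = v1_parent_id
  if history.get('created'):
    v1['created'] = history['created']
  v1['container_config'] = {'Cmd': [history.get('created_by')]}
  return v1

entry = {'created': '2018-04-03T18:37:09Z', 'created_by': 'sh'}
assert build_v1_compatibility(entry, 'abc', 'def') == {
    'id': 'abc',
    'parent': 'def',
    'created': '2018-04-03T18:37:09Z',
    'container_config': {'Cmd': ['sh']},
}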
diff --git a/image/docker/schema2/list.py b/image/docker/schema2/list.py
deleted file mode 100644
index bf1c52132..000000000
--- a/image/docker/schema2/list.py
+++ /dev/null
@@ -1,379 +0,0 @@
-import logging
-import json
-
-from cachetools.func import lru_cache
-from jsonschema import validate as validate_schema, ValidationError
-
-from digest import digest_tools
-from image.docker import ManifestException
-from image.docker.interfaces import ManifestInterface
-from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
-from image.docker.schema1 import DockerSchema1Manifest
-from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
-from image.docker.schema2.manifest import DockerSchema2Manifest
-from util.bytes import Bytes
-
-
-logger = logging.getLogger(__name__)
-
-# Keys.
-DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY = 'schemaVersion'
-DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY = 'mediaType'
-DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY = 'size'
-DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY = 'digest'
-DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY = 'manifests'
-DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY = 'platform'
-DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY = 'architecture'
-DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY = 'os'
-DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY = 'os.version'
-DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY = 'os.features'
-DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY = 'features'
-DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY = 'variant'
-
-
-class MalformedSchema2ManifestList(ManifestException):
- """
- Raised when a manifest list fails an assertion that should be true according to the
- Docker Manifest v2.2 Specification.
- """
- pass
-
-
-class MismatchManifestException(MalformedSchema2ManifestList):
- """ Raised when a manifest list contains a schema 1 manifest with a differing architecture
- from that specified in the manifest list for the manifest.
- """
- pass
-
-
-class LazyManifestLoader(object):
- def __init__(self, manifest_data, content_retriever):
- self._manifest_data = manifest_data
- self._content_retriever = content_retriever
- self._loaded_manifest = None
-
- @property
- def manifest_obj(self):
- if self._loaded_manifest is not None:
- return self._loaded_manifest
-
- self._loaded_manifest = self._load_manifest()
- return self._loaded_manifest
-
- def _load_manifest(self):
- digest = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
- size = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY]
- manifest_bytes = self._content_retriever.get_manifest_bytes_with_digest(digest)
- if manifest_bytes is None:
- raise MalformedSchema2ManifestList('Could not find child manifest with digest `%s`' % digest)
-
- if len(manifest_bytes) != size:
-      raise MalformedSchema2ManifestList('Size of manifest does not match that retrieved: %s vs %s' %
-                                         (len(manifest_bytes), size))
-
- content_type = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]
- if content_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
- return DockerSchema2Manifest(Bytes.for_string_or_unicode(manifest_bytes))
-
- if content_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
- return DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=False)
-
-    raise MalformedSchema2ManifestList('Unknown child manifest content type: %s' % content_type)
-
-
-class DockerSchema2ManifestList(ManifestInterface):
- METASCHEMA = {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: {
- 'type': 'number',
- 'description': 'The version of the manifest list. Must always be `2`.',
- 'minimum': 2,
- 'maximum': 2,
- },
- DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {
- 'type': 'string',
- 'description': 'The media type of the manifest list.',
- 'enum': [DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE],
- },
- DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: {
- 'type': 'array',
- 'description': 'The manifests field contains a list of manifests for specific platforms',
- 'items': {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {
- 'type': 'string',
- 'description': 'The MIME type of the referenced object. This will generally be ' +
- 'application/vnd.docker.distribution.manifest.v2+json, but it ' +
- 'could also be application/vnd.docker.distribution.manifest.v1+json ' +
- 'if the manifest list references a legacy schema-1 manifest.',
- 'enum': [DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE],
- },
- DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: {
- 'type': 'number',
- 'description': 'The size in bytes of the object. This field exists so that a ' +
- 'client will have an expected size for the content before ' +
- 'validating. If the length of the retrieved content does not ' +
- 'match the specified length, the content should not be trusted.',
- },
- DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: {
- 'type': 'string',
- 'description': 'The content addressable digest of the manifest in the blob store',
- },
- DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: {
- 'type': 'object',
- 'description': 'The platform object describes the platform which the image in ' +
- 'the manifest runs on',
- 'properties': {
- DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: {
- 'type': 'string',
- 'description': 'Specifies the CPU architecture, for example amd64 or ppc64le.',
- },
- DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: {
- 'type': 'string',
- 'description': 'Specifies the operating system, for example linux or windows',
- },
- DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY: {
- 'type': 'string',
- 'description': 'Specifies the operating system version, for example 10.0.10586',
- },
- DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY: {
- 'type': 'array',
- 'description': 'specifies an array of strings, each listing a required OS ' +
- 'feature (for example on Windows win32k)',
- 'items': {
- 'type': 'string',
- },
- },
- DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY: {
- 'type': 'string',
- 'description': 'Specifies a variant of the CPU, for example armv6l to specify ' +
- 'a particular CPU variant of the ARM CPU',
- },
- DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY: {
- 'type': 'array',
- 'description': 'specifies an array of strings, each listing a required CPU ' +
- 'feature (for example sse4 or aes).',
- 'items': {
- 'type': 'string',
- },
- },
- },
- 'required': [DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY,
- DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY],
- },
- },
- 'required': [DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY,
- DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY,
- DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY,
- DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY],
- },
- },
- },
- 'required': [DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY,
- DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY,
- DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY],
- }
-
- def __init__(self, manifest_bytes):
- assert isinstance(manifest_bytes, Bytes)
-
- self._layers = None
- self._manifest_bytes = manifest_bytes
-
- try:
- self._parsed = json.loads(manifest_bytes.as_unicode())
- except ValueError as ve:
- raise MalformedSchema2ManifestList('malformed manifest data: %s' % ve)
-
- try:
- validate_schema(self._parsed, DockerSchema2ManifestList.METASCHEMA)
- except ValidationError as ve:
- raise MalformedSchema2ManifestList('manifest data does not match schema: %s' % ve)
-
- @property
- def is_manifest_list(self):
- """ Returns whether this manifest is a list. """
- return True
-
- @property
- def schema_version(self):
- return 2
-
- @property
- def digest(self):
- """ The digest of the manifest, including type prefix. """
- return digest_tools.sha256_digest(self._manifest_bytes.as_encoded_str())
-
- @property
- def media_type(self):
- """ The media type of the schema. """
- return self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]
-
- @property
- def manifest_dict(self):
- """ Returns the manifest as a dictionary ready to be serialized to JSON. """
- return self._parsed
-
- @property
- def bytes(self):
- return self._manifest_bytes
-
- def get_layers(self, content_retriever):
- """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
- does not support layers. """
- return None
-
- @property
- def blob_digests(self):
- # Manifest lists have no blob digests, since everything is stored as a manifest.
- return []
-
- @property
- def local_blob_digests(self):
- return self.blob_digests
-
- def get_blob_digests_for_translation(self):
- return self.blob_digests
-
- @property
- def layers_compressed_size(self):
- return None
-
- @lru_cache(maxsize=1)
- def manifests(self, content_retriever):
- """ Returns the manifests in the list.
- """
- manifests = self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]
- return [LazyManifestLoader(m, content_retriever) for m in manifests]
-
- def validate(self, content_retriever):
- """ Performs validation of required assertions about the manifest. Raises a ManifestException
- on failure.
- """
- for index, m in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]):
- if m[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY] == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
- platform = m[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
-
- # Validate the architecture against the schema 1 architecture defined.
- parsed = self.manifests(content_retriever)[index].manifest_obj
- assert isinstance(parsed, DockerSchema1Manifest)
- if (parsed.architecture and
- parsed.architecture != platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]):
- raise MismatchManifestException('Mismatch in arch for manifest `%s`' % parsed.digest)
-
- def child_manifests(self, content_retriever):
- return self.manifests(content_retriever)
-
- def child_manifest_digests(self):
- return [m[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
- for m in self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]]
-
- def get_manifest_labels(self, content_retriever):
- return None
-
- def get_leaf_layer_v1_image_id(self, content_retriever):
- return None
-
- def get_legacy_image_ids(self, content_retriever):
- return None
-
- @property
- def has_legacy_image(self):
- return False
-
- def get_requires_empty_layer_blob(self, content_retriever):
- return False
-
- def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
- """ Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
- If none, returns None.
- """
- legacy_manifest = self._get_legacy_manifest(content_retriever)
- if legacy_manifest is None:
- return None
-
- return legacy_manifest.get_schema1_manifest(namespace_name, repo_name, tag_name,
- content_retriever)
-
- def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
- content_retriever):
- if self.media_type in allowed_mediatypes:
- return self
-
- legacy_manifest = self._get_legacy_manifest(content_retriever)
- if legacy_manifest is None:
- return None
-
- return legacy_manifest.convert_manifest(allowed_mediatypes, namespace_name, repo_name,
- tag_name, content_retriever)
-
- def _get_legacy_manifest(self, content_retriever):
- """ Returns the manifest under this list with architecture amd64 and os linux, if any, or None
- if none or error.
- """
- for manifest_ref in self.manifests(content_retriever):
- platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
- architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
- os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
- if architecture != 'amd64' or os != 'linux':
- continue
-
- try:
- return manifest_ref.manifest_obj
- except (ManifestException, IOError):
- logger.exception('Could not load child manifest')
- return None
-
- return None
-
- def unsigned(self):
- return self
-
- def generate_legacy_layers(self, images_map, content_retriever):
- return None
-
-
-class DockerSchema2ManifestListBuilder(object):
- """
- A convenient abstraction around creating new DockerSchema2ManifestList's.
- """
- def __init__(self):
- self.manifests = []
-
- def add_manifest(self, manifest, architecture, os):
- """ Adds a manifest to the list. """
- manifest = manifest.unsigned() # Make sure we add the unsigned version to the list.
- self.add_manifest_digest(manifest.digest,
- len(manifest.bytes.as_encoded_str()),
- manifest.media_type,
- architecture, os)
-
- def add_manifest_digest(self, manifest_digest, manifest_size, media_type, architecture, os):
- """ Adds a manifest to the list. """
- self.manifests.append((manifest_digest, manifest_size, media_type, {
- DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: architecture,
- DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: os,
- }))
-
- def build(self):
- """ Builds and returns the DockerSchema2ManifestList. """
- assert self.manifests
-
- manifest_list_dict = {
- DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: 2,
- DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: [
- {
- DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: manifest[2],
- DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: manifest[0],
- DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: manifest[1],
- DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: manifest[3],
- } for manifest in self.manifests
- ],
- }
-
-    json_str = json.dumps(manifest_list_dict, indent=3)
-    return DockerSchema2ManifestList(Bytes.for_string_or_unicode(json_str))
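A sketch of how the DockerSchema2ManifestListBuilder above was used; `amd64_manifest` and `arm64_manifest` are hypothetical DockerSchema2Manifest instances built elsewhere:

```python
from image.docker.schema2.list import DockerSchema2ManifestListBuilder

builder = DockerSchema2ManifestListBuilder()

# add_manifest() records each child's digest, size and media type along with
# its platform; amd64_manifest / arm64_manifest are assumed to exist.
builder.add_manifest(amd64_manifest, 'amd64', 'linux')
builder.add_manifest(arm64_manifest, 'arm64', 'linux')

manifest_list = builder.build()
print(manifest_list.media_type)                # the manifest list content type
print(manifest_list.child_manifest_digests())  # digests of both children
```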
diff --git a/image/docker/schema2/manifest.py b/image/docker/schema2/manifest.py
deleted file mode 100644
index e6188554e..000000000
--- a/image/docker/schema2/manifest.py
+++ /dev/null
@@ -1,462 +0,0 @@
-import json
-import logging
-import hashlib
-
-from collections import namedtuple
-from jsonschema import validate as validate_schema, ValidationError
-
-from digest import digest_tools
-from image.docker import ManifestException
-from image.docker.interfaces import ManifestInterface
-from image.docker.types import ManifestImageLayer
-from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
- DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE,
- DOCKER_SCHEMA2_LAYER_CONTENT_TYPE,
- DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE,
- EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_SIZE)
-from image.docker.schema1 import DockerSchema1ManifestBuilder
-from image.docker.schema2.config import DockerSchema2Config
-from util.bytes import Bytes
-
-# Keys.
-DOCKER_SCHEMA2_MANIFEST_VERSION_KEY = 'schemaVersion'
-DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY = 'mediaType'
-DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY = 'config'
-DOCKER_SCHEMA2_MANIFEST_SIZE_KEY = 'size'
-DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY = 'digest'
-DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY = 'layers'
-DOCKER_SCHEMA2_MANIFEST_URLS_KEY = 'urls'
-
-# Named tuples.
-DockerV2ManifestConfig = namedtuple('DockerV2ManifestConfig', ['size', 'digest'])
-DockerV2ManifestLayer = namedtuple('DockerV2ManifestLayer', ['index', 'digest',
- 'is_remote', 'urls',
- 'compressed_size'])
-
-DockerV2ManifestImageLayer = namedtuple('DockerV2ManifestImageLayer', ['history', 'blob_layer',
- 'v1_id', 'v1_parent_id',
- 'compressed_size',
- 'blob_digest'])
-
-logger = logging.getLogger(__name__)
-
-class MalformedSchema2Manifest(ManifestException):
- """
- Raised when a manifest fails an assertion that should be true according to the Docker Manifest
- v2.2 Specification.
- """
- pass
-
-
-class DockerSchema2Manifest(ManifestInterface):
- METASCHEMA = {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA2_MANIFEST_VERSION_KEY: {
- 'type': 'number',
- 'description': 'The version of the schema. Must always be `2`.',
- 'minimum': 2,
- 'maximum': 2,
- },
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: {
- 'type': 'string',
- 'description': 'The media type of the schema.',
- 'enum': [DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE],
- },
- DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY: {
- 'type': 'object',
- 'description': 'The config field references a configuration object for a container, ' +
- 'by digest. This configuration item is a JSON blob that the runtime ' +
- 'uses to set up the container.',
- 'properties': {
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: {
- 'type': 'string',
- 'description': 'The MIME type of the referenced object. This should generally be ' +
- 'application/vnd.docker.container.image.v1+json',
- 'enum': [DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE],
- },
- DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: {
- 'type': 'number',
- 'description': 'The size in bytes of the object. This field exists so that a ' +
- 'client will have an expected size for the content before ' +
- 'validating. If the length of the retrieved content does not ' +
- 'match the specified length, the content should not be trusted.',
- },
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: {
- 'type': 'string',
- 'description': 'The content addressable digest of the config in the blob store',
- },
- },
- 'required': [DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY,
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY],
- },
- DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY: {
- 'type': 'array',
- 'description': 'The layer list is ordered starting from the base ' +
- 'image (opposite order of schema1).',
- 'items': {
- 'type': 'object',
- 'properties': {
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: {
- 'type': 'string',
- 'description': 'The MIME type of the referenced object. This should generally be ' +
- 'application/vnd.docker.image.rootfs.diff.tar.gzip. Layers of type ' +
- 'application/vnd.docker.image.rootfs.foreign.diff.tar.gzip may be ' +
- 'pulled from a remote location but they should never be pushed.',
- 'enum': [DOCKER_SCHEMA2_LAYER_CONTENT_TYPE, DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE],
- },
- DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: {
- 'type': 'number',
- 'description': 'The size in bytes of the object. This field exists so that a ' +
- 'client will have an expected size for the content before ' +
- 'validating. If the length of the retrieved content does not ' +
- 'match the specified length, the content should not be trusted.',
- },
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: {
- 'type': 'string',
- 'description': 'The content addressable digest of the layer in the blob store',
- },
- },
- 'required': [
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY,
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY,
- ],
- },
- },
- },
- 'required': [DOCKER_SCHEMA2_MANIFEST_VERSION_KEY, DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY,
- DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY, DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY],
- }
-
- def __init__(self, manifest_bytes):
- assert isinstance(manifest_bytes, Bytes)
-
- self._payload = manifest_bytes
-
- self._filesystem_layers = None
- self._cached_built_config = None
-
- try:
- self._parsed = json.loads(self._payload.as_unicode())
- except ValueError as ve:
- raise MalformedSchema2Manifest('malformed manifest data: %s' % ve)
-
- try:
- validate_schema(self._parsed, DockerSchema2Manifest.METASCHEMA)
- except ValidationError as ve:
- raise MalformedSchema2Manifest('manifest data does not match schema: %s' % ve)
-
- for layer in self.filesystem_layers:
- if layer.is_remote and not layer.urls:
- raise MalformedSchema2Manifest('missing `urls` for remote layer')
-
- def validate(self, content_retriever):
- """ Performs validation of required assertions about the manifest. Raises a ManifestException
- on failure.
- """
- # Nothing to validate.
-
- @property
- def is_manifest_list(self):
- return False
-
- @property
- def schema_version(self):
- return 2
-
- @property
- def manifest_dict(self):
- return self._parsed
-
- @property
- def media_type(self):
- return self._parsed[DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY]
-
- @property
- def digest(self):
- return digest_tools.sha256_digest(self._payload.as_encoded_str())
-
- @property
- def config(self):
- config = self._parsed[DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY]
- return DockerV2ManifestConfig(size=config[DOCKER_SCHEMA2_MANIFEST_SIZE_KEY],
- digest=config[DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY])
-
- @property
- def filesystem_layers(self):
- """ Returns the file system layers of this manifest, from base to leaf. """
- if self._filesystem_layers is None:
- self._filesystem_layers = list(self._generate_filesystem_layers())
- return self._filesystem_layers
-
- @property
- def leaf_filesystem_layer(self):
- """ Returns the leaf file system layer for this manifest. """
- return self.filesystem_layers[-1]
-
- @property
- def layers_compressed_size(self):
- return sum(layer.compressed_size for layer in self.filesystem_layers)
-
- @property
- def has_remote_layer(self):
- for layer in self.filesystem_layers:
- if layer.is_remote:
- return True
-
- return False
-
- @property
- def blob_digests(self):
- return [str(layer.digest) for layer in self.filesystem_layers] + [str(self.config.digest)]
-
- @property
- def local_blob_digests(self):
- return ([str(layer.digest) for layer in self.filesystem_layers if not layer.urls] +
- [str(self.config.digest)])
-
- def get_blob_digests_for_translation(self):
- return self.blob_digests
-
- def get_manifest_labels(self, content_retriever):
- return self._get_built_config(content_retriever).labels
-
- def get_layers(self, content_retriever):
- """ Returns the layers of this manifest, from base to leaf or None if this kind of manifest
- does not support layers. """
- for image_layer in self._manifest_image_layers(content_retriever):
- is_remote = image_layer.blob_layer.is_remote if image_layer.blob_layer else False
- urls = image_layer.blob_layer.urls if image_layer.blob_layer else None
- yield ManifestImageLayer(layer_id=image_layer.v1_id,
- compressed_size=image_layer.compressed_size,
- is_remote=is_remote,
- urls=urls,
- command=image_layer.history.command,
- blob_digest=image_layer.blob_digest,
- created_datetime=image_layer.history.created_datetime,
- author=image_layer.history.author,
- comment=image_layer.history.comment,
- internal_layer=image_layer)
-
- @property
- def bytes(self):
- return self._payload
-
- def child_manifests(self, content_retriever):
- return None
-
- def _manifest_image_layers(self, content_retriever):
- # Retrieve the configuration for the manifest.
- config = self._get_built_config(content_retriever)
- history = list(config.history)
- if len(history) < len(self.filesystem_layers):
-      raise MalformedSchema2Manifest('Found fewer history entries than layer blobs')
-
- digest_history = hashlib.sha256()
- v1_layer_parent_id = None
- v1_layer_id = None
- blob_index = 0
-
- for history_index, history_entry in enumerate(history):
- if not history_entry.is_empty and blob_index >= len(self.filesystem_layers):
-        raise MalformedSchema2Manifest('Missing layer blob for history entry #%s' % history_index)
-
- v1_layer_parent_id = v1_layer_id
- blob_layer = None if history_entry.is_empty else self.filesystem_layers[blob_index]
- blob_digest = EMPTY_LAYER_BLOB_DIGEST if blob_layer is None else str(blob_layer.digest)
- compressed_size = EMPTY_LAYER_SIZE if blob_layer is None else blob_layer.compressed_size
-
- # Create a new synthesized V1 ID for the history layer by hashing its content and
- # the blob associated with it.
- digest_history.update(json.dumps(history_entry.raw_entry))
- digest_history.update("|")
- digest_history.update(str(history_index))
- digest_history.update("|")
- digest_history.update(blob_digest)
- digest_history.update("||")
-
- v1_layer_id = digest_history.hexdigest()
- yield DockerV2ManifestImageLayer(history=history_entry,
- blob_layer=blob_layer,
- blob_digest=blob_digest,
- v1_id=v1_layer_id,
- v1_parent_id=v1_layer_parent_id,
- compressed_size=compressed_size)
-
- if not history_entry.is_empty:
- blob_index += 1
-
- @property
- def has_legacy_image(self):
- return not self.has_remote_layer
-
- def generate_legacy_layers(self, images_map, content_retriever):
- assert not self.has_remote_layer
-
- # NOTE: We use the DockerSchema1ManifestBuilder here because it already contains
- # the logic for generating the DockerV1Metadata. All of this will go away once we get
- # rid of legacy images in the database, so this is a temporary solution.
- v1_builder = DockerSchema1ManifestBuilder('', '', '')
- self._populate_schema1_builder(v1_builder, content_retriever)
- return v1_builder.build().generate_legacy_layers(images_map, content_retriever)
-
- def get_leaf_layer_v1_image_id(self, content_retriever):
- # NOTE: If there exists a layer with remote content, then we consider this manifest
- # to not support legacy images.
- if self.has_remote_layer:
- return None
-
-    return self.get_legacy_image_ids(content_retriever)[-1]
-
- def get_legacy_image_ids(self, content_retriever):
- if self.has_remote_layer:
- return None
-
- return [l.v1_id for l in self._manifest_image_layers(content_retriever)]
-
- def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name,
- content_retriever):
- if self.media_type in allowed_mediatypes:
- return self
-
- # If this manifest is not on the allowed list, try to convert the schema 1 version (if any)
- schema1 = self.get_schema1_manifest(namespace_name, repo_name, tag_name, content_retriever)
- if schema1 is None:
- return None
-
- return schema1.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name,
- content_retriever)
-
- def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
- if self.has_remote_layer:
- return None
-
- v1_builder = DockerSchema1ManifestBuilder(namespace_name, repo_name, tag_name)
- self._populate_schema1_builder(v1_builder, content_retriever)
- return v1_builder.build()
-
- def unsigned(self):
- return self
-
- def get_requires_empty_layer_blob(self, content_retriever):
- schema2_config = self._get_built_config(content_retriever)
- if schema2_config is None:
- return None
-
- return schema2_config.has_empty_layer
-
- def _populate_schema1_builder(self, v1_builder, content_retriever):
- """ Populates a DockerSchema1ManifestBuilder with the layers and config from
- this schema.
- """
- assert not self.has_remote_layer
- schema2_config = self._get_built_config(content_retriever)
- layers = list(self._manifest_image_layers(content_retriever))
-
- for index, layer in enumerate(reversed(layers)): # Schema 1 layers are in reverse order
- v1_compatibility = schema2_config.build_v1_compatibility(layer.history,
- layer.v1_id,
- layer.v1_parent_id,
- index == 0,
- layer.compressed_size)
- v1_builder.add_layer(str(layer.blob_digest), json.dumps(v1_compatibility))
-
- return v1_builder
-
- def _get_built_config(self, content_retriever):
- if self._cached_built_config:
- return self._cached_built_config
-
- config_bytes = content_retriever.get_blob_bytes_with_digest(self.config.digest)
- if config_bytes is None:
- raise MalformedSchema2Manifest('Could not load config blob for manifest')
-
- if len(config_bytes) != self.config.size:
- msg = 'Size of config does not match that retrieved: %s vs %s' % (len(config_bytes),
- self.config.size)
- raise MalformedSchema2Manifest(msg)
-
- self._cached_built_config = DockerSchema2Config(Bytes.for_string_or_unicode(config_bytes))
- return self._cached_built_config
-
- def _generate_filesystem_layers(self):
- for index, layer in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY]):
- content_type = layer[DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY]
- is_remote = content_type == DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE
-
- try:
- digest = digest_tools.Digest.parse_digest(layer[DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY])
- except digest_tools.InvalidDigestException:
- raise MalformedSchema2Manifest('could not parse manifest digest: %s' %
- layer[DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY])
-
- yield DockerV2ManifestLayer(index=index,
- compressed_size=layer[DOCKER_SCHEMA2_MANIFEST_SIZE_KEY],
- digest=digest,
- is_remote=is_remote,
- urls=layer.get(DOCKER_SCHEMA2_MANIFEST_URLS_KEY))
-
-
-class DockerSchema2ManifestBuilder(object):
- """
- A convenient abstraction around creating new DockerSchema2Manifests.
- """
- def __init__(self):
- self.config = None
- self.filesystem_layers = []
-
- def set_config(self, schema2_config):
- """ Sets the configuration for the manifest being built. """
- self.set_config_digest(schema2_config.digest, schema2_config.size)
-
- def set_config_digest(self, config_digest, config_size):
- """ Sets the digest and size of the configuration layer. """
- self.config = DockerV2ManifestConfig(size=config_size, digest=config_digest)
-
- def add_layer(self, digest, size, urls=None):
- """ Adds a filesystem layer to the manifest. """
- self.filesystem_layers.append(DockerV2ManifestLayer(index=len(self.filesystem_layers),
- digest=digest,
- compressed_size=size,
- urls=urls,
- is_remote=bool(urls)))
-
- def build(self, ensure_ascii=True):
- """ Builds and returns the DockerSchema2Manifest. """
- assert self.filesystem_layers
- assert self.config
-
- def _build_layer(layer):
- if layer.urls:
- return {
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: layer.compressed_size,
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(layer.digest),
- DOCKER_SCHEMA2_MANIFEST_URLS_KEY: layer.urls,
- }
-
- return {
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_LAYER_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: layer.compressed_size,
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(layer.digest),
- }
-
- manifest_dict = {
- DOCKER_SCHEMA2_MANIFEST_VERSION_KEY: 2,
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
-
- # Config
- DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY: {
- DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: self.config.size,
- DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(self.config.digest),
- },
-
- # Layers
- DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY: [
- _build_layer(layer) for layer in self.filesystem_layers
- ],
- }
-
- json_str = json.dumps(manifest_dict, ensure_ascii=ensure_ascii, indent=3)
- return DockerSchema2Manifest(Bytes.for_string_or_unicode(json_str))
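Likewise, a sketch of building a schema 2 manifest with the DockerSchema2ManifestBuilder above; the digests and sizes are placeholders standing in for real uploaded blobs:

```python
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder

builder = DockerSchema2ManifestBuilder()

# Placeholder digests/sizes; in practice these come from the config blob and
# the pushed layer blobs.
builder.set_config_digest('sha256:' + 'a' * 64, 1977)
builder.add_layer('sha256:' + 'b' * 64, 727978)
builder.add_layer('sha256:' + 'c' * 64, 190)

manifest = builder.build()
print(manifest.digest)                  # content-addressable digest of the JSON
print(manifest.blob_digests)            # layer digests plus the config digest
print(manifest.layers_compressed_size)  # 727978 + 190
```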
diff --git a/image/docker/schema2/test/__init__.py b/image/docker/schema2/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/image/docker/schema2/test/conversion_data/complex.config.json b/image/docker/schema2/test/conversion_data/complex.config.json
deleted file mode 100644
index eda84dc49..000000000
--- a/image/docker/schema2/test/conversion_data/complex.config.json
+++ /dev/null
@@ -1,129 +0,0 @@
-{
- "architecture": "amd64",
- "config": {
- "Hostname": "",
- "Domainname": "",
- "User": "",
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "ExposedPorts": {
- "3306/tcp": {},
- "33060/tcp": {}
- },
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GOSU_VERSION=1.7", "MYSQL_MAJOR=5.7", "MYSQL_VERSION=5.7.24-1debian9"],
- "Cmd": ["mysqld"],
- "ArgsEscaped": true,
- "Image": "sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265",
- "Volumes": {
- "/var/lib/mysql": {}
- },
- "WorkingDir": "",
- "Entrypoint": ["docker-entrypoint.sh"],
- "OnBuild": [],
- "Labels": null
- },
- "container": "54bd04ff79350d28d0da33fa3e483567156c7c9f87a7254d6fa8267b0878c339",
- "container_config": {
- "Hostname": "54bd04ff7935",
- "Domainname": "",
- "User": "",
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "ExposedPorts": {
- "3306/tcp": {},
- "33060/tcp": {}
- },
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "GOSU_VERSION=1.7", "MYSQL_MAJOR=5.7", "MYSQL_VERSION=5.7.24-1debian9"],
- "Cmd": ["/bin/sh", "-c", "#(nop) ", "CMD [\"mysqld\"]"],
- "ArgsEscaped": true,
- "Image": "sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265",
- "Volumes": {
- "/var/lib/mysql": {}
- },
- "WorkingDir": "",
- "Entrypoint": ["docker-entrypoint.sh"],
- "OnBuild": [],
- "Labels": {}
- },
- "created": "2018-11-16T01:14:20.755008004Z",
- "docker_version": "17.06.2-ce",
- "history": [{
- "created": "2018-11-15T22:45:06.938205528Z",
- "created_by": "/bin/sh -c #(nop) ADD file:dab9baf938799c515ddce14c02f899da5992f0b76a432fa10a2338556a3cb04f in / "
- }, {
- "created": "2018-11-15T22:45:07.243453424Z",
- "created_by": "/bin/sh -c #(nop) CMD [\"bash\"]",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:11:01.00193007Z",
- "created_by": "/bin/sh -c groupadd -r mysql \u0026\u0026 useradd -r -g mysql mysql"
- }, {
- "created": "2018-11-16T01:11:11.128616814Z",
- "created_by": "/bin/sh -c apt-get update \u0026\u0026 apt-get install -y --no-install-recommends gnupg dirmngr \u0026\u0026 rm -rf /var/lib/apt/lists/*"
- }, {
- "created": "2018-11-16T01:11:11.466721945Z",
- "created_by": "/bin/sh -c #(nop) ENV GOSU_VERSION=1.7",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:11:33.651099664Z",
- "created_by": "/bin/sh -c set -x \t\u0026\u0026 apt-get update \u0026\u0026 apt-get install -y --no-install-recommends ca-certificates wget \u0026\u0026 rm -rf /var/lib/apt/lists/* \t\u0026\u0026 wget -O /usr/local/bin/gosu \"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)\" \t\u0026\u0026 wget -O /usr/local/bin/gosu.asc \"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \t\u0026\u0026 gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \t\u0026\u0026 gpgconf --kill all \t\u0026\u0026 rm -rf \"$GNUPGHOME\" /usr/local/bin/gosu.asc \t\u0026\u0026 chmod +x /usr/local/bin/gosu \t\u0026\u0026 gosu nobody true \t\u0026\u0026 apt-get purge -y --auto-remove ca-certificates wget"
- }, {
- "created": "2018-11-16T01:11:34.772616243Z",
- "created_by": "/bin/sh -c mkdir /docker-entrypoint-initdb.d"
- }, {
- "created": "2018-11-16T01:11:46.048879774Z",
- "created_by": "/bin/sh -c apt-get update \u0026\u0026 apt-get install -y --no-install-recommends \t\tpwgen \t\topenssl \t\tperl \t\u0026\u0026 rm -rf /var/lib/apt/lists/*"
- }, {
- "created": "2018-11-16T01:11:49.672488713Z",
- "created_by": "/bin/sh -c set -ex; \tkey='A4A9406876FCBD3C456770C88C718D3B5072E1F5'; \texport GNUPGHOME=\"$(mktemp -d)\"; \tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \"$key\"; \tgpg --batch --export \"$key\" \u003e /etc/apt/trusted.gpg.d/mysql.gpg; \tgpgconf --kill all; \trm -rf \"$GNUPGHOME\"; \tapt-key list \u003e /dev/null"
- }, {
- "created": "2018-11-16T01:13:49.699875841Z",
- "created_by": "/bin/sh -c #(nop) ENV MYSQL_MAJOR=5.7",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:13:50.087751031Z",
- "created_by": "/bin/sh -c #(nop) ENV MYSQL_VERSION=5.7.24-1debian9",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:13:51.211877582Z",
- "created_by": "/bin/sh -c echo \"deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_MAJOR}\" \u003e /etc/apt/sources.list.d/mysql.list"
- }, {
- "created": "2018-11-16T01:14:17.521774936Z",
- "created_by": "/bin/sh -c { \t\techo mysql-community-server mysql-community-server/data-dir select ''; \t\techo mysql-community-server mysql-community-server/root-pass password ''; \t\techo mysql-community-server mysql-community-server/re-root-pass password ''; \t\techo mysql-community-server mysql-community-server/remove-test-db select false; \t} | debconf-set-selections \t\u0026\u0026 apt-get update \u0026\u0026 apt-get install -y mysql-server=\"${MYSQL_VERSION}\" \u0026\u0026 rm -rf /var/lib/apt/lists/* \t\u0026\u0026 rm -rf /var/lib/mysql \u0026\u0026 mkdir -p /var/lib/mysql /var/run/mysqld \t\u0026\u0026 chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \t\u0026\u0026 chmod 777 /var/run/mysqld \t\u0026\u0026 find /etc/mysql/ -name '*.cnf' -print0 \t\t| xargs -0 grep -lZE '^(bind-address|log)' \t\t| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#\u0026/' \t\u0026\u0026 echo '[mysqld]\\nskip-host-cache\\nskip-name-resolve' \u003e /etc/mysql/conf.d/docker.cnf"
- }, {
- "created": "2018-11-16T01:14:17.959906008Z",
- "created_by": "/bin/sh -c #(nop) VOLUME [/var/lib/mysql]",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:14:18.574646682Z",
- "created_by": "/bin/sh -c #(nop) COPY file:4b5f8335c16a8bc9f76a2164458df1d71cf76facbf16d02f18ce7409122c2146 in /usr/local/bin/ "
- }, {
- "created": "2018-11-16T01:14:19.715707659Z",
- "created_by": "/bin/sh -c ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat"
- }, {
- "created": "2018-11-16T01:14:20.063426223Z",
- "created_by": "/bin/sh -c #(nop) ENTRYPOINT [\"docker-entrypoint.sh\"]",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:14:20.416001274Z",
- "created_by": "/bin/sh -c #(nop) EXPOSE 3306/tcp 33060/tcp",
- "empty_layer": true
- }, {
- "created": "2018-11-16T01:14:20.755008004Z",
- "created_by": "/bin/sh -c #(nop) CMD [\"mysqld\"]",
- "empty_layer": true
- }],
- "os": "linux",
- "rootfs": {
- "type": "layers",
- "diff_ids": ["sha256:ef68f6734aa485edf13a8509fe60e4272428deaf63f446a441b79d47fc5d17d3", "sha256:a588c986cf971b87ee2aacd9b57877c47e68e4981b67793d301720a1d0d03a68", "sha256:0f1205f1cd43db6d5f837f792eecb84e773482eb0fb353a4f3f42c3cabb5747f", "sha256:0ad177796f339bf4f5c114bbd97721536d48b452915479884ff3d16acc1c612f", "sha256:2566141f200b8e249db6663d24063a3e1d0e33622e933fa99bee27a4f5b8db02", "sha256:783b13a988e3ec069e08019c89292fdf4e6316141ed74a6d896a422f7ee30077", "sha256:3d4164460bf0c8c4959e6acb51757d63dea47c162a334f65dfbf32537a4b552f", "sha256:ea66b8e6103f47f1934007a9b4c03c28f0398fdc7f9fbe9b5eea335b10448fed", "sha256:347571a8da208bf019b880ef4c73bad7884ad0570ec70dbfe8f95c6c0b37c082", "sha256:ceb15396dc26b48c1dc6222a4cc3934761c1ec06623d895efdb1cb77517a3887", "sha256:0d954c604c768947cd9630283f96bca6c244b971d004565b57f42db100ca3178"]
- }
-}
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/complex.schema1.json b/image/docker/schema2/test/conversion_data/complex.schema1.json
deleted file mode 100644
index e57fab7a5..000000000
--- a/image/docker/schema2/test/conversion_data/complex.schema1.json
+++ /dev/null
@@ -1,140 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "user/test",
- "tag": "1",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:e81e5181556720e9c821bdb826dd9dbeb078dd28af8fe84586aa904ff212d117"
- },
- {
- "blobSum": "sha256:5f906b8da5fed2070448fed578b93cb1a995be5bdde5624163fbcb842ce4460f"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:cd2a6583723557a1dc58584f53564f791dbb7a5d264bb2f8d71aa8c5d21ac38c"
- },
- {
- "blobSum": "sha256:a7905d9fbbea59dc29d709d1d61a96c06c26a2d1e506ac5c3832a348969052b8"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:0283dc49ef4e5bc0dc8897b14818de315faeceb0a5272464ff3c48cd4ea3b626"
- },
- {
- "blobSum": "sha256:5ed0ae805b65407ddd0ff1aeb8371de3133e5daed97726717d4225cb7a8efaaa"
- },
- {
- "blobSum": "sha256:e2ae0d063e89542abdd8abd1613e8e27743fa669f4a418c8b0a813991621d892"
- },
- {
- "blobSum": "sha256:1f212fb371f936c524c624e6830242a8cb91b3b710942f9241004dae45828f87"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:283fa4c95fb4e349b501ef8c864b2259500d83ca6529253da12d658aa480cbb5"
- },
- {
- "blobSum": "sha256:936836019e67889c1f5a95732c62c476d2450010988792e99d6e7ece84fdce2f"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a5a6f2f73cd8abbdc55d0df0d8834f7262713e87d6c8800ea3851f103025e0f0"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"3306/tcp\":{},\"33060/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOSU_VERSION=1.7\",\"MYSQL_MAJOR=5.7\",\"MYSQL_VERSION=5.7.24-1debian9\"],\"Cmd\":[\"mysqld\"],\"ArgsEscaped\":true,\"Image\":\"sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265\",\"Volumes\":{\"/var/lib/mysql\":{}},\"WorkingDir\":\"\",\"Entrypoint\":[\"docker-entrypoint.sh\"],\"OnBuild\":[],\"Labels\":null},\"container\":\"54bd04ff79350d28d0da33fa3e483567156c7c9f87a7254d6fa8267b0878c339\",\"container_config\":{\"Hostname\":\"54bd04ff7935\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"3306/tcp\":{},\"33060/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOSU_VERSION=1.7\",\"MYSQL_MAJOR=5.7\",\"MYSQL_VERSION=5.7.24-1debian9\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"mysqld\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:fbfb8f1311c4e46e2b5122aef8b6753945c9db8de03258cc9ebff85aa9f59265\",\"Volumes\":{\"/var/lib/mysql\":{}},\"WorkingDir\":\"\",\"Entrypoint\":[\"docker-entrypoint.sh\"],\"OnBuild\":[],\"Labels\":{}},\"created\":\"2018-11-16T01:14:20.755008004Z\",\"docker_version\":\"17.06.2-ce\",\"id\":\"3cc8ec7719abb3a11fc9ce9b5c5151f368bf3e7e2702d3618e17b4f5055237f8\",\"os\":\"linux\",\"parent\":\"2904b34db6cd1083a7b47ec5e8c1fcb538b9d0ecb790488ec22badabf6143fcb\",\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"2904b34db6cd1083a7b47ec5e8c1fcb538b9d0ecb790488ec22badabf6143fcb\",\"parent\":\"53d4b89c676dd5970862f366ded1a212a24f10a862a12a340ca2f35b5d766308\",\"created\":\"2018-11-16T01:14:20.416001274Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 3306/tcp 33060/tcp\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"53d4b89c676dd5970862f366ded1a212a24f10a862a12a340ca2f35b5d766308\",\"parent\":\"73c0e3215914add0dc50583090572ae5cd78fb16cc3b3427c8874472cdca93fb\",\"created\":\"2018-11-16T01:14:20.063426223Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENTRYPOINT [\\\"docker-entrypoint.sh\\\"]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"73c0e3215914add0dc50583090572ae5cd78fb16cc3b3427c8874472cdca93fb\",\"parent\":\"95180e8ac981681a12fa3767b32747b21514581605b20a99cf1713c78cf2ddaa\",\"created\":\"2018-11-16T01:14:19.715707659Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"95180e8ac981681a12fa3767b32747b21514581605b20a99cf1713c78cf2ddaa\",\"parent\":\"afb72c06112722395dcb38ffdda4c9564480a69bb0fe587bba8f10d8d0adffaa\",\"created\":\"2018-11-16T01:14:18.574646682Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:4b5f8335c16a8bc9f76a2164458df1d71cf76facbf16d02f18ce7409122c2146 in /usr/local/bin/ \"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"afb72c06112722395dcb38ffdda4c9564480a69bb0fe587bba8f10d8d0adffaa\",\"parent\":\"ccadd71e7e80b1772df1c309938e1cbac71c6deed75c9b21212f72a662ce11be\",\"created\":\"2018-11-16T01:14:17.959906008Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) VOLUME [/var/lib/mysql]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"ccadd71e7e80b1772df1c309938e1cbac71c6deed75c9b21212f72a662ce11be\",\"parent\":\"e053ced3cc09f28a3ab8547dac6bde4220a5f920c559318ba2c807353c0cbdad\",\"created\":\"2018-11-16T01:14:17.521774936Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c { \\t\\techo mysql-community-server mysql-community-server/data-dir select ''; \\t\\techo mysql-community-server mysql-community-server/root-pass password ''; \\t\\techo mysql-community-server mysql-community-server/re-root-pass password ''; \\t\\techo mysql-community-server mysql-community-server/remove-test-db select false; \\t} | debconf-set-selections \\t\\u0026\\u0026 apt-get update \\u0026\\u0026 apt-get install -y mysql-server=\\\"${MYSQL_VERSION}\\\" \\u0026\\u0026 rm -rf /var/lib/apt/lists/* \\t\\u0026\\u0026 rm -rf /var/lib/mysql \\u0026\\u0026 mkdir -p /var/lib/mysql /var/run/mysqld \\t\\u0026\\u0026 chown -R mysql:mysql /var/lib/mysql /var/run/mysqld \\t\\u0026\\u0026 chmod 777 /var/run/mysqld \\t\\u0026\\u0026 find /etc/mysql/ -name '*.cnf' -print0 \\t\\t| xargs -0 grep -lZE '^(bind-address|log)' \\t\\t| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#\\u0026/' \\t\\u0026\\u0026 echo '[mysqld]\\\\nskip-host-cache\\\\nskip-name-resolve' \\u003e /etc/mysql/conf.d/docker.cnf\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"e053ced3cc09f28a3ab8547dac6bde4220a5f920c559318ba2c807353c0cbdad\",\"parent\":\"0dd7718b64000ac1bfb2c1d4bd3226244c9d55e4b741ef2eddf22c03ee638c3b\",\"created\":\"2018-11-16T01:13:51.211877582Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c echo \\\"deb http://repo.mysql.com/apt/debian/ stretch mysql-${MYSQL_MAJOR}\\\" \\u003e /etc/apt/sources.list.d/mysql.list\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"0dd7718b64000ac1bfb2c1d4bd3226244c9d55e4b741ef2eddf22c03ee638c3b\",\"parent\":\"5e0187996d55a7fa5c81fa75caa2cb57677edbd45abfa68a7a8769d8f640466b\",\"created\":\"2018-11-16T01:13:50.087751031Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV MYSQL_VERSION=5.7.24-1debian9\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"5e0187996d55a7fa5c81fa75caa2cb57677edbd45abfa68a7a8769d8f640466b\",\"parent\":\"2d69915517f4a342dd3b3c719212e7349274a213551239b38c54ac0c44e7fb12\",\"created\":\"2018-11-16T01:13:49.699875841Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV MYSQL_MAJOR=5.7\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"2d69915517f4a342dd3b3c719212e7349274a213551239b38c54ac0c44e7fb12\",\"parent\":\"a3492a643c2e7bd54083848276a38e7569e47ccdf42541abd082191f55632e22\",\"created\":\"2018-11-16T01:11:49.672488713Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -ex; \\tkey='A4A9406876FCBD3C456770C88C718D3B5072E1F5'; \\texport GNUPGHOME=\\\"$(mktemp -d)\\\"; \\tgpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys \\\"$key\\\"; \\tgpg --batch --export \\\"$key\\\" \\u003e /etc/apt/trusted.gpg.d/mysql.gpg; \\tgpgconf --kill all; \\trm -rf \\\"$GNUPGHOME\\\"; \\tapt-key list \\u003e /dev/null\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"a3492a643c2e7bd54083848276a38e7569e47ccdf42541abd082191f55632e22\",\"parent\":\"2e7e8bdd723f6a45f9d789b8d2595b1f6c0a702c70f6922792296c681cb5a14e\",\"created\":\"2018-11-16T01:11:46.048879774Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tpwgen \\t\\topenssl \\t\\tperl \\t\\u0026\\u0026 rm -rf /var/lib/apt/lists/*\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"2e7e8bdd723f6a45f9d789b8d2595b1f6c0a702c70f6922792296c681cb5a14e\",\"parent\":\"855801645898a11047b72b6740ccc614f49a9cd5bd07f60820ade1635180acb3\",\"created\":\"2018-11-16T01:11:34.772616243Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir /docker-entrypoint-initdb.d\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"855801645898a11047b72b6740ccc614f49a9cd5bd07f60820ade1635180acb3\",\"parent\":\"123f7f7e13504138215a283c07589c9a506f249305ff2c78567ef3d1eaf27314\",\"created\":\"2018-11-16T01:11:33.651099664Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends ca-certificates wget \\u0026\\u0026 rm -rf /var/lib/apt/lists/* \\t\\u0026\\u0026 wget -O /usr/local/bin/gosu \\\"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture)\\\" \\t\\u0026\\u0026 wget -O /usr/local/bin/gosu.asc \\\"https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$(dpkg --print-architecture).asc\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \\t\\u0026\\u0026 gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu \\t\\u0026\\u0026 gpgconf --kill all \\t\\u0026\\u0026 rm -rf \\\"$GNUPGHOME\\\" /usr/local/bin/gosu.asc \\t\\u0026\\u0026 chmod +x /usr/local/bin/gosu \\t\\u0026\\u0026 gosu nobody true \\t\\u0026\\u0026 apt-get purge -y --auto-remove ca-certificates wget\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"123f7f7e13504138215a283c07589c9a506f249305ff2c78567ef3d1eaf27314\",\"parent\":\"6f3aeec2779f98f81f65151bc886b26eac21c79eecbc79aed3a414e1413643a4\",\"created\":\"2018-11-16T01:11:11.466721945Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV GOSU_VERSION=1.7\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"6f3aeec2779f98f81f65151bc886b26eac21c79eecbc79aed3a414e1413643a4\",\"parent\":\"4597be70a8abf812caed7f0d37ddd738d361ff4fc271e8dc4dde8b7746378d0b\",\"created\":\"2018-11-16T01:11:11.128616814Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\u0026\\u0026 apt-get install -y --no-install-recommends gnupg dirmngr \\u0026\\u0026 rm -rf /var/lib/apt/lists/*\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"4597be70a8abf812caed7f0d37ddd738d361ff4fc271e8dc4dde8b7746378d0b\",\"parent\":\"97569d305060de34859e5d55a8bbb010f4026af7cc4b9ca40294689bd6af1909\",\"created\":\"2018-11-16T01:11:01.00193007Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c groupadd -r mysql \\u0026\\u0026 useradd -r -g mysql mysql\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"97569d305060de34859e5d55a8bbb010f4026af7cc4b9ca40294689bd6af1909\",\"parent\":\"0454203f6769f870345aa82f55f4699dfaab41bcb3e38f0c44c9ecc11ef2a38f\",\"created\":\"2018-11-15T22:45:07.243453424Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"bash\\\"]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"0454203f6769f870345aa82f55f4699dfaab41bcb3e38f0c44c9ecc11ef2a38f\",\"created\":\"2018-11-15T22:45:06.938205528Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:dab9baf938799c515ddce14c02f899da5992f0b76a432fa10a2338556a3cb04f in / \"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "BTGA:CY7S:HZ7T:FEUS:DZJD:FNS5:O5U2:BTGQ:SGZZ:AY5P:R5MA:UJEY",
- "kty": "EC",
- "x": "0xF2dZ_HLk8VVrqMLMm838LWFAi60P7V5fBjlhlt7xI",
- "y": "niBqFvBqOvtABZSpMoQoSMT7H13Pb0POo00OX7Xsmvc"
- },
- "alg": "ES256"
- },
- "signature": "w8TITz0xkMNqgchKNSfQ-4OlfIGUnG4MLT4Tt738Z0NiD1bHaWFef8wCCBNuDLiKHllrqcqM6Aj__LhsctSwyA",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjEyODM2LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTgtMTEtMjFUMTk6MTU6MTNaIn0"
- }
- ]
-}
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/complex.schema2.json b/image/docker/schema2/test/conversion_data/complex.schema2.json
deleted file mode 100644
index 10ec4661c..000000000
--- a/image/docker/schema2/test/conversion_data/complex.schema2.json
+++ /dev/null
@@ -1,66 +0,0 @@
-{
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "config": {
- "mediaType": "application/vnd.docker.container.image.v1+json",
- "size": 8171,
- "digest": "sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e"
- },
- "layers": [
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 22486277,
- "digest": "sha256:a5a6f2f73cd8abbdc55d0df0d8834f7262713e87d6c8800ea3851f103025e0f0"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 1747,
- "digest": "sha256:936836019e67889c1f5a95732c62c476d2450010988792e99d6e7ece84fdce2f"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 4500948,
- "digest": "sha256:283fa4c95fb4e349b501ef8c864b2259500d83ca6529253da12d658aa480cbb5"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 1270313,
- "digest": "sha256:1f212fb371f936c524c624e6830242a8cb91b3b710942f9241004dae45828f87"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 115,
- "digest": "sha256:e2ae0d063e89542abdd8abd1613e8e27743fa669f4a418c8b0a813991621d892"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 12091270,
- "digest": "sha256:5ed0ae805b65407ddd0ff1aeb8371de3133e5daed97726717d4225cb7a8efaaa"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 24045,
- "digest": "sha256:0283dc49ef4e5bc0dc8897b14818de315faeceb0a5272464ff3c48cd4ea3b626"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 223,
- "digest": "sha256:a7905d9fbbea59dc29d709d1d61a96c06c26a2d1e506ac5c3832a348969052b8"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 83565354,
- "digest": "sha256:cd2a6583723557a1dc58584f53564f791dbb7a5d264bb2f8d71aa8c5d21ac38c"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 2876,
- "digest": "sha256:5f906b8da5fed2070448fed578b93cb1a995be5bdde5624163fbcb842ce4460f"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 121,
- "digest": "sha256:e81e5181556720e9c821bdb826dd9dbeb078dd28af8fe84586aa904ff212d117"
- }
- ]
-}
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/simple.config.json b/image/docker/schema2/test/conversion_data/simple.config.json
deleted file mode 100644
index 8b2355666..000000000
--- a/image/docker/schema2/test/conversion_data/simple.config.json
+++ /dev/null
@@ -1,75 +0,0 @@
-{
- "architecture": "amd64",
- "config": {
- "Hostname": "",
- "Domainname": "",
- "User": "",
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": [
- "sh"
- ],
- "Image": "",
- "Volumes": null,
- "WorkingDir": "",
- "Entrypoint": null,
- "OnBuild": null,
- "Labels": {}
- },
- "container": "86fff20ea922659929a4716850cc9b3a2cca6c197f7a7ece7da5b6d9d8ac4954",
- "container_config": {
- "Hostname": "86fff20ea922",
- "Domainname": "",
- "User": "",
- "AttachStdin": true,
- "AttachStdout": true,
- "AttachStderr": true,
- "Tty": true,
- "OpenStdin": true,
- "StdinOnce": true,
- "Env": [
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": [
- "sh"
- ],
- "Image": "busybox",
- "Volumes": null,
- "WorkingDir": "",
- "Entrypoint": null,
- "OnBuild": null,
- "Labels": {}
- },
- "created": "2018-11-20T21:15:01.569237Z",
- "docker_version": "17.09.0-ce",
- "history": [
- {
- "created": "2018-10-02T17:19:34.03981888Z",
- "created_by": "/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / "
- },
- {
- "created": "2018-10-02T17:19:34.239926273Z",
- "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
- "empty_layer": true
- },
- {
- "created": "2018-11-20T21:15:01.569237Z",
- "created_by": "sh"
- }
- ],
- "os": "linux",
- "rootfs": {
- "type": "layers",
- "diff_ids": [
- "sha256:8a788232037eaf17794408ff3df6b922a1aedf9ef8de36afdae3ed0b0381907b",
- "sha256:70d967d052ce14cd372b12663d84046ade5712c3a4ece6078cdb63e75bbfcfa1"
- ]
- }
- }
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/simple.schema1.json b/image/docker/schema2/test/conversion_data/simple.schema1.json
deleted file mode 100644
index 459c3bd1c..000000000
--- a/image/docker/schema2/test/conversion_data/simple.schema1.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "devtable/somerepo",
- "tag": "latest",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:28b98663b93a1c984379691300f284ee1536db1b6ecd8a1d59222528f80cee89"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"container\":\"86fff20ea922659929a4716850cc9b3a2cca6c197f7a7ece7da5b6d9d8ac4954\",\"container_config\":{\"Hostname\":\"86fff20ea922\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":true,\"AttachStdout\":true,\"AttachStderr\":true,\"Tty\":true,\"OpenStdin\":true,\"StdinOnce\":true,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"Image\":\"busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-11-20T21:15:01.569237Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"692854afd8718d5285bf99cecfc9d6385f41122d3cea70fc9961b3f23ae0d768\",\"os\":\"linux\",\"parent\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\"}"
- },
- {
- "v1Compatibility": "{\"id\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"parent\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.239926273Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.03981888Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / \"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
- "kty": "EC",
- "x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
- "y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
- },
- "alg": "ES256"
- },
- "signature": "4-nlo2R9Dn3PIGHuhvPkamCzLgFYURziihwZYAnmw5eMKLRj4ir-VeEJI30mDh8ArTeDo-PnMLRNZGRX2NwXHw",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjIzNDEsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMS0yMFQyMToxNzozMVoifQ"
- }
- ]
- }
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/simple.schema2.json b/image/docker/schema2/test/conversion_data/simple.schema2.json
deleted file mode 100644
index 0c443a9e3..000000000
--- a/image/docker/schema2/test/conversion_data/simple.schema2.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "config": {
- "mediaType": "application/vnd.docker.container.image.v1+json",
- "size": 1977,
- "digest": "sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf"
- },
- "layers": [
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 727978,
- "digest": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 190,
- "digest": "sha256:28b98663b93a1c984379691300f284ee1536db1b6ecd8a1d59222528f80cee89"
- }
- ]
- }
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/ubuntu.config.json b/image/docker/schema2/test/conversion_data/ubuntu.config.json
deleted file mode 100644
index bbda91c35..000000000
--- a/image/docker/schema2/test/conversion_data/ubuntu.config.json
+++ /dev/null
@@ -1,90 +0,0 @@
-{
- "architecture": "amd64",
- "config": {
- "Hostname": "",
- "Domainname": "",
- "User": "",
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": [
- "/bin/bash"
- ],
- "ArgsEscaped": true,
- "Image": "sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a",
- "Volumes": null,
- "WorkingDir": "",
- "Entrypoint": null,
- "OnBuild": null,
- "Labels": null
- },
- "container": "1501390588c62f6c7c0e4fec25d6587c75c2f330536b9d08c610a56ed013f64b",
- "container_config": {
- "Hostname": "1501390588c6",
- "Domainname": "",
- "User": "",
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": [
- "/bin/sh",
- "-c",
- "#(nop) ",
- "CMD [\"/bin/bash\"]"
- ],
- "ArgsEscaped": true,
- "Image": "sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a",
- "Volumes": null,
- "WorkingDir": "",
- "Entrypoint": null,
- "OnBuild": null,
- "Labels": {}
- },
- "created": "2018-11-19T21:20:42.235528208Z",
- "docker_version": "17.06.2-ce",
- "history": [
- {
- "created": "2018-11-19T21:20:39.739838469Z",
- "created_by": "/bin/sh -c #(nop) ADD file:39e5bc157a8be63bbb36a142e18b644b0cfff07a8a02b42f7d0c4ee4ba75a5bc in / "
- },
- {
- "created": "2018-11-19T21:20:40.571619714Z",
- "created_by": "/bin/sh -c set -xe \t\t&& echo '#!/bin/sh' > /usr/sbin/policy-rc.d \t&& echo 'exit 101' >> /usr/sbin/policy-rc.d \t&& chmod +x /usr/sbin/policy-rc.d \t\t&& dpkg-divert --local --rename --add /sbin/initctl \t&& cp -a /usr/sbin/policy-rc.d /sbin/initctl \t&& sed -i 's/^exit.*/exit 0/' /sbin/initctl \t\t&& echo 'force-unsafe-io' > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \t\t&& echo 'DPkg::Post-Invoke { \"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\"; };' > /etc/apt/apt.conf.d/docker-clean \t&& echo 'APT::Update::Post-Invoke { \"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\"; };' >> /etc/apt/apt.conf.d/docker-clean \t&& echo 'Dir::Cache::pkgcache \"\"; Dir::Cache::srcpkgcache \"\";' >> /etc/apt/apt.conf.d/docker-clean \t\t&& echo 'Acquire::Languages \"none\";' > /etc/apt/apt.conf.d/docker-no-languages \t\t&& echo 'Acquire::GzipIndexes \"true\"; Acquire::CompressionTypes::Order:: \"gz\";' > /etc/apt/apt.conf.d/docker-gzip-indexes \t\t&& echo 'Apt::AutoRemove::SuggestsImportant \"false\";' > /etc/apt/apt.conf.d/docker-autoremove-suggests"
- },
- {
- "created": "2018-11-19T21:20:41.293060457Z",
- "created_by": "/bin/sh -c rm -rf /var/lib/apt/lists/*"
- },
- {
- "created": "2018-11-19T21:20:42.002883522Z",
- "created_by": "/bin/sh -c mkdir -p /run/systemd && echo 'docker' > /run/systemd/container"
- },
- {
- "created": "2018-11-19T21:20:42.235528208Z",
- "created_by": "/bin/sh -c #(nop) CMD [\"/bin/bash\"]",
- "empty_layer": true
- }
- ],
- "os": "linux",
- "rootfs": {
- "type": "layers",
- "diff_ids": [
- "sha256:bc7f4b25d0ae3524466891c41cefc7c6833c533e00ba80f8063c68da9a8b65fe",
- "sha256:a768c3f3878e96565d2bf0dcf90508261862847b2e7b8fc804a0770c07f0d5d5",
- "sha256:ca2991e4676cba899ad9bc6ad3a044cd0816915f9e97a6f2e67b6accbc779ba5",
- "sha256:b9b7103af585bd8ae9130de947817be7ce76092aa19cf6d2f9d5290440c645eb"
- ]
- }
- }
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/ubuntu.schema1.json b/image/docker/schema2/test/conversion_data/ubuntu.schema1.json
deleted file mode 100644
index 6319c70e7..000000000
--- a/image/docker/schema2/test/conversion_data/ubuntu.schema1.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "devtable/ubuntu",
- "tag": "latest",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:f85999a86bef2603a9e9a4fa488a7c1f82e471cbb76c3b5068e54e1a9320964a"
- },
- {
- "blobSum": "sha256:fa83472a3562898caaf8d77542181a473a84039376f2ba56254619d9317ba00d"
- },
- {
- "blobSum": "sha256:da1315cffa03c17988ae5c66f56d5f50517652a622afc1611a8bdd6c00b1fde3"
- },
- {
- "blobSum": "sha256:32802c0cfa4defde2981bec336096350d0bb490469c494e21f678b1dcf6d831f"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"ArgsEscaped\":true,\"Image\":\"sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"1501390588c62f6c7c0e4fec25d6587c75c2f330536b9d08c610a56ed013f64b\",\"container_config\":{\"Hostname\":\"1501390588c6\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"/bin/bash\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:a141f6fbdbbcfc331f12db000daa2e636820152fa35dff4ca56cf02382aece7a\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-11-19T21:20:42.235528208Z\",\"docker_version\":\"17.06.2-ce\",\"id\":\"d71fc6939e162a01d90cefeeb3d7f6d6b2583fac2ef98833ec69a95d12ffeeaa\",\"os\":\"linux\",\"parent\":\"ba7177adc95198e86c00039d17d22f35ed1eed39f4e2c3ffc7b2c29a3e81271a\",\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"ba7177adc95198e86c00039d17d22f35ed1eed39f4e2c3ffc7b2c29a3e81271a\",\"parent\":\"69d0081dfb37f77fa9c971f367c6b86a3eb4090e7ab56741da954523ec3a786f\",\"created\":\"2018-11-19T21:20:42.002883522Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p /run/systemd \\u0026\\u0026 echo 'docker' \\u003e /run/systemd/container\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"69d0081dfb37f77fa9c971f367c6b86a3eb4090e7ab56741da954523ec3a786f\",\"parent\":\"1bd3843430506ff885fc1a3c1d050c19e2dcf70f8ef6cea1536692fd396c87bc\",\"created\":\"2018-11-19T21:20:41.293060457Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -rf /var/lib/apt/lists/*\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"1bd3843430506ff885fc1a3c1d050c19e2dcf70f8ef6cea1536692fd396c87bc\",\"parent\":\"248632e87271aa5118ebc0ebf46758791e032c481f9702a2a36e7c85e83d33d2\",\"created\":\"2018-11-19T21:20:40.571619714Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -xe \\t\\t\\u0026\\u0026 echo '#!/bin/sh' \\u003e /usr/sbin/policy-rc.d \\t\\u0026\\u0026 echo 'exit 101' \\u003e\\u003e /usr/sbin/policy-rc.d \\t\\u0026\\u0026 chmod +x /usr/sbin/policy-rc.d \\t\\t\\u0026\\u0026 dpkg-divert --local --rename --add /sbin/initctl \\t\\u0026\\u0026 cp -a /usr/sbin/policy-rc.d /sbin/initctl \\t\\u0026\\u0026 sed -i 's/^exit.*/exit 0/' /sbin/initctl \\t\\t\\u0026\\u0026 echo 'force-unsafe-io' \\u003e /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \\t\\t\\u0026\\u0026 echo 'DPkg::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e /etc/apt/apt.conf.d/docker-clean \\t\\u0026\\u0026 echo 'APT::Update::Post-Invoke { \\\"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true\\\"; };' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\t\\u0026\\u0026 echo 'Dir::Cache::pkgcache \\\"\\\"; Dir::Cache::srcpkgcache \\\"\\\";' \\u003e\\u003e /etc/apt/apt.conf.d/docker-clean \\t\\t\\u0026\\u0026 echo 'Acquire::Languages \\\"none\\\";' \\u003e /etc/apt/apt.conf.d/docker-no-languages \\t\\t\\u0026\\u0026 echo 'Acquire::GzipIndexes \\\"true\\\"; Acquire::CompressionTypes::Order:: \\\"gz\\\";' \\u003e /etc/apt/apt.conf.d/docker-gzip-indexes \\t\\t\\u0026\\u0026 echo 'Apt::AutoRemove::SuggestsImportant \\\"false\\\";' \\u003e /etc/apt/apt.conf.d/docker-autoremove-suggests\"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"248632e87271aa5118ebc0ebf46758791e032c481f9702a2a36e7c85e83d33d2\",\"created\":\"2018-11-19T21:20:39.739838469Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:39e5bc157a8be63bbb36a142e18b644b0cfff07a8a02b42f7d0c4ee4ba75a5bc in / \"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
- "kty": "EC",
- "x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
- "y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
- },
- "alg": "ES256"
- },
- "signature": "0wBIubWqf-71Im54gbPlOjFBH7lr6MFLW75bdb-McFvDnfgSdOIMuJ9NHtKEYNF8qFe9hMoO6_GrSDVTJ-pryQ",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjQ5MjMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMS0yNlQxMDo0MjozMloifQ"
- }
- ]
- }
\ No newline at end of file
diff --git a/image/docker/schema2/test/conversion_data/ubuntu.schema2.json b/image/docker/schema2/test/conversion_data/ubuntu.schema2.json
deleted file mode 100644
index 2b5a201c5..000000000
--- a/image/docker/schema2/test/conversion_data/ubuntu.schema2.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "config": {
- "mediaType": "application/vnd.docker.container.image.v1+json",
- "size": 3894,
- "digest": "sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26"
- },
- "layers": [
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 32102249,
- "digest": "sha256:32802c0cfa4defde2981bec336096350d0bb490469c494e21f678b1dcf6d831f"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 847,
- "digest": "sha256:da1315cffa03c17988ae5c66f56d5f50517652a622afc1611a8bdd6c00b1fde3"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 556,
- "digest": "sha256:fa83472a3562898caaf8d77542181a473a84039376f2ba56254619d9317ba00d"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 162,
- "digest": "sha256:f85999a86bef2603a9e9a4fa488a7c1f82e471cbb76c3b5068e54e1a9320964a"
- }
- ]
- }
\ No newline at end of file
diff --git a/image/docker/schema2/test/test_config.py b/image/docker/schema2/test/test_config.py
deleted file mode 100644
index ba59feea3..000000000
--- a/image/docker/schema2/test/test_config.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import json
-import pytest
-
-from image.docker.schema2.config import MalformedSchema2Config, DockerSchema2Config
-from util.bytes import Bytes
-
-@pytest.mark.parametrize('json_data', [
- '',
- '{}',
- """
- {
- "unknown": "key"
- }
- """,
-])
-def test_malformed_configs(json_data):
- with pytest.raises(MalformedSchema2Config):
- DockerSchema2Config(Bytes.for_string_or_unicode(json_data))
-
-CONFIG_BYTES = json.dumps({
- "architecture": "amd64",
- "config": {
- "Hostname": "",
- "Domainname": "",
- "User": "",
- "AttachStdin": False,
- "AttachStdout": False,
- "AttachStderr": False,
- "Tty": False,
- "OpenStdin": False,
- "StdinOnce": False,
- "Env": [
- "HTTP_PROXY=http:\/\/localhost:8080",
- "http_proxy=http:\/\/localhost:8080",
- "PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
- ],
- "Cmd": [
- "sh"
- ],
- "Image": "",
- "Volumes": None,
- "WorkingDir": "",
- "Entrypoint": None,
- "OnBuild": None,
- "Labels": {
-
- }
- },
- "container": "b7a43694b435c8e9932615643f61f975a9213e453b15cd6c2a386f144a2d2de9",
- "container_config": {
- "Hostname": "b7a43694b435",
- "Domainname": "",
- "User": "",
- "AttachStdin": True,
- "AttachStdout": True,
- "AttachStderr": True,
- "Tty": True,
- "OpenStdin": True,
- "StdinOnce": True,
- "Env": [
- "HTTP_PROXY=http:\/\/localhost:8080",
- "http_proxy=http:\/\/localhost:8080",
- "PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin"
- ],
- "Cmd": [
- "sh"
- ],
- "Image": "jschorr\/somerepo",
- "Volumes": None,
- "WorkingDir": "",
- "Entrypoint": None,
- "OnBuild": None,
- "Labels": {
-
- }
- },
- "created": "2018-04-16T10:41:19.079522722Z",
- "docker_version": "17.09.0-ce",
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "\/bin\/sh -c #(nop) ADD file:9e4ca21cbd24dc05b454b6be21c7c639216ae66559b21ba24af0d665c62620dc in \/ "
- },
- {
- "created": "2018-04-03T18:37:09.613317719Z",
- "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
- "empty_layer": True
- },
- {
- "created": "2018-04-16T10:37:44.418262777Z",
- "created_by": "sh"
- },
- {
- "created": "2018-04-16T10:41:19.079522722Z",
- "created_by": "sh"
- }
- ],
- "os": "linux",
- "rootfs": {
- "type": "layers",
- "diff_ids": [
- "sha256:3e596351c689c8827a3c9635bc1083cff17fa4a174f84f0584bd0ae6f384195b",
- "sha256:4552be273c71275a88de0b8c8853dcac18cb74d5790f5383d9b38d4ac55062d5",
- "sha256:1319c76152ca37fbeb7fb71e0ffa7239bc19ffbe3b95c00417ece39d89d06e6e"
- ]
- }
-})
-
-def test_valid_config():
- config = DockerSchema2Config(Bytes.for_string_or_unicode(CONFIG_BYTES))
- history = list(config.history)
- assert len(history) == 4
-
- assert not history[0].is_empty
- assert history[1].is_empty
-
- assert history[0].created_datetime.year == 2018
- assert history[1].command == '/bin/sh -c #(nop) CMD ["sh"]'
- assert history[2].command == 'sh'
-
- for index, history_entry in enumerate(history):
- v1_compat = config.build_v1_compatibility(history_entry, 'somev1id', 'someparentid',
- index == 3)
- assert v1_compat['id'] == 'somev1id'
- assert v1_compat['parent'] == 'someparentid'
-
- if index == 3:
- assert v1_compat['container_config'] == config._parsed['container_config']
- else:
- assert 'Hostname' not in v1_compat['container_config']
- assert v1_compat['container_config']['Cmd'] == [history_entry.command]
-
- assert config.labels == {}
diff --git a/image/docker/schema2/test/test_conversion.py b/image/docker/schema2/test/test_conversion.py
deleted file mode 100644
index 75a1bece9..000000000
--- a/image/docker/schema2/test/test_conversion.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import os
-import json
-
-import pytest
-
-from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES
-from image.docker.schema2.manifest import DockerSchema2Manifest
-from image.docker.schemautil import ContentRetrieverForTesting
-from util.bytes import Bytes
-
-
-def _get_test_file_contents(test_name, kind):
- filename = '%s.%s.json' % (test_name, kind)
- data_dir = os.path.dirname(__file__)
- with open(os.path.join(data_dir, 'conversion_data', filename), 'r') as f:
- return Bytes.for_string_or_unicode(f.read())
-
-
-@pytest.mark.parametrize('name, config_sha', [
- ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
- ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
- ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
-])
-def test_legacy_layers(name, config_sha):
- cr = {}
- cr[config_sha] = _get_test_file_contents(name, 'config').as_encoded_str()
- retriever = ContentRetrieverForTesting(cr)
-
- schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
- schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)
-
- # Check legacy layers
- schema2_legacy_layers = list(schema2.generate_legacy_layers({}, retriever))
- schema1_legacy_layers = list(schema1.generate_legacy_layers({}, retriever))
- assert len(schema1_legacy_layers) == len(schema2_legacy_layers)
-
- for index in range(0, len(schema1_legacy_layers)):
- schema1_legacy_layer = schema1_legacy_layers[index]
- schema2_legacy_layer = schema2_legacy_layers[index]
- assert schema1_legacy_layer.content_checksum == schema2_legacy_layer.content_checksum
- assert schema1_legacy_layer.comment == schema2_legacy_layer.comment
- assert schema1_legacy_layer.command == schema2_legacy_layer.command
-
-
-@pytest.mark.parametrize('name, config_sha', [
- ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
- ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
- ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
-])
-def test_conversion(name, config_sha):
- cr = {}
- cr[config_sha] = _get_test_file_contents(name, 'config').as_encoded_str()
- retriever = ContentRetrieverForTesting(cr)
-
- schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
- schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)
-
- s2to2 = schema2.convert_manifest([schema2.media_type], 'devtable', 'somerepo', 'latest',
- retriever)
- assert s2to2 == schema2
-
- s1to1 = schema1.convert_manifest([schema1.media_type], 'devtable', 'somerepo', 'latest',
- retriever)
- assert s1to1 == schema1
-
- s2to1 = schema2.convert_manifest(DOCKER_SCHEMA1_CONTENT_TYPES, 'devtable', 'somerepo', 'latest',
- retriever)
- assert s2to1.media_type in DOCKER_SCHEMA1_CONTENT_TYPES
- assert len(s2to1.layers) == len(schema1.layers)
-
- s2toempty = schema2.convert_manifest([], 'devtable', 'somerepo', 'latest', retriever)
- assert s2toempty is None
-
-
-@pytest.mark.parametrize('name, config_sha', [
- ('simple', 'sha256:e7a06c2e5b7afb1bbfa9124812e87f1138c4c10d77e0a217f0b8c8c9694dc5cf'),
- ('complex', 'sha256:ae6b78bedf88330a5e5392164f40d28ed8a38120b142905d30b652ebffece10e'),
- ('ubuntu', 'sha256:93fd78260bd1495afb484371928661f63e64be306b7ac48e2d13ce9422dfee26'),
-])
-def test_2to1_conversion(name, config_sha):
- cr = {}
- cr[config_sha] = _get_test_file_contents(name, 'config').as_encoded_str()
- retriever = ContentRetrieverForTesting(cr)
-
- schema2 = DockerSchema2Manifest(_get_test_file_contents(name, 'schema2'))
- schema1 = DockerSchema1Manifest(_get_test_file_contents(name, 'schema1'), validate=False)
-
- converted = schema2.get_schema1_manifest('devtable', 'somerepo', 'latest', retriever)
- assert len(converted.layers) == len(schema1.layers)
-
- image_id_map = {}
- for index in range(0, len(converted.layers)):
- converted_layer = converted.layers[index]
- schema1_layer = schema1.layers[index]
-
- image_id_map[schema1_layer.v1_metadata.image_id] = converted_layer.v1_metadata.image_id
-
- assert str(schema1_layer.digest) == str(converted_layer.digest)
-
- schema1_parent_id = schema1_layer.v1_metadata.parent_image_id
- converted_parent_id = converted_layer.v1_metadata.parent_image_id
- assert (schema1_parent_id is None) == (converted_parent_id is None)
-
- if schema1_parent_id is not None:
- assert image_id_map[schema1_parent_id] == converted_parent_id
-
- assert schema1_layer.v1_metadata.created == converted_layer.v1_metadata.created
- assert schema1_layer.v1_metadata.comment == converted_layer.v1_metadata.comment
- assert schema1_layer.v1_metadata.command == converted_layer.v1_metadata.command
- assert schema1_layer.v1_metadata.labels == converted_layer.v1_metadata.labels
-
- schema1_container_config = json.loads(schema1_layer.raw_v1_metadata)['container_config']
- converted_container_config = json.loads(converted_layer.raw_v1_metadata)['container_config']
-
- assert schema1_container_config == converted_container_config
diff --git a/image/docker/schema2/test/test_list.py b/image/docker/schema2/test/test_list.py
deleted file mode 100644
index 04a321ce0..000000000
--- a/image/docker/schema2/test/test_list.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import json
-import pytest
-
-from image.docker.schema1 import (DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES,
- DockerSchema1ManifestBuilder)
-from image.docker.schema2 import DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE
-from image.docker.schema2.manifest import DockerSchema2Manifest
-from image.docker.schema2.list import (MalformedSchema2ManifestList, DockerSchema2ManifestList,
- DockerSchema2ManifestListBuilder, MismatchManifestException)
-from image.docker.schema2.test.test_manifest import MANIFEST_BYTES as v22_bytes
-from image.docker.schemautil import ContentRetrieverForTesting
-from image.docker.test.test_schema1 import MANIFEST_BYTES as v21_bytes
-from util.bytes import Bytes
-
-
-@pytest.mark.parametrize('json_data', [
- '',
- '{}',
- """
- {
- "unknown": "key"
- }
- """,
-])
-def test_malformed_manifest_lists(json_data):
- with pytest.raises(MalformedSchema2ManifestList):
- DockerSchema2ManifestList(Bytes.for_string_or_unicode(json_data))
-
-
-MANIFESTLIST_BYTES = json.dumps({
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
- "manifests": [
- {
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "size": 946,
- "digest": "sha256:e6",
- "platform": {
- "architecture": "ppc64le",
- "os": "linux",
- }
- },
- {
- "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
- "size": 878,
- "digest": "sha256:5b",
- "platform": {
- "architecture": "amd64",
- "os": "linux",
- "features": [
- "sse4"
- ]
- }
- }
- ]
-})
-
-NO_AMD_MANIFESTLIST_BYTES = json.dumps({
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
- "manifests": [
- {
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "size": 946,
- "digest": "sha256:e6",
- "platform": {
- "architecture": "ppc64le",
- "os": "linux",
- }
- },
- ]
-})
-
-retriever = ContentRetrieverForTesting({
- 'sha256:e6': v22_bytes,
- 'sha256:5b': v21_bytes,
-})
-
-def test_valid_manifestlist():
- manifestlist = DockerSchema2ManifestList(Bytes.for_string_or_unicode(MANIFESTLIST_BYTES))
- assert len(manifestlist.manifests(retriever)) == 2
-
- assert manifestlist.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'
- assert manifestlist.bytes.as_encoded_str() == MANIFESTLIST_BYTES
- assert manifestlist.manifest_dict == json.loads(MANIFESTLIST_BYTES)
- assert manifestlist.get_layers(retriever) is None
- assert not manifestlist.blob_digests
-
- for index, manifest in enumerate(manifestlist.manifests(retriever)):
- if index == 0:
- assert isinstance(manifest.manifest_obj, DockerSchema2Manifest)
- assert manifest.manifest_obj.schema_version == 2
- else:
- assert isinstance(manifest.manifest_obj, DockerSchema1Manifest)
- assert manifest.manifest_obj.schema_version == 1
-
- # Check retrieval of a schema 2 manifest. This should return None, because the schema 2 manifest
- # is not amd64-compatible.
- schema2_manifest = manifestlist.convert_manifest([DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE], 'foo',
- 'bar', 'baz', retriever)
- assert schema2_manifest is None
-
- # Check retrieval of a schema 1 manifest.
- compatible_manifest = manifestlist.get_schema1_manifest('foo', 'bar', 'baz', retriever)
- assert compatible_manifest.schema_version == 1
-
- schema1_manifest = manifestlist.convert_manifest(DOCKER_SCHEMA1_CONTENT_TYPES, 'foo',
- 'bar', 'baz', retriever)
- assert schema1_manifest.schema_version == 1
- assert schema1_manifest.digest == compatible_manifest.digest
-
- # Ensure it validates.
- manifestlist.validate(retriever)
-
-
-def test_get_schema1_manifest_no_matching_list():
- manifestlist = DockerSchema2ManifestList(Bytes.for_string_or_unicode(NO_AMD_MANIFESTLIST_BYTES))
- assert len(manifestlist.manifests(retriever)) == 1
-
- assert manifestlist.media_type == 'application/vnd.docker.distribution.manifest.list.v2+json'
- assert manifestlist.bytes.as_encoded_str() == NO_AMD_MANIFESTLIST_BYTES
-
- compatible_manifest = manifestlist.get_schema1_manifest('foo', 'bar', 'baz', retriever)
- assert compatible_manifest is None
-
-
-def test_builder():
- existing = DockerSchema2ManifestList(Bytes.for_string_or_unicode(MANIFESTLIST_BYTES))
- builder = DockerSchema2ManifestListBuilder()
- for index, manifest in enumerate(existing.manifests(retriever)):
- builder.add_manifest(manifest.manifest_obj, "amd64", "os")
-
- built = builder.build()
- assert len(built.manifests(retriever)) == 2
-
-
-def test_invalid_manifestlist():
- # Build a manifest list with a schema 1 manifest of the wrong architecture.
- builder = DockerSchema1ManifestBuilder('foo', 'bar', 'baz')
- builder.add_layer('sha:2356', '{"id": "foo"}')
- manifest = builder.build().unsigned()
-
- listbuilder = DockerSchema2ManifestListBuilder()
- listbuilder.add_manifest(manifest, 'amd32', 'linux')
- manifestlist = listbuilder.build()
-
- retriever = ContentRetrieverForTesting()
- retriever.add_digest(manifest.digest, manifest.bytes.as_encoded_str())
-
- with pytest.raises(MismatchManifestException):
- manifestlist.validate(retriever)
diff --git a/image/docker/schema2/test/test_manifest.py b/image/docker/schema2/test/test_manifest.py
deleted file mode 100644
index 6d2193b6d..000000000
--- a/image/docker/schema2/test/test_manifest.py
+++ /dev/null
@@ -1,422 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import json
-import pytest
-import os
-
-from app import docker_v2_signing_key
-from image.docker.schema1 import (DockerSchema1ManifestBuilder,
- DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE,
- DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE)
-from image.docker.schema2.manifest import (MalformedSchema2Manifest, DockerSchema2Manifest,
- DockerSchema2ManifestBuilder, EMPTY_LAYER_BLOB_DIGEST)
-from image.docker.schema2.config import DockerSchema2Config
-from image.docker.schema2.test.test_config import CONFIG_BYTES
-from image.docker.schemautil import ContentRetrieverForTesting
-from util.bytes import Bytes
-
-
-@pytest.mark.parametrize('json_data', [
- '',
- '{}',
- """
- {
- "unknown": "key"
- }
- """,
-])
-def test_malformed_manifests(json_data):
- with pytest.raises(MalformedSchema2Manifest):
- DockerSchema2Manifest(Bytes.for_string_or_unicode(json_data))
-
-
-MANIFEST_BYTES = json.dumps({
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "config": {
- "mediaType": "application/vnd.docker.container.image.v1+json",
- "size": 1885,
- "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
- },
- "layers": [
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 1234,
- "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 32654,
- "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 16724,
- "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 73109,
- "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
- },
- ],
-})
-
-REMOTE_MANIFEST_BYTES = json.dumps({
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "config": {
- "mediaType": "application/vnd.docker.container.image.v1+json",
- "size": 1885,
- "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
- },
- "layers": [
- {
- "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
- "size": 1234,
- "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
- "urls": ['http://some/url'],
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 32654,
- "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 16724,
- "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
- },
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 73109,
- "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
- },
- ],
-})
-
-def test_valid_manifest():
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
- assert manifest.config.size == 1885
- assert str(manifest.config.digest) == 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'
- assert manifest.media_type == "application/vnd.docker.distribution.manifest.v2+json"
- assert not manifest.has_remote_layer
- assert manifest.has_legacy_image
-
- retriever = ContentRetrieverForTesting.for_config({
- "config": {
- "Labels": {},
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "foo"
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "bar"
- },
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "foo"
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "bar"
- },
- ],
- }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)
-
- assert len(manifest.filesystem_layers) == 4
- assert manifest.filesystem_layers[0].compressed_size == 1234
- assert str(manifest.filesystem_layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
- assert not manifest.filesystem_layers[0].is_remote
-
- assert manifest.leaf_filesystem_layer == manifest.filesystem_layers[3]
- assert not manifest.leaf_filesystem_layer.is_remote
- assert manifest.leaf_filesystem_layer.compressed_size == 73109
-
- blob_digests = list(manifest.blob_digests)
- expected = [str(layer.digest) for layer in manifest.filesystem_layers] + [manifest.config.digest]
- assert blob_digests == expected
- assert list(manifest.local_blob_digests) == expected
-
- manifest_image_layers = list(manifest.get_layers(retriever))
- assert len(manifest_image_layers) == len(list(manifest.filesystem_layers))
- for index in range(0, 4):
- assert manifest_image_layers[index].blob_digest == str(manifest.filesystem_layers[index].digest)
-
-
-def test_valid_remote_manifest():
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(REMOTE_MANIFEST_BYTES))
- assert manifest.config.size == 1885
- assert str(manifest.config.digest) == 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'
- assert manifest.media_type == "application/vnd.docker.distribution.manifest.v2+json"
- assert manifest.has_remote_layer
-
- assert len(manifest.filesystem_layers) == 4
- assert manifest.filesystem_layers[0].compressed_size == 1234
- assert str(manifest.filesystem_layers[0].digest) == 'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736'
- assert manifest.filesystem_layers[0].is_remote
- assert manifest.filesystem_layers[0].urls == ['http://some/url']
-
- assert manifest.leaf_filesystem_layer == manifest.filesystem_layers[3]
- assert not manifest.leaf_filesystem_layer.is_remote
- assert manifest.leaf_filesystem_layer.compressed_size == 73109
-
- expected = set([str(layer.digest) for layer in manifest.filesystem_layers] +
- [manifest.config.digest])
-
- blob_digests = set(manifest.blob_digests)
- local_digests = set(manifest.local_blob_digests)
-
- assert blob_digests == expected
- assert local_digests == (expected - {manifest.filesystem_layers[0].digest})
-
- assert manifest.has_remote_layer
- assert manifest.get_leaf_layer_v1_image_id(None) is None
- assert manifest.get_legacy_image_ids(None) is None
-
- retriever = ContentRetrieverForTesting.for_config({
- "config": {
- "Labels": {},
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "foo"
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "bar"
- },
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "foo"
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "bar"
- },
- ],
- }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)
-
- manifest_image_layers = list(manifest.get_layers(retriever))
- assert len(manifest_image_layers) == len(list(manifest.filesystem_layers))
- for index in range(0, 4):
- assert manifest_image_layers[index].blob_digest == str(manifest.filesystem_layers[index].digest)
-
-
-def test_schema2_builder():
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
-
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest(manifest.config.digest, manifest.config.size)
-
- for layer in manifest.filesystem_layers:
- builder.add_layer(layer.digest, layer.compressed_size, urls=layer.urls)
-
- built = builder.build()
- assert built.filesystem_layers == manifest.filesystem_layers
- assert built.config == manifest.config
-
-
-def test_get_manifest_labels():
- labels = dict(foo='bar', baz='meh')
- retriever = ContentRetrieverForTesting.for_config({
- "config": {
- "Labels": labels,
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [],
- }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)
-
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
- assert manifest.get_manifest_labels(retriever) == labels
-
-
-def test_build_schema1():
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
- assert not manifest.has_remote_layer
-
- retriever = ContentRetrieverForTesting({
- 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7': CONFIG_BYTES,
- })
-
- builder = DockerSchema1ManifestBuilder('somenamespace', 'somename', 'sometag')
- manifest._populate_schema1_builder(builder, retriever)
- schema1 = builder.build(docker_v2_signing_key)
-
- assert schema1.media_type == DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
-
-
-def test_get_schema1_manifest():
- retriever = ContentRetrieverForTesting.for_config({
- "config": {
- "Labels": {},
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "foo"
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "bar"
- },
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "foo"
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "bar"
- },
- ],
- }, 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7', 1885)
-
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES))
- schema1 = manifest.get_schema1_manifest('somenamespace', 'somename', 'sometag', retriever)
- assert schema1 is not None
- assert schema1.media_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
-
- via_convert = manifest.convert_manifest([schema1.media_type], 'somenamespace', 'somename',
- 'sometag', retriever)
- assert via_convert.digest == schema1.digest
-
-
-def test_generate_legacy_layers():
- builder = DockerSchema2ManifestBuilder()
- builder.add_layer('sha256:abc123', 123)
- builder.add_layer('sha256:def456', 789)
- builder.set_config_digest('sha256:def456', 2000)
- manifest = builder.build()
-
- retriever = ContentRetrieverForTesting.for_config({
- "config": {
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "base"
- },
- {
- "created": "2018-04-06T18:37:09.284840891Z",
- "created_by": "middle",
- "empty_layer": True,
- },
- {
- "created": "2018-04-12T18:37:09.284840891Z",
- "created_by": "leaf"
- },
- ],
- }, 'sha256:def456', 2000)
-
- legacy_layers = list(manifest.generate_legacy_layers({}, retriever))
- assert len(legacy_layers) == 3
- assert legacy_layers[0].content_checksum == 'sha256:abc123'
- assert legacy_layers[1].content_checksum == EMPTY_LAYER_BLOB_DIGEST
- assert legacy_layers[2].content_checksum == 'sha256:def456'
-
- assert legacy_layers[0].created == "2018-04-03T18:37:09.284840891Z"
- assert legacy_layers[1].created == "2018-04-06T18:37:09.284840891Z"
- assert legacy_layers[2].created == "2018-04-12T18:37:09.284840891Z"
-
- assert legacy_layers[0].command == '["base"]'
- assert legacy_layers[1].command == '["middle"]'
- assert legacy_layers[2].command == '["leaf"]'
-
- assert legacy_layers[2].parent_image_id == legacy_layers[1].image_id
- assert legacy_layers[1].parent_image_id == legacy_layers[0].image_id
- assert legacy_layers[0].parent_image_id is None
-
- assert legacy_layers[1].image_id != legacy_layers[2]
- assert legacy_layers[0].image_id != legacy_layers[1]
-
-
-def test_remote_layer_manifest():
- builder = DockerSchema2ManifestBuilder()
- builder.set_config_digest('sha256:abcd', 1234)
- builder.add_layer('sha256:adef', 1234, urls=['http://some/url'])
- builder.add_layer('sha256:1352', 4567)
- builder.add_layer('sha256:1353', 4567)
- manifest = builder.build()
-
- assert manifest.has_remote_layer
- assert manifest.get_leaf_layer_v1_image_id(None) is None
- assert manifest.get_legacy_image_ids(None) is None
- assert not manifest.has_legacy_image
-
- schema1 = manifest.get_schema1_manifest('somenamespace', 'somename', 'sometag', None)
- assert schema1 is None
-
- assert set(manifest.blob_digests) == {'sha256:adef', 'sha256:abcd', 'sha256:1352', 'sha256:1353'}
- assert set(manifest.local_blob_digests) == {'sha256:abcd', 'sha256:1352', 'sha256:1353'}
-
-
-def test_unencoded_unicode_manifest():
- builder = DockerSchema2ManifestBuilder()
- builder.add_layer('sha256:abc123', 123)
- builder.set_config_digest('sha256:def456', 2000)
- manifest = builder.build()
-
- retriever = ContentRetrieverForTesting.for_config({
- "config": {
- "author": u"Sômé guy",
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "base",
- "author": u"Sômé guy",
- },
- ],
- }, 'sha256:def456', 2000, ensure_ascii=False)
-
- layers = list(manifest.get_layers(retriever))
- assert layers[0].author == u"Sômé guy"
-
-
-def test_build_unencoded_unicode_manifest():
- config_json = json.dumps({
- "config": {
- "author": u"Sômé guy",
- },
- "rootfs": {"type": "layers", "diff_ids": []},
- "history": [
- {
- "created": "2018-04-03T18:37:09.284840891Z",
- "created_by": "base",
- "author": u"Sômé guy",
- },
- ],
- }, ensure_ascii=False)
-
- schema2_config = DockerSchema2Config(Bytes.for_string_or_unicode(config_json))
-
- builder = DockerSchema2ManifestBuilder()
- builder.set_config(schema2_config)
- builder.add_layer('sha256:abc123', 123)
- builder.build()
-
-
-def test_load_unicode_manifest():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'unicode_manifest_config.json'), 'r') as f:
- retriever = ContentRetrieverForTesting()
- retriever.add_digest('sha256:5bdd65cdd055c7f3bbaecdc9fd6c75f155322520f85953aa0e2724cab006d407',
- f.read())
-
- with open(os.path.join(test_dir, 'unicode_manifest.json'), 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema2Manifest(Bytes.for_string_or_unicode(manifest_bytes))
- assert manifest.digest == 'sha256:97556fa8c553395bd9d8e19a04acef4716ca287ffbf6bde14dd9966053912613'
-
- layers = list(manifest.get_layers(retriever))
- assert layers[-1].author == u"Sômé guy"
diff --git a/image/docker/schema2/test/unicode_manifest.json b/image/docker/schema2/test/unicode_manifest.json
deleted file mode 100644
index c783cce2f..000000000
--- a/image/docker/schema2/test/unicode_manifest.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "schemaVersion": 2,
- "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
- "config": {
- "mediaType": "application/vnd.docker.container.image.v1+json",
- "size": 1661,
- "digest": "sha256:5bdd65cdd055c7f3bbaecdc9fd6c75f155322520f85953aa0e2724cab006d407"
- },
- "layers": [
- {
- "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
- "size": 727978,
- "digest": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
- }
- ]
-}
\ No newline at end of file
diff --git a/image/docker/schema2/test/unicode_manifest_config.json b/image/docker/schema2/test/unicode_manifest_config.json
deleted file mode 100644
index d7df096a2..000000000
--- a/image/docker/schema2/test/unicode_manifest_config.json
+++ /dev/null
@@ -1 +0,0 @@
-{"architecture":"amd64","author":"Sômé guy","config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"ArgsEscaped":true,"Image":"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":null},"container":"de786c5a14d0622c39dd9639abf60a4ee299ed0ee4ef3848342f46f13a77d2c8","container_config":{"Hostname":"de786c5a14d0","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ","MAINTAINER Sômé guy"],"ArgsEscaped":true,"Image":"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2018-12-17T19:02:18.9295865Z","docker_version":"17.09.0-ce","history":[{"created":"2018-10-02T17:19:34.03981888Z","created_by":"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / "},{"created":"2018-10-02T17:19:34.239926273Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true},{"created":"2018-12-17T19:02:18.9295865Z","author":"Sômé guy","created_by":"/bin/sh -c #(nop) MAINTAINER Sômé guy","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:8a788232037eaf17794408ff3df6b922a1aedf9ef8de36afdae3ed0b0381907b"]}}
\ No newline at end of file
diff --git a/image/docker/schemas.py b/image/docker/schemas.py
deleted file mode 100644
index ab0d952f9..000000000
--- a/image/docker/schemas.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from image.docker import ManifestException
-from image.docker.schema1 import DockerSchema1Manifest, DOCKER_SCHEMA1_CONTENT_TYPES
-from image.docker.schema2 import (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
- DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE)
-from image.docker.schema2.manifest import DockerSchema2Manifest
-from image.docker.schema2.list import DockerSchema2ManifestList
-from util.bytes import Bytes
-
-
-def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True):
- """ Parses and returns a manifest from the given bytes, for the given media type.
- Raises a ManifestException if the parse fails for some reason.
- """
- assert isinstance(manifest_bytes, Bytes)
-
- if media_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
- return DockerSchema2Manifest(manifest_bytes)
-
- if media_type == DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE:
- return DockerSchema2ManifestList(manifest_bytes)
-
- if media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
- return DockerSchema1Manifest(manifest_bytes, validate=validate)
-
- raise ManifestException('Unknown or unsupported manifest media type `%s`' % media_type)
diff --git a/image/docker/schemautil.py b/image/docker/schemautil.py
deleted file mode 100644
index adfa021c7..000000000
--- a/image/docker/schemautil.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import json
-
-from image.docker.interfaces import ContentRetriever
-
-class ContentRetrieverForTesting(ContentRetriever):
- def __init__(self, digests=None):
- self.digests = digests or {}
-
- def add_digest(self, digest, content):
- self.digests[digest] = content
-
- def get_manifest_bytes_with_digest(self, digest):
- return self.digests.get(digest)
-
- def get_blob_bytes_with_digest(self, digest):
- return self.digests.get(digest)
-
- @classmethod
- def for_config(cls, config_obj, digest, size, ensure_ascii=True):
- config_str = json.dumps(config_obj, ensure_ascii=ensure_ascii)
- padded_string = config_str + ' ' * (size - len(config_str))
- digests = {}
- digests[digest] = padded_string
- return ContentRetrieverForTesting(digests)
-
-
-class _CustomEncoder(json.JSONEncoder):
- def encode(self, o):
- encoded = super(_CustomEncoder, self).encode(o)
- if isinstance(o, basestring):
- encoded = encoded.replace('<', '\\u003c')
- encoded = encoded.replace('>', '\\u003e')
- encoded = encoded.replace('&', '\\u0026')
- return encoded
-
-
-def to_canonical_json(value, ensure_ascii=True, indent=None):
- """ Returns the canonical JSON string form of the given value,
- as per the guidelines in https://github.com/docker/distribution/blob/master/docs/spec/json.md.
-
- `indent` is allowed only for the purposes of indenting for debugging.
- """
- return json.dumps(value, ensure_ascii=ensure_ascii, sort_keys=True, separators=(',', ':'),
- cls=_CustomEncoder, indent=indent)
diff --git a/image/docker/squashed.py b/image/docker/squashed.py
index a5f5f0c64..31370513e 100644
--- a/image/docker/squashed.py
+++ b/image/docker/squashed.py
@@ -28,29 +28,30 @@ class SquashedDockerImageFormatter(TarImageFormatter):
# daemon dies when trying to load the entire tar into memory.
SIZE_MULTIPLIER = 1.2
- def stream_generator(self, tag, parsed_manifest, synthetic_image_id, layer_iterator,
- tar_stream_getter_iterator, reporter=None):
+ def stream_generator(self, repo_image, tag, synthetic_image_id, get_image_iterator,
+ get_layer_iterator):
image_mtime = 0
- created = parsed_manifest.created_datetime
+ created = next(get_image_iterator()).v1_metadata.created
if created is not None:
image_mtime = calendar.timegm(created.utctimetuple())
+
# Docker import V1 Format (.tar):
# repositories - JSON file containing a repo -> tag -> image map
# {image ID folder}:
# json - The layer JSON
# layer.tar - The tarballed contents of the layer
# VERSION - The docker import version: '1.0'
- layer_merger = StreamLayerMerger(tar_stream_getter_iterator, reporter=reporter)
+ layer_merger = StreamLayerMerger(get_layer_iterator)
# Yield the repositories file:
synthetic_layer_info = {}
- synthetic_layer_info[tag.name + '.squash'] = synthetic_image_id
+ synthetic_layer_info[tag + '.squash'] = synthetic_image_id
hostname = app.config['SERVER_HOSTNAME']
repositories = {}
- namespace = tag.repository.namespace_name
- repository = tag.repository.name
+ namespace = repo_image.repository.namespace_name
+ repository = repo_image.repository.name
repositories[hostname + '/' + namespace + '/' + repository] = synthetic_layer_info
yield self.tar_file('repositories', json.dumps(repositories), mtime=image_mtime)
@@ -59,7 +60,7 @@ class SquashedDockerImageFormatter(TarImageFormatter):
yield self.tar_folder(synthetic_image_id, mtime=image_mtime)
# Yield the JSON layer data.
- layer_json = SquashedDockerImageFormatter._build_layer_json(parsed_manifest, synthetic_image_id)
+ layer_json = SquashedDockerImageFormatter._build_layer_json(repo_image, synthetic_image_id)
yield self.tar_file(synthetic_image_id + '/json', json.dumps(layer_json), mtime=image_mtime)
# Yield the VERSION file.
@@ -67,8 +68,16 @@ class SquashedDockerImageFormatter(TarImageFormatter):
# Yield the merged layer data's header.
estimated_file_size = 0
- for layer in layer_iterator:
- estimated_file_size += layer.estimated_size(SquashedDockerImageFormatter.SIZE_MULTIPLIER)
+ for image in get_image_iterator():
+ # In V1 we have the actual uncompressed size, which is needed for back compat with
+ # older versions of Docker.
+ # In V2, we use the size given in the image JSON.
+ if image.blob.uncompressed_size:
+ estimated_file_size += image.blob.uncompressed_size
+ else:
+ image_json = image.compat_metadata
+ estimated_file_size += (image_json.get('Size', 0) *
+ SquashedDockerImageFormatter.SIZE_MULTIPLIER)
# Make sure the estimated file size is an integer number of bytes.
estimated_file_size = int(math.ceil(estimated_file_size))
@@ -85,14 +94,7 @@ class SquashedDockerImageFormatter(TarImageFormatter):
# If the yielded size is more than the estimated size (which is unlikely but possible), then
# raise an exception since the tar header will be wrong.
if yielded_size > estimated_file_size:
- leaf_image_id = parsed_manifest.leaf_layer_v1_image_id
- message = "For %s/%s:%s (%s:%s): Expected %s bytes, found %s bytes" % (namespace,
- repository,
- tag,
- parsed_manifest.digest,
- leaf_image_id,
- estimated_file_size,
- yielded_size)
+ message = "Expected %s bytes, found %s bytes" % (estimated_file_size, yielded_size)
raise FileEstimationException(message)
# If the yielded size is less than the estimated size (which is likely), fill the rest with
@@ -113,8 +115,9 @@ class SquashedDockerImageFormatter(TarImageFormatter):
@staticmethod
- def _build_layer_json(manifest, synthetic_image_id):
- updated_json = json.loads(manifest.leaf_layer.raw_v1_metadata)
+ def _build_layer_json(repo_image, synthetic_image_id):
+ layer_json = repo_image.compat_metadata
+ updated_json = copy.deepcopy(layer_json)
updated_json['id'] = synthetic_image_id
if 'parent' in updated_json:
diff --git a/image/docker/test/__init__.py b/image/docker/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/image/docker/test/manifest_unencoded_unicode.json b/image/docker/test/manifest_unencoded_unicode.json
deleted file mode 100644
index 5b3110c27..000000000
--- a/image/docker/test/manifest_unencoded_unicode.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "devtable/testimage",
- "tag": "latest",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:90e01955edcd85dac7985b72a8374545eac617ccdddcc992b732e43cd42534af"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"amd64\",\"author\":\"Sômé guy\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":null},\"container\":\"de786c5a14d0622c39dd9639abf60a4ee299ed0ee4ef3848342f46f13a77d2c8\",\"container_config\":{\"Hostname\":\"de786c5a14d0\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"MAINTAINER Sômé guy\"],\"ArgsEscaped\":true,\"Image\":\"sha256:59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2018-12-17T19:02:18.9295865Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"b68e6d1f5027887177ddf83c2b9566e1f9eb38454af649b2c0806d13c4c2f01d\",\"os\":\"linux\",\"parent\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"61b2663f44edc9a6af340b9bfd46d17d8ed2574ffe289e0d95c0476da3c6faac\",\"parent\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.239926273Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"5327db1e651c0f49157ace3ffd8569c7361b1f2e61d0b49ff617e83a42bf78d6\",\"created\":\"2018-10-02T17:19:34.03981888Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:63eebd629a5f7558c361be0305df5f16baac1d3bbec014b7c486e28812441969 in / \"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
- "kty": "EC",
- "x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
- "y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
- },
- "alg": "ES256"
- },
- "signature": "eAhgOTAxmWLK25O5lfpJA9ZuTvEdm-E-8qS4pbaYkKwWq9Nc0iLmJ9tKy3QBWP0QtXmK8dz2J0CpCvV0xCheSw",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjI2MTQsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0xMi0xN1QxOToxMDo1M1oifQ"
- }
- ]
-}
\ No newline at end of file
diff --git a/image/docker/test/manifest_unicode_row.json b/image/docker/test/manifest_unicode_row.json
deleted file mode 100644
index 9d6b663c0..000000000
--- a/image/docker/test/manifest_unicode_row.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"id":"13080314","tag_id":"93362429","digest":"sha256:dde3714ce7e23edc6413aa85c0b42792e4f2f79e9ea36afc154d63ff3d04e86c","json_data":"{\n \"schemaVersion\": 1,\n \"name\": \"josephschorr\/buildtest2\",\n \"tag\": \"unicode\",\n \"architecture\": \"amd64\",\n \"fsLayers\": [\n {\n \"blobSum\": \"sha256:9dcda8e13dc6f3aa30ce7867d8a9e3941dc3a54cfefb5e76cbdfa90d2b56ed2f\"\n },\n {\n \"blobSum\": \"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4\"\n },\n {\n \"blobSum\": \"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4\"\n },\n {\n \"blobSum\": \"sha256:8c5a7da1afbc602695fcb2cd6445743cec5ff32053ea589ea9bd8773b7068185\"\n }\n ],\n \"history\": [\n {\n \"v1Compatibility\": \"{\\\"architecture\\\":\\\"amd64\\\",\\\"config\\\":{\\\"Hostname\\\":\\\"\\\",\\\"Domainname\\\":\\\"\\\",\\\"User\\\":\\\"\\\",\\\"AttachStdin\\\":false,\\\"AttachStdout\\\":false,\\\"AttachStderr\\\":false,\\\"Tty\\\":false,\\\"OpenStdin\\\":false,\\\"StdinOnce\\\":false,\\\"Env\\\":[\\\"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\\\"],\\\"Cmd\\\":[\\\"sh\\\"],\\\"ArgsEscaped\\\":true,\\\"Image\\\":\\\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\\\",\\\"Volumes\\\":null,\\\"WorkingDir\\\":\\\"\\\",\\\"Entrypoint\\\":null,\\\"OnBuild\\\":[],\\\"Labels\\\":{\\\"maintainer\\\":\\\"Ge\u00e9 L\u00e9fleur\\\"}},\\\"container\\\":\\\"654ee2461cf64a54484624d8b7efbb76c5e197ba6f3322538b6810dad097c11f\\\",\\\"container_config\\\":{\\\"Hostname\\\":\\\"\\\",\\\"Domainname\\\":\\\"\\\",\\\"User\\\":\\\"\\\",\\\"AttachStdin\\\":false,\\\"AttachStdout\\\":false,\\\"AttachStderr\\\":false,\\\"Tty\\\":false,\\\"OpenStdin\\\":false,\\\"StdinOnce\\\":false,\\\"Env\\\":[\\\"PATH=\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\\\"],\\\"Cmd\\\":[\\\"\/bin\/sh\\\",\\\"-c\\\",\\\"echo foo \\\\u003e bar\\\"],\\\"ArgsEscaped\\\":true,\\\"Image\\\":\\\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\\\",\\\"Volumes\\\":null,\\\"WorkingDir\\\":\\\"\\\",\\\"Entrypoint\\\":null,\\\"OnBuild\\\":[],\\\"Labels\\\":{\\\"maintainer\\\":\\\"Ge\u00e9 L\u00e9fleur\\\"}},\\\"created\\\":\\\"2018-08-14T22:17:55.7294283Z\\\",\\\"docker_version\\\":\\\"17.09.0-ce\\\",\\\"id\\\":\\\"db077d203993a3a1cfeaf4bbaedb34ff1a706452cb598c62d2873ba78dd0d2fe\\\",\\\"os\\\":\\\"linux\\\",\\\"parent\\\":\\\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\\\"}\"\n },\n {\n \"v1Compatibility\": \"{\\\"id\\\":\\\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\\\",\\\"parent\\\":\\\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\\\",\\\"created\\\":\\\"2018-08-14T22:17:54.5902216Z\\\",\\\"container_config\\\":{\\\"Cmd\\\":[\\\"\/bin\/sh -c #(nop) LABEL maintainer=Ge\u00e9 L\u00e9fleur\\\"]},\\\"throwaway\\\":true}\"\n },\n {\n \"v1Compatibility\": \"{\\\"id\\\":\\\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\\\",\\\"parent\\\":\\\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\\\",\\\"created\\\":\\\"2018-07-31T22:20:07.617575594Z\\\",\\\"container_config\\\":{\\\"Cmd\\\":[\\\"\/bin\/sh -c #(nop) CMD [\\\\\\\"sh\\\\\\\"]\\\"]},\\\"throwaway\\\":true}\"\n },\n {\n \"v1Compatibility\": \"{\\\"id\\\":\\\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\\\",\\\"created\\\":\\\"2018-07-31T22:20:07.361628468Z\\\",\\\"container_config\\\":{\\\"Cmd\\\":[\\\"\/bin\/sh -c #(nop) ADD 
file:96fda64a6b725d4df5249c12e32245e2f02469ff637c38077740f4984cd883dd in \/ \\\"]}}\"\n }\n ],\n \"signatures\": [\n {\n \"header\": {\n \"jwk\": {\n \"crv\": \"P-256\",\n \"kid\": \"AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX\",\n \"kty\": \"EC\",\n \"x\": \"34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY\",\n \"y\": \"LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8\"\n },\n \"alg\": \"ES256\"\n },\n \"signature\": \"XvA_yxSPZwnln-pl_VyT5HgfC_NRnVj2IDZjnPy4NRm99Ik82jjliZmoNL4g54AGe3CUD4i6eJiDdCgSCqjxQw\",\n \"protected\": \"eyJmb3JtYXRMZW5ndGgiOjMwODAsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wOC0xNFQyMjoyNTo0M1oifQ\"\n }\n ]\n}"}]
diff --git a/image/docker/test/test_schema1.py b/image/docker/test/test_schema1.py
deleted file mode 100644
index 2766cdaac..000000000
--- a/image/docker/test/test_schema1.py
+++ /dev/null
@@ -1,311 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import json
-
-import pytest
-
-from app import docker_v2_signing_key
-from image.docker.schema1 import (MalformedSchema1Manifest, DockerSchema1Manifest,
- DockerSchema1ManifestBuilder)
-from util.bytes import Bytes
-
-
-@pytest.mark.parametrize('json_data', [
- '',
- '{}',
- """
- {
- "unknown": "key"
- }
- """,
-])
-def test_malformed_manifests(json_data):
- with pytest.raises(MalformedSchema1Manifest):
- DockerSchema1Manifest(Bytes.for_string_or_unicode(json_data))
-
-
-MANIFEST_BYTES = json.dumps({
- "name": 'hello-world',
- "tag": "latest",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11"
- },
- {
- "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"id\":\"someid\", \"parent\": \"anotherid\"}"
- },
- {
- "v1Compatibility": "{\"id\":\"anotherid\"}"
- },
- ],
- "schemaVersion": 1,
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4",
- "kty": "EC",
- "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A",
- "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010"
- },
- "alg": "ES256"
- },
- "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ"
- }
- ]
-})
-
-
-def test_valid_manifest():
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(MANIFEST_BYTES), validate=False)
- assert len(manifest.signatures) == 1
- assert manifest.namespace == ''
- assert manifest.repo_name == 'hello-world'
- assert manifest.tag == 'latest'
- assert manifest.image_ids == {'someid', 'anotherid'}
- assert manifest.parent_image_ids == {'anotherid'}
-
- assert len(manifest.layers) == 2
-
- assert manifest.layers[0].v1_metadata.image_id == 'anotherid'
- assert manifest.layers[0].v1_metadata.parent_image_id is None
-
- assert manifest.layers[1].v1_metadata.image_id == 'someid'
- assert manifest.layers[1].v1_metadata.parent_image_id == 'anotherid'
-
- assert manifest.layers[0].compressed_size is None
- assert manifest.layers[1].compressed_size is None
-
- assert manifest.leaf_layer == manifest.layers[1]
- assert manifest.created_datetime is None
-
- unsigned = manifest.unsigned()
- assert unsigned.namespace == manifest.namespace
- assert unsigned.repo_name == manifest.repo_name
- assert unsigned.tag == manifest.tag
- assert unsigned.layers == manifest.layers
- assert unsigned.blob_digests == manifest.blob_digests
- assert unsigned.digest != manifest.digest
-
- image_layers = list(manifest.get_layers(None))
- assert len(image_layers) == 2
- for index in range(0, 2):
- assert image_layers[index].layer_id == manifest.layers[index].v1_metadata.image_id
- assert image_layers[index].blob_digest == manifest.layers[index].digest
- assert image_layers[index].command == manifest.layers[index].v1_metadata.command
-
-
-def test_validate_manifest():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'validated_manifest.json'), 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
- digest = manifest.digest
- assert digest == 'sha256:b5dc4f63fdbd64f34f2314c0747ef81008f9fcddce4edfc3fd0e8ec8b358d571'
- assert manifest.created_datetime
-
-
-def test_validate_manifest_with_unicode():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'validated_manifest_with_unicode.json'), 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
- digest = manifest.digest
- assert digest == 'sha256:815ecf45716a96b19d54d911e6ace91f78bab26ca0dd299645d9995dacd9f1ef'
- assert manifest.created_datetime
-
-
-def test_validate_manifest_with_unicode_encoded():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'manifest_unicode_row.json'), 'r') as f:
- manifest_bytes = json.loads(f.read())[0]['json_data']
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
- digest = manifest.digest
- assert digest == 'sha256:dde3714ce7e23edc6413aa85c0b42792e4f2f79e9ea36afc154d63ff3d04e86c'
- assert manifest.created_datetime
-
-
-def test_validate_manifest_with_unencoded_unicode():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'manifest_unencoded_unicode.json'), 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
- digest = manifest.digest
- assert digest == 'sha256:5d8a0f34744a39bf566ba430251adc0cc86587f86aed3ac2acfb897f349777bc'
- assert manifest.created_datetime
-
- layers = list(manifest.get_layers(None))
- assert layers[-1].author == u'Sômé guy'
-
-
-@pytest.mark.parametrize('with_key', [
- None,
- docker_v2_signing_key,
-])
-def test_build_unencoded_unicode_manifest(with_key):
- builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
- builder.add_layer('sha256:abcde', json.dumps({
- 'id': 'someid',
- 'author': u'Sômé guy',
- }, ensure_ascii=False))
-
- built = builder.build(with_key, ensure_ascii=False)
- built._validate()
-
-
-def test_validate_manifest_known_issue():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'validate_manifest_known_issue.json'), 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes))
- digest = manifest.digest
- assert digest == 'sha256:44518f5a4d1cb5b7a6347763116fb6e10f6a8563b6c40bb389a0a982f0a9f47a'
- assert manifest.created_datetime
-
- layers = list(manifest.get_layers(None))
- assert layers[-1].author is None
-
-
-@pytest.mark.parametrize('with_key', [
- None,
- docker_v2_signing_key,
-])
-def test_validate_manifest_with_emoji(with_key):
- builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
- builder.add_layer('sha256:abcde', json.dumps({
- 'id': 'someid',
- 'author': u'😱',
- }, ensure_ascii=False))
-
- built = builder.build(with_key, ensure_ascii=False)
- built._validate()
-
- # Ensure the manifest can be reloaded.
- built_bytes = built.bytes.as_encoded_str()
- DockerSchema1Manifest(Bytes.for_string_or_unicode(built_bytes))
-
-
-@pytest.mark.parametrize('with_key', [
- None,
- docker_v2_signing_key,
-])
-def test_validate_manifest_with_none_metadata_layer(with_key):
- builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
- builder.add_layer('sha256:abcde', None)
-
- built = builder.build(with_key, ensure_ascii=False)
- built._validate()
-
- # Ensure the manifest can be reloaded.
- built_bytes = built.bytes.as_encoded_str()
- DockerSchema1Manifest(Bytes.for_string_or_unicode(built_bytes))
-
-
-def test_build_with_metadata_removed():
- builder = DockerSchema1ManifestBuilder('somenamespace', 'somerepo', 'sometag')
- builder.add_layer('sha256:abcde', json.dumps({
- 'id': 'someid',
- 'parent': 'someid',
- 'author': u'😱',
- 'comment': 'hello world!',
- 'created': '1975-01-02 12:34',
- 'Size': 5678,
- 'container_config': {
- 'Cmd': 'foobar',
- 'more': 'stuff',
- 'goes': 'here',
- },
- }))
- builder.add_layer('sha256:abcde', json.dumps({
- 'id': 'anotherid',
- 'author': u'😱',
- 'created': '1985-02-03 12:34',
- 'Size': 1234,
- 'container_config': {
- 'Cmd': 'barbaz',
- 'more': 'stuff',
- 'goes': 'here',
- },
- }))
-
- built = builder.build(None)
- built._validate()
-
- assert built.leaf_layer_v1_image_id == 'someid'
-
- with_metadata_removed = builder.with_metadata_removed().build()
- with_metadata_removed._validate()
-
- built_layers = list(built.get_layers(None))
- with_metadata_removed_layers = list(with_metadata_removed.get_layers(None))
-
- assert len(built_layers) == len(with_metadata_removed_layers)
- for index, built_layer in enumerate(built_layers):
- with_metadata_removed_layer = with_metadata_removed_layers[index]
-
- assert built_layer.layer_id == with_metadata_removed_layer.layer_id
- assert built_layer.compressed_size == with_metadata_removed_layer.compressed_size
- assert built_layer.command == with_metadata_removed_layer.command
- assert built_layer.comment == with_metadata_removed_layer.comment
- assert built_layer.author == with_metadata_removed_layer.author
- assert built_layer.blob_digest == with_metadata_removed_layer.blob_digest
- assert built_layer.created_datetime == with_metadata_removed_layer.created_datetime
-
- assert built.leaf_layer_v1_image_id == with_metadata_removed.leaf_layer_v1_image_id
- assert built_layers[-1].layer_id == built.leaf_layer_v1_image_id
-
- assert (json.loads(built_layers[-1].internal_layer.raw_v1_metadata) ==
- json.loads(with_metadata_removed_layers[-1].internal_layer.raw_v1_metadata))
-
-
-def test_validate_manifest_without_metadata():
- test_dir = os.path.dirname(os.path.abspath(__file__))
- with open(os.path.join(test_dir, 'validated_manifest.json'), 'r') as f:
- manifest_bytes = f.read()
-
- manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(manifest_bytes), validate=True)
- digest = manifest.digest
- assert digest == 'sha256:b5dc4f63fdbd64f34f2314c0747ef81008f9fcddce4edfc3fd0e8ec8b358d571'
- assert manifest.created_datetime
-
- with_metadata_removed = manifest._unsigned_builder().with_metadata_removed().build()
- assert with_metadata_removed.leaf_layer_v1_image_id == manifest.leaf_layer_v1_image_id
-
- manifest_layers = list(manifest.get_layers(None))
- with_metadata_removed_layers = list(with_metadata_removed.get_layers(None))
-
- assert len(manifest_layers) == len(with_metadata_removed_layers)
- for index, built_layer in enumerate(manifest_layers):
- with_metadata_removed_layer = with_metadata_removed_layers[index]
-
- assert built_layer.layer_id == with_metadata_removed_layer.layer_id
- assert built_layer.compressed_size == with_metadata_removed_layer.compressed_size
- assert built_layer.command == with_metadata_removed_layer.command
- assert built_layer.comment == with_metadata_removed_layer.comment
- assert built_layer.author == with_metadata_removed_layer.author
- assert built_layer.blob_digest == with_metadata_removed_layer.blob_digest
- assert built_layer.created_datetime == with_metadata_removed_layer.created_datetime
-
- assert with_metadata_removed.digest != manifest.digest
-
- assert with_metadata_removed.namespace == manifest.namespace
- assert with_metadata_removed.repo_name == manifest.repo_name
- assert with_metadata_removed.tag == manifest.tag
- assert with_metadata_removed.created_datetime == manifest.created_datetime
- assert with_metadata_removed.checksums == manifest.checksums
- assert with_metadata_removed.image_ids == manifest.image_ids
- assert with_metadata_removed.parent_image_ids == manifest.parent_image_ids
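The deleted tests above pin down a detail of schema1 parsing worth keeping visible: the manifest JSON lists `fsLayers`/`history` leaf-image-first, while the parsed `layers` property comes back base-first (`layers[0]` is the parent image, `layers[-1]` the leaf). A minimal sketch of that pairing-and-reversal, independent of the removed `DockerSchema1Manifest` class — the helper name and dict shape here are illustrative only:

```python
import json

def schema1_layers_base_first(manifest_bytes):
    """Pair each fsLayers blob with its v1Compatibility entry, base first.

    Schema1 JSON lists the leaf layer first; the deleted tests expect
    layers[0] to be the parent and layers[-1] the leaf.
    """
    parsed = json.loads(manifest_bytes)
    pairs = list(zip(parsed['fsLayers'], parsed['history']))
    layers = []
    for fs_layer, history in reversed(pairs):
        # Each history entry carries the v1 image metadata as a JSON string.
        v1_metadata = json.loads(history['v1Compatibility'])
        layers.append({
            'image_id': v1_metadata.get('id'),
            'parent_image_id': v1_metadata.get('parent'),
            'blob_digest': fs_layer['blobSum'],
        })
    return layers
```

Fed the `MANIFEST_BYTES` fixture above, this yields `anotherid` first and `someid` last, matching the ordering `test_valid_manifest` asserted.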
diff --git a/image/docker/test/test_schemas.py b/image/docker/test/test_schemas.py
deleted file mode 100644
index def881984..000000000
--- a/image/docker/test/test_schemas.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pytest
-
-from image.docker.schemas import parse_manifest_from_bytes
-from image.docker.schema1 import DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE
-from image.docker.schema2 import DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE
-from image.docker.schema2 import DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE
-from image.docker.test.test_schema1 import MANIFEST_BYTES as SCHEMA1_BYTES
-from image.docker.schema2.test.test_list import MANIFESTLIST_BYTES
-from image.docker.schema2.test.test_manifest import MANIFEST_BYTES as SCHEMA2_BYTES
-from util.bytes import Bytes
-
-
-@pytest.mark.parametrize('media_type, manifest_bytes', [
- (DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE, SCHEMA1_BYTES),
- (DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, SCHEMA2_BYTES),
- (DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE, MANIFESTLIST_BYTES),
-])
-def test_parse_manifest_from_bytes(media_type, manifest_bytes):
- assert parse_manifest_from_bytes(Bytes.for_string_or_unicode(manifest_bytes), media_type,
- validate=False)
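The deleted test exercised `parse_manifest_from_bytes` as a dispatch on the manifest media type. A sketch of that dispatch pattern follows; the parser callables are stand-ins rather than the real schema classes, and only the media-type strings are the actual Docker content types:

```python
import json

DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v1+prettyjws'
DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.v2+json'
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE = 'application/vnd.docker.distribution.manifest.list.v2+json'

# Stand-in table: the real implementation maps each content type to its
# schema class; plain json.loads is used here only to keep the sketch runnable.
_PARSERS_BY_MEDIA_TYPE = {
    DOCKER_SCHEMA1_SIGNED_MANIFEST_CONTENT_TYPE: json.loads,
    DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE: json.loads,
    DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE: json.loads,
}

def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True):
    # `validate` mirrors the signature the test calls; unused in this sketch.
    if media_type not in _PARSERS_BY_MEDIA_TYPE:
        raise ValueError('Unsupported manifest media type: %s' % media_type)
    return _PARSERS_BY_MEDIA_TYPE[media_type](manifest_bytes)
```

Rejecting unknown content types up front, before any parsing, is the property the parametrized test effectively guarded.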
diff --git a/image/docker/test/test_schemautil.py b/image/docker/test/test_schemautil.py
deleted file mode 100644
index 360a74bb7..000000000
--- a/image/docker/test/test_schemautil.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import pytest
-
-from image.docker.schemautil import to_canonical_json
-
-@pytest.mark.parametrize('input, expected_output', [
- pytest.param({}, '{}', id='empty object'),
- pytest.param({'b': 2, 'a': 1}, '{"a":1,"b":2}', id='object with sorted keys'),
- pytest.param('hello world', '"hello world"', id='basic string'),
- pytest.param('hey & hi', '"hey \\u0026 hi"', id='string with &'),
- pytest.param('<hey>', '"\\u003chey\\u003e"', id='string with brackets'),
- pytest.param({
- "zxcv": [{}, True, 1000000000, 'tyui'],
- "asdf": 1,
- "qwer": [],
- }, '{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}', id='example canonical'),
-])
-def test_to_canonical_json(input, expected_output):
- result = to_canonical_json(input)
- assert result == expected_output
-
- # Ensure the result is utf-8.
- assert isinstance(result, str)
- result.decode('utf-8')
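The expectations in this deleted test fully pin down what `to_canonical_json` produces: keys sorted, no insignificant whitespace, and `&`, `<`, `>` escaped the way Go's `encoding/json` does. A Python 3 sketch that satisfies those exact cases — the escape-by-replace approach is an assumption, not the removed implementation:

```python
import json

def to_canonical_json(value):
    # Sorted keys and compact separators give deterministic, minimal output.
    encoded = json.dumps(value, sort_keys=True, separators=(',', ':'),
                         ensure_ascii=False)
    # Mirror Go's HTML-safe JSON encoding, which the test cases expect:
    # '&' -> \u0026, '<' -> \u003c, '>' -> \u003e.
    return (encoded.replace('&', '\\u0026')
                   .replace('<', '\\u003c')
                   .replace('>', '\\u003e'))
```

Running the parametrized inputs through this sketch reproduces every expected output above, e.g. `to_canonical_json({'b': 2, 'a': 1})` returns `'{"a":1,"b":2}'`. (The trailing `result.decode('utf-8')` in the deleted test is Python 2; in Python 3 the result is already a `str`.)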
diff --git a/image/docker/test/validate_manifest_known_issue.json b/image/docker/test/validate_manifest_known_issue.json
deleted file mode 100644
index a54e99b61..000000000
--- a/image/docker/test/validate_manifest_known_issue.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "quaymonitor/monitortest2",
- "tag": "latest",
- "architecture": "x86_64",
- "fsLayers": [
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:184dc3db39b5e19dc39547f43db46ea48cd6cc779e806a3c8a5e5396acd20206"
- },
- {
- "blobSum": "sha256:db80bcab0e8b69656505332fcdff3ef2b9f664a2029d1b2f97224cffcf689afc"
- },
- {
- "blobSum": "sha256:184dc3db39b5e19dc39547f43db46ea48cd6cc779e806a3c8a5e5396acd20206"
- },
- {
- "blobSum": "sha256:f0a98344d604e54694fc6118cf7a0cbd10dc7b2e9be8607ba8c5bfd7ba3c1067"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"x86_64\",\"config\":{\"Hostname\":\"4c9181ab6b87\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"HOME=/\",\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\",\"echo\",\"\\\"2019-01-08 19:13:20 +0000\\\" \\u003e foo\"],\"Image\":\"quay.io/quay/busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"container\":\"4c9181ab6b87fe75b5c0955c6c78983dec337914b05e65fb0073cce0ad076106\",\"container_config\":{\"Hostname\":\"4c9181ab6b87\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"HOME=/\",\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\",\"echo\",\"\\\"2019-01-08 19:13:20 +0000\\\" \\u003e foo\"],\"Image\":\"quay.io/quay/busybox\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2019-01-08T19:13:20.674196032Z\",\"docker_version\":\"18.06.1-ce\",\"id\":\"7da7c4e4bcb121915fb33eb5c76ffef194cdcc14608010692cfce5734bd84751\",\"os\":\"linux\",\"parent\":\"ec75e623647b299585bdb0991293bd446e5545e9a4dabf9d37922d5671d9d860\",\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"ec75e623647b299585bdb0991293bd446e5545e9a4dabf9d37922d5671d9d860\",\"parent\":\"f32bc6daa02c76f0b1773688684bf3bee719a69db06192432e6c28a238f4cf4a\",\"created\":\"2014-02-03T15:58:08.872585903Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [/bin/sh -c /bin/sh]\"]},\"author\":\"Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"}"
- },
- {
- "v1Compatibility": "{\"id\":\"f32bc6daa02c76f0b1773688684bf3bee719a69db06192432e6c28a238f4cf4a\",\"parent\":\"02feaf4fdc57dba2b142dae9d8dd0c90e710be710bea25ce63269e65d8f32872\",\"created\":\"2014-02-03T15:58:08.72383042Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD rootfs.tar in /\"]},\"author\":\"Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"}"
- },
- {
- "v1Compatibility": "{\"id\":\"02feaf4fdc57dba2b142dae9d8dd0c90e710be710bea25ce63269e65d8f32872\",\"parent\":\"f9a6e54178f312aa3686d7305b970e7d908d58b32e3f4554731b647e07b48fd2\",\"created\":\"2014-02-03T15:58:08.52236968Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) MAINTAINER Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"]},\"author\":\"Jérôme Petazzoni \\u003cjerome@docker.com\\u003e\"}"
- },
- {
- "v1Compatibility": "{\"id\":\"f9a6e54178f312aa3686d7305b970e7d908d58b32e3f4554731b647e07b48fd2\",\"comment\":\"Imported from -\",\"created\":\"2013-06-13T14:03:50.821769-07:00\",\"container_config\":{\"Cmd\":[\"\"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "XPAM:RVQE:4LWW:ABXI:QLLK:O2LK:XJ4V:UAOJ:WM24:ZG6J:UIJ3:JAYM",
- "kty": "EC",
- "x": "ijnW3d93SINE1y3GjNsCMYghAb7NT21vSiYK8pWdBkM",
- "y": "7t-mGjoYOhEIGVaCSEclLLkMgHz2S9WXkReZJEBx-_U"
- },
- "alg": "ES256"
- },
- "signature": "N9m-NNL8CdGwxEHHHaJDhbT5_FFKBSdyy-7lP4jnWG3AQmOWbPEXTFANTeH2CNPvAbaM9ZqQm0dQFQVnOe5GNQ",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjM1OTgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOS0wMS0wOFQxOToxMzoyM1oifQ"
- }
- ]
-}
\ No newline at end of file
diff --git a/image/docker/test/validated_manifest.json b/image/docker/test/validated_manifest.json
deleted file mode 100644
index 8462f516e..000000000
--- a/image/docker/test/validated_manifest.json
+++ /dev/null
@@ -1,62 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "josephschorr/buildtest2",
- "tag": "latest",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:f0880d1639d2e72499fe0cfb218a98ca7aa3bffda6e0b808861505a1536cca10"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:8e3ba11ec2a2b39ab372c60c16b421536e50e5ce64a0bc81765c2e38381bcff6"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:ebd938adb98827e85616f288beb990fd9f07335305c3d77ff783253b97d84b99\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{\"foo\":\"bar\",\"this.is.cool\":\"{\\\"some\\\": \\\"json\\\"}\"}},\"container\":\"a06cd9c29efac778d1e670a2d26971cf21360f9c59eb250e771f5852ff9f49ca\",\"container_config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cat baz\"],\"ArgsEscaped\":true,\"Image\":\"sha256:ebd938adb98827e85616f288beb990fd9f07335305c3d77ff783253b97d84b99\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{\"foo\":\"bar\",\"this.is.cool\":\"{\\\"some\\\": \\\"json\\\"}\"}},\"created\":\"2018-08-13T19:19:53.358734989Z\",\"docker_version\":\"18.02.0-ce\",\"id\":\"08b0a1239a30dc9c12585c415538a3a43fab399a07cb341881b46e2fb69ae8f7\",\"os\":\"linux\",\"parent\":\"bc560233cb7ec4158c1858fd24fb093dc70a6fb7ad80b25f2a6f36a2138dd724\",\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"bc560233cb7ec4158c1858fd24fb093dc70a6fb7ad80b25f2a6f36a2138dd724\",\"parent\":\"cefdff8f1be4330d2e0414f598b0f38def3fb5c6a383d3b709162d51efe859b9\",\"created\":\"2018-08-13T19:19:52.919686159Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) LABEL this.is.cool={\\\"some\\\": \\\"json\\\"}\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"cefdff8f1be4330d2e0414f598b0f38def3fb5c6a383d3b709162d51efe859b9\",\"parent\":\"a86432a6eeb137d2342ee5ddcbc0dd32b5e58dfe3301dd09991147bb458ad6a9\",\"created\":\"2018-08-13T19:19:52.834827335Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) LABEL foo=bar\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"a86432a6eeb137d2342ee5ddcbc0dd32b5e58dfe3301dd09991147bb458ad6a9\",\"parent\":\"8b5fc1032bbcc570c28adc9b13525051c83bbf37ce305735f9c7be6e36ebff7d\",\"created\":\"2018-08-13T19:19:52.766315533Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:9990e969595bc050f081c07b0bdf71524f3c46e6ffe8537c1778516c123f9f55 in baz \"]}}"
- },
- {
- "v1Compatibility": "{\"id\":\"8b5fc1032bbcc570c28adc9b13525051c83bbf37ce305735f9c7be6e36ebff7d\",\"parent\":\"f18ee96f0b1656cab52554b270f19e8df5046d307296d2146539c04565d67747\",\"created\":\"2018-07-06T14:14:06.393355914Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/sh\\\"]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"f18ee96f0b1656cab52554b270f19e8df5046d307296d2146539c04565d67747\",\"created\":\"2018-07-06T14:14:06.165546783Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:25f61d70254b9807a40cd3e8d820f6a5ec0e1e596de04e325f6a33810393e95a in / \"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "H4QD:5X6G:2G7T:QXGN:EH3X:3UQU:REXP:7LAH:SGCZ:4FBI:EUSI:3P7Z",
- "kty": "EC",
- "x": "FowcV0YK1Dsn8FldhFJQJnxE247QUH43EchdZSmWrsQ",
- "y": "4uUZBA9U1jC-AxmNzrwb1r9Oh2SXNXE3yqSpz7pwoiI"
- },
- "alg": "ES256"
- },
- "signature": "rJNUkqKUZ2_d2JTWTLu4XWFcNpNIMDEH6qoiOie9o_BlD_Ifhrw31OIUT23eKa-HyVm5sYOfx4DY3N5Xy1kr9A",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjQxMzksImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wOC0xM1QxOToyMDowMVoifQ"
- }
- ]
-}
\ No newline at end of file
diff --git a/image/docker/test/validated_manifest_with_unicode.json b/image/docker/test/validated_manifest_with_unicode.json
deleted file mode 100644
index adc378492..000000000
--- a/image/docker/test/validated_manifest_with_unicode.json
+++ /dev/null
@@ -1,50 +0,0 @@
-{
- "schemaVersion": 1,
- "name": "devtable/simple",
- "tag": "unicode",
- "architecture": "amd64",
- "fsLayers": [
- {
- "blobSum": "sha256:9dcda8e13dc6f3aa30ce7867d8a9e3941dc3a54cfefb5e76cbdfa90d2b56ed2f"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
- },
- {
- "blobSum": "sha256:8c5a7da1afbc602695fcb2cd6445743cec5ff32053ea589ea9bd8773b7068185"
- }
- ],
- "history": [
- {
- "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"maintainer\":\"Geé Léfleur\"}},\"container\":\"654ee2461cf64a54484624d8b7efbb76c5e197ba6f3322538b6810dad097c11f\",\"container_config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"echo foo \\u003e bar\"],\"ArgsEscaped\":true,\"Image\":\"sha256:746d49e88c1eac6e3d3384d73db788f166a51b5a2eb9da49671586f62baf6c0c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"maintainer\":\"Geé Léfleur\"}},\"created\":\"2018-08-14T22:17:55.7294283Z\",\"docker_version\":\"17.09.0-ce\",\"id\":\"db077d203993a3a1cfeaf4bbaedb34ff1a706452cb598c62d2873ba78dd0d2fe\",\"os\":\"linux\",\"parent\":\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\"}"
- },
- {
- "v1Compatibility": "{\"id\":\"539016dae3ce29f825af4d27a60b8d42306a86727f7406371682612124bc6db3\",\"parent\":\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\",\"created\":\"2018-08-14T22:17:54.5902216Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) LABEL maintainer=Geé Léfleur\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"5a1738daa8064e42d79a0b1f3d1b75ca4406c6695969860ff8e814999bda9470\",\"parent\":\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\",\"created\":\"2018-07-31T22:20:07.617575594Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"sh\\\"]\"]},\"throwaway\":true}"
- },
- {
- "v1Compatibility": "{\"id\":\"97d7c933c31fa951536cacfdfe3f862ce589020fa58bdf2fccc66204191a4273\",\"created\":\"2018-07-31T22:20:07.361628468Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:96fda64a6b725d4df5249c12e32245e2f02469ff637c38077740f4984cd883dd in / \"]}}"
- }
- ],
- "signatures": [
- {
- "header": {
- "jwk": {
- "crv": "P-256",
- "kid": "AARA:PFUD:3V54:7F2S:2P7E:WMCU:WRE7:KUYD:CFKH:UHZ7:AZ4I:UQEX",
- "kty": "EC",
- "x": "34N4h_uM7FedPw4k3_VabKlt7qoBWpHgpko7zE0RkeY",
- "y": "LhxxtCYh_b1EwUbl3-tQFTbg1mTu34vMxj4UaKjWZk8"
- },
- "alg": "ES256"
- },
- "signature": "WCTPkAwHteVVjQCbY4GWRtoFJewKnZ9b0syTm72hi3n3Z_G30Gn5EDTU3adyXQx24aMzTFI_vryexeuypHv2Rw",
- "protected": "eyJmb3JtYXRMZW5ndGgiOjMwNzIsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wOC0xNFQyMjoxOTozOFoifQ"
- }
- ]
-}
diff --git a/image/docker/types.py b/image/docker/types.py
deleted file mode 100644
index 69ca6d3be..000000000
--- a/image/docker/types.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from collections import namedtuple
-
-ManifestImageLayer = namedtuple('ManifestImageLayer', ['layer_id', 'compressed_size',
- 'is_remote', 'urls', 'command',
- 'blob_digest', 'created_datetime',
- 'author', 'comment',
- 'internal_layer'])
diff --git a/image/docker/v1.py b/image/docker/v1.py
index b6700cdad..b6df9f21a 100644
--- a/image/docker/v1.py
+++ b/image/docker/v1.py
@@ -9,7 +9,7 @@ from collections import namedtuple
class DockerV1Metadata(namedtuple('DockerV1Metadata',
['namespace_name', 'repo_name', 'image_id', 'checksum',
'content_checksum', 'created', 'comment', 'command',
- 'author', 'parent_image_id', 'compat_json'])):
+ 'parent_image_id', 'compat_json'])):
"""
DockerV1Metadata represents all of the metadata for a given Docker v1 Image.
The original form of the metadata is stored in the compat_json field.
diff --git a/initdb.py b/initdb.py
index 709157f00..0e92aec80 100644
--- a/initdb.py
+++ b/initdb.py
@@ -7,7 +7,8 @@ import os
import argparse
from datetime import datetime, timedelta, date
-from peewee import SqliteDatabase
+from peewee import (SqliteDatabase, create_model_tables, drop_model_tables, savepoint_sqlite,
+ savepoint)
from itertools import count
from uuid import UUID, uuid4
from threading import Event
@@ -18,21 +19,12 @@ from data.database import (db, all_models, Role, TeamRole, Visibility, LoginServ
ImageStorageTransformation, ImageStorageSignatureKind,
ExternalNotificationEvent, ExternalNotificationMethod, NotificationKind,
QuayRegion, QuayService, UserRegion, OAuthAuthorizationCode,
- ServiceKeyApprovalType, MediaType, LabelSourceType, UserPromptKind,
- RepositoryKind, User, DisableReason, DeletedNamespace, appr_classes,
- ApprTagKind, ApprBlobPlacementLocation, Repository, TagKind,
- ManifestChild, TagToRepositoryTag, get_epoch_timestamp_ms,
- RepoMirrorConfig, RepoMirrorRule, RepositoryState)
+ ServiceKeyApprovalType, MediaType, LabelSourceType, UserPromptKind)
from data import model
-from data.fields import Credential
-from data.logs_model import logs_model
from data.queue import WorkQueue
-from data.registry_model import registry_model
-from data.registry_model.registry_pre_oci_model import pre_oci_model
from app import app, storage as store, tf
from storage.basestorage import StoragePaths
-from image.docker.schema1 import DOCKER_SCHEMA1_CONTENT_TYPES
-from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES
+from endpoints.v2.manifest import _generate_and_store_manifest
from workers import repositoryactioncounter
@@ -139,27 +131,23 @@ def __create_subtree(with_storage, repo, structure, creator_username, parent, ta
if not isinstance(last_node_tags, list):
last_node_tags = [last_node_tags]
- repo_ref = registry_model.lookup_repository(repo.namespace_user.username, repo.name)
for tag_name in last_node_tags:
- adjusted_tag_name = tag_name
- now_ms = None
- if tag_name[0] == '#':
- adjusted_tag_name = tag_name[1:]
- now_ms = get_epoch_timestamp_ms() - 1000
-
- new_tag = model.tag.create_or_update_tag(repo.namespace_user.username, repo.name,
- adjusted_tag_name,
- new_image.docker_image_id,
- now_ms=now_ms)
-
+ new_tag = model.tag.create_or_update_tag(repo.namespace_user.username, repo.name, tag_name,
+ new_image.docker_image_id)
derived = model.image.find_or_create_derived_storage(new_tag, 'squash', 'local_us')
model.storage.find_or_create_storage_signature(derived, 'gpg2')
- tag = pre_oci_model.get_repo_tag(repo_ref, adjusted_tag_name)
- assert tag._db_id == new_tag.id
- assert pre_oci_model.backfill_manifest_for_tag(tag)
+ _generate_and_store_manifest(repo.namespace_user.username, repo.name, tag_name)
tag_map[tag_name] = new_tag
+ for tag_name in last_node_tags:
+ if tag_name[0] == '#':
+ found_tag = tag_map[tag_name]
+ found_tag.name = tag_name[1:]
+ found_tag.lifetime_end_ts = tag_map[tag_name[1:]].lifetime_start_ts
+ found_tag.lifetime_start_ts = found_tag.lifetime_end_ts - 10
+ found_tag.save()
+
for subtree in subtrees:
__create_subtree(with_storage, repo, subtree, creator_username, new_image, tag_map)
@@ -171,7 +159,8 @@ def __generate_service_key(kid, name, user, timestamp, approval_type, expiration
rotation_duration=rotation_duration)
if approval_type is not None:
- model.service_keys.approve_service_key(key.kid, approval_type, notes='The **test** approval')
+ model.service_keys.approve_service_key(key.kid, user, approval_type,
+ notes='The **test** approval')
key_metadata = {
'kid': kid,
@@ -182,15 +171,14 @@ def __generate_service_key(kid, name, user, timestamp, approval_type, expiration
'auto_approved': True
}
- logs_model.log_action('service_key_approve', None, performer=user,
- timestamp=timestamp, metadata=key_metadata)
+ model.log.log_action('service_key_approve', None, performer=user,
+ timestamp=timestamp, metadata=key_metadata)
- logs_model.log_action('service_key_create', None, performer=user,
- timestamp=timestamp, metadata=key_metadata)
+ model.log.log_action('service_key_create', None, performer=user,
+ timestamp=timestamp, metadata=key_metadata)
-def __generate_repository(with_storage, user_obj, name, description, is_public, permissions,
- structure):
+def __generate_repository(with_storage, user_obj, name, description, is_public, permissions, structure):
repo = model.repository.create_repository(user_obj.username, name, user_obj)
if is_public:
@@ -219,11 +207,8 @@ def finished_database_for_testing(testcase):
""" Called when a testcase has finished using the database, indicating that
any changes should be discarded.
"""
- testcases[testcase]['savepoint'].rollback()
testcases[testcase]['savepoint'].__exit__(True, None, None)
- testcases[testcase]['transaction'].__exit__(True, None, None)
-
def setup_database_for_testing(testcase, with_storage=False, force_rebuild=False):
""" Called when a testcase has started using the database, indicating that
the database should be setup (if not already) and a savepoint created.
@@ -254,20 +239,16 @@ def setup_database_for_testing(testcase, with_storage=False, force_rebuild=False
db_initialized_for_testing.set()
- # Initialize caches.
- Repository.kind.get_id('image')
-
# Create a savepoint for the testcase.
- testcases[testcase] = {}
- testcases[testcase]['transaction'] = db.transaction()
- testcases[testcase]['transaction'].__enter__()
+ test_savepoint = savepoint(db) if IS_TESTING_REAL_DATABASE else savepoint_sqlite(db)
- testcases[testcase]['savepoint'] = db.savepoint()
+ testcases[testcase] = {}
+ testcases[testcase]['savepoint'] = test_savepoint
testcases[testcase]['savepoint'].__enter__()
def initialize_database():
- db.create_tables(all_models)
+ create_model_tables(all_models)
Role.create(name='admin')
Role.create(name='write')
@@ -285,7 +266,6 @@ def initialize_database():
LoginService.create(name='jwtauthn')
LoginService.create(name='keystone')
LoginService.create(name='dex')
- LoginService.create(name='oidc')
BuildTriggerService.create(name='github')
BuildTriggerService.create(name='custom-git')
@@ -315,11 +295,9 @@ def initialize_database():
LogEntryKind.create(name='change_repo_permission')
LogEntryKind.create(name='delete_repo_permission')
LogEntryKind.create(name='change_repo_visibility')
- LogEntryKind.create(name='change_repo_trust')
LogEntryKind.create(name='add_repo_accesstoken')
LogEntryKind.create(name='delete_repo_accesstoken')
LogEntryKind.create(name='set_repo_description')
- LogEntryKind.create(name='change_repo_state')
LogEntryKind.create(name='build_dockerfile')
@@ -352,25 +330,11 @@ def initialize_database():
LogEntryKind.create(name='add_repo_notification')
LogEntryKind.create(name='delete_repo_notification')
- LogEntryKind.create(name='reset_repo_notification')
LogEntryKind.create(name='regenerate_robot_token')
LogEntryKind.create(name='repo_verb')
- LogEntryKind.create(name='repo_mirror_enabled')
- LogEntryKind.create(name='repo_mirror_disabled')
- LogEntryKind.create(name='repo_mirror_config_changed')
- LogEntryKind.create(name='repo_mirror_sync_started')
- LogEntryKind.create(name='repo_mirror_sync_failed')
- LogEntryKind.create(name='repo_mirror_sync_success')
- LogEntryKind.create(name='repo_mirror_sync_now_requested')
- LogEntryKind.create(name='repo_mirror_sync_tag_success')
- LogEntryKind.create(name='repo_mirror_sync_tag_failed')
- LogEntryKind.create(name='repo_mirror_sync_test_success')
- LogEntryKind.create(name='repo_mirror_sync_test_failed')
- LogEntryKind.create(name='repo_mirror_sync_test_started')
-
LogEntryKind.create(name='service_key_create')
LogEntryKind.create(name='service_key_approve')
LogEntryKind.create(name='service_key_delete')
@@ -383,18 +347,9 @@ def initialize_database():
LogEntryKind.create(name='manifest_label_add')
LogEntryKind.create(name='manifest_label_delete')
- LogEntryKind.create(name='change_tag_expiration')
- LogEntryKind.create(name='toggle_repo_trigger')
-
- LogEntryKind.create(name='create_app_specific_token')
- LogEntryKind.create(name='revoke_app_specific_token')
-
ImageStorageLocation.create(name='local_eu')
ImageStorageLocation.create(name='local_us')
- ApprBlobPlacementLocation.create(name='local_eu')
- ApprBlobPlacementLocation.create(name='local_us')
-
ImageStorageTransformation.create(name='squash')
ImageStorageTransformation.create(name='aci')
@@ -410,10 +365,6 @@ def initialize_database():
ExternalNotificationEvent.create(name='build_failure')
ExternalNotificationEvent.create(name='vulnerability_found')
- ExternalNotificationEvent.create(name='repo_mirror_sync_started')
- ExternalNotificationEvent.create(name='repo_mirror_sync_success')
- ExternalNotificationEvent.create(name='repo_mirror_sync_failed')
-
ExternalNotificationMethod.create(name='quay_notification')
ExternalNotificationMethod.create(name='email')
ExternalNotificationMethod.create(name='webhook')
@@ -437,10 +388,6 @@ def initialize_database():
NotificationKind.create(name='maintenance')
NotificationKind.create(name='org_team_invite')
- NotificationKind.create(name='repo_mirror_sync_started')
- NotificationKind.create(name='repo_mirror_sync_success')
- NotificationKind.create(name='repo_mirror_sync_failed')
-
NotificationKind.create(name='test_notification')
QuayRegion.create(name='us')
@@ -449,21 +396,6 @@ def initialize_database():
MediaType.create(name='text/plain')
MediaType.create(name='application/json')
MediaType.create(name='text/markdown')
- MediaType.create(name='application/vnd.cnr.blob.v0.tar+gzip')
- MediaType.create(name='application/vnd.cnr.package-manifest.helm.v0.json')
- MediaType.create(name='application/vnd.cnr.package-manifest.kpm.v0.json')
- MediaType.create(name='application/vnd.cnr.package-manifest.docker-compose.v0.json')
- MediaType.create(name='application/vnd.cnr.package.kpm.v0.tar+gzip')
- MediaType.create(name='application/vnd.cnr.package.helm.v0.tar+gzip')
- MediaType.create(name='application/vnd.cnr.package.docker-compose.v0.tar+gzip')
- MediaType.create(name='application/vnd.cnr.manifests.v0.json')
- MediaType.create(name='application/vnd.cnr.manifest.list.v0.json')
-
- for media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
- MediaType.create(name=media_type)
-
- for media_type in DOCKER_SCHEMA2_CONTENT_TYPES:
- MediaType.create(name=media_type)
LabelSourceType.create(name='manifest')
LabelSourceType.create(name='api', mutable=True)
@@ -473,19 +405,6 @@ def initialize_database():
UserPromptKind.create(name='enter_name')
UserPromptKind.create(name='enter_company')
- RepositoryKind.create(name='image')
- RepositoryKind.create(name='application')
-
- ApprTagKind.create(name='tag')
- ApprTagKind.create(name='release')
- ApprTagKind.create(name='channel')
-
- DisableReason.create(name='user_toggled')
- DisableReason.create(name='successive_build_failures')
- DisableReason.create(name='successive_build_internal_errors')
-
- TagKind.create(name='tag')
-
def wipe_database():
logger.debug('Wiping all data from the DB.')
@@ -494,21 +413,12 @@ def wipe_database():
if not IS_TESTING_REAL_DATABASE and not isinstance(db.obj, SqliteDatabase):
raise RuntimeError('Attempted to wipe production database!')
- db.drop_tables(all_models)
+ drop_model_tables(all_models, fail_silently=True)
def populate_database(minimal=False, with_storage=False):
logger.debug('Populating the DB with test data.')
- # Check if the data already exists. If so, we skip. This can happen between calls from the
- # "old style" tests and the new py.test's.
- try:
- User.get(username='devtable')
- logger.debug('DB already populated')
- return
- except User.DoesNotExist:
- pass
-
# Note: databases set up with "real" schema (via Alembic) will not have these types
  # defined, so we create them here if necessary.
try:
@@ -543,7 +453,6 @@ def populate_database(minimal=False, with_storage=False):
disabled_user.save()
dtrobot = model.user.create_robot('dtrobot', new_user_1)
- dtrobot2 = model.user.create_robot('dtrobot2', new_user_1)
new_user_2 = model.user.create_user('public', 'password', 'jacob.moshenko@gmail.com')
new_user_2.verified = True
@@ -553,7 +462,7 @@ def populate_database(minimal=False, with_storage=False):
new_user_3.verified = True
new_user_3.save()
- another_robot = model.user.create_robot('anotherrobot', new_user_3)
+ model.user.create_robot('anotherrobot', new_user_3)
new_user_4 = model.user.create_user('randomuser', 'password', 'no4@thanks.com')
new_user_4.verified = True
@@ -601,7 +510,6 @@ def populate_database(minimal=False, with_storage=False):
first_label = model.label.create_manifest_label(tag_manifest, 'foo', 'bar', 'manifest')
model.label.create_manifest_label(tag_manifest, 'foo', 'baz', 'api')
model.label.create_manifest_label(tag_manifest, 'anotherlabel', '1234', 'internal')
- model.label.create_manifest_label(tag_manifest, 'jsonlabel', '{"hey": "there"}', 'internal')
label_metadata = {
'key': 'foo',
@@ -610,9 +518,9 @@ def populate_database(minimal=False, with_storage=False):
'manifest_digest': tag_manifest.digest
}
- logs_model.log_action('manifest_label_add', new_user_1.username, performer=new_user_1,
- timestamp=datetime.now(), metadata=label_metadata,
- repository=tag_manifest.tag.repository)
+ model.log.log_action('manifest_label_add', new_user_1.username, performer=new_user_1,
+ timestamp=datetime.now(), metadata=label_metadata,
+ repository=tag_manifest.tag.repository)
model.blob.initiate_upload(new_user_1.username, simple_repo.name, str(uuid4()), 'local_us', {})
model.notification.create_repo_notification(simple_repo, 'repo_push', 'quay_notification', {}, {})
@@ -626,13 +534,13 @@ def populate_database(minimal=False, with_storage=False):
(1, [], None)], None)], None))
__generate_repository(with_storage, new_user_1, 'history', 'Historical repository.', False,
- [], (4, [(2, [], '#latest'), (3, [], 'latest')], None))
+ [], (4, [(2, [], 'latest'), (3, [], '#latest')], None))
__generate_repository(with_storage, new_user_1, 'complex',
'Complex repository with many branches and tags.',
False, [(new_user_2, 'read'), (dtrobot[0], 'read')],
(2, [(3, [], 'v2.0'),
- (1, [(1, [(2, [], ['prod'])],
+ (1, [(1, [(1, [], ['prod'])],
'staging'),
(1, [], None)], None)], None))
@@ -646,11 +554,6 @@ def populate_database(minimal=False, with_storage=False):
(1, [(1, [], 'v5.0'), (1, [], 'v6.0')], None)],
None))
- trusted_repo = __generate_repository(with_storage, new_user_1, 'trusted', 'Trusted repository.',
- False, [], (4, [], ['latest', 'prod']))
- trusted_repo.trust_enabled = True
- trusted_repo.save()
-
publicrepo = __generate_repository(with_storage, new_user_2, 'publicrepo',
'Public repository pullable by the world.', True,
[], (10, [], 'latest'))
@@ -665,14 +568,9 @@ def populate_database(minimal=False, with_storage=False):
[(new_user_2, 'write'), (reader, 'read')],
(5, [], 'latest'))
- __generate_repository(with_storage, new_user_1, 'text-full-repo',
- 'This is a repository for testing text search', False,
- [(new_user_2, 'write'), (reader, 'read')],
- (5, [], 'latest'))
-
building = __generate_repository(with_storage, new_user_1, 'building',
'Empty repository which is building.',
- False, [(new_user_2, 'write'), (reader, 'read')], (0, [], None))
+ False, [], (0, [], None))
new_token = model.token.create_access_token(building, 'write', 'build-worker')
@@ -681,8 +579,6 @@ def populate_database(minimal=False, with_storage=False):
trigger.config = json.dumps({
'build_source': 'jakedt/testconnect',
'subdir': '',
- 'dockerfile_path': 'Dockerfile',
- 'context': '/',
})
trigger.save()
@@ -733,12 +629,6 @@ def populate_database(minimal=False, with_storage=False):
liborg = model.organization.create_organization('library', 'quay+library@devtable.com', new_user_1)
liborg.save()
- titiorg = model.organization.create_organization('titi', 'quay+titi@devtable.com', new_user_1)
- titiorg.save()
-
- thirdorg = model.organization.create_organization('sellnsmall', 'quay+sell@devtable.com', new_user_1)
- thirdorg.save()
-
model.user.create_robot('coolrobot', org)
oauth_app_1 = model.oauth.create_application(org, 'Some Test App', 'http://localhost:8000',
@@ -749,13 +639,10 @@ def populate_database(minimal=False, with_storage=False):
'http://localhost:8000/o2c.html', client_id='deadpork',
description='This is another test application')
- model.oauth.create_access_token_for_testing(new_user_1, 'deadbeef', 'repo:admin',
- access_token='%s%s' % ('b' * 40, 'c' * 40))
+ model.oauth.create_access_token_for_testing(new_user_1, 'deadbeef', 'repo:admin')
- oauth_credential = Credential.from_string('dswfhasdf1')
- OAuthAuthorizationCode.create(application=oauth_app_1, code='Z932odswfhasdf1',
- scope='repo:admin', data='{"somejson": "goeshere"}',
- code_name='Z932odswfhasdf1Z932o', code_credential=oauth_credential)
+ OAuthAuthorizationCode.create(application=oauth_app_1, code='Z932odswfhasdf1', scope='repo:admin',
+ data='{"somejson": "goeshere"}')
model.user.create_robot('neworgrobot', org)
@@ -785,19 +672,6 @@ def populate_database(minimal=False, with_storage=False):
model.team.add_user_to_team(creatorbot, creators)
model.team.add_user_to_team(creatoruser, creators)
- sell_owners = model.team.get_organization_team('sellnsmall', 'owners')
- sell_owners.description = 'Owners have unfettered access across the entire org.'
- sell_owners.save()
-
- model.team.add_user_to_team(new_user_4, sell_owners)
-
- sync_config = {'group_dn': 'cn=Test-Group,ou=Users', 'group_id': 'somegroupid'}
- synced_team = model.team.create_team('synced', org, 'member', 'Some synced team.')
- model.team.set_team_syncing(synced_team, 'ldap', sync_config)
-
- another_synced_team = model.team.create_team('synced', thirdorg, 'member', 'Some synced team.')
- model.team.set_team_syncing(another_synced_team, 'ldap', {'group_dn': 'cn=Test-Group,ou=Users'})
-
__generate_repository(with_storage, new_user_1, 'superwide', None, False, [],
[(10, [], 'latest2'),
(2, [], 'latest3'),
@@ -816,28 +690,6 @@ def populate_database(minimal=False, with_storage=False):
(2, [], 'latest17'),
(2, [], 'latest18')])
-
- mirror_repo = __generate_repository(with_storage, new_user_1, 'mirrored', 'Mirrored repository.',
- False, [(dtrobot[0], 'write'), (dtrobot2[0], 'write')],
- (4, [], ['latest', 'prod']))
- mirror_rule = model.repo_mirror.create_mirroring_rule(mirror_repo, ['latest', '3.3*'])
- mirror_args = (mirror_repo, mirror_rule, dtrobot[0], 'quay.io/coreos/etcd', 60*60*24)
- mirror_kwargs = {
- 'external_registry_username': 'fakeusername',
- 'external_registry_password': 'fakepassword',
- 'external_registry_config': {},
- 'is_enabled': True,
- 'sync_start_date': datetime.utcnow()
- }
- mirror = model.repo_mirror.enable_mirroring_for_repository(*mirror_args, **mirror_kwargs)
-
-
- read_only_repo = __generate_repository(with_storage, new_user_1, 'readonly', 'Read-Only Repo.',
- False, [], (4, [], ['latest', 'prod']))
- read_only_repo.state = RepositoryState.READ_ONLY
- read_only_repo.save()
-
-
model.permission.add_prototype_permission(org, 'read', activating_user=new_user_1,
delegate_user=new_user_2)
model.permission.add_prototype_permission(org, 'read', activating_user=new_user_1,
@@ -845,8 +697,6 @@ def populate_database(minimal=False, with_storage=False):
model.permission.add_prototype_permission(org, 'write', activating_user=new_user_2,
delegate_user=new_user_1)
-
-
today = datetime.today()
week_ago = today - timedelta(6)
six_ago = today - timedelta(5)
@@ -885,75 +735,69 @@ def populate_database(minimal=False, with_storage=False):
key = model.service_keys.create_service_key('test_service_key', 'test_service_key', 'quay',
_TEST_JWK, {}, None)
- model.service_keys.approve_service_key(key.kid, ServiceKeyApprovalType.SUPERUSER,
+ model.service_keys.approve_service_key(key.kid, new_user_1, ServiceKeyApprovalType.SUPERUSER,
notes='Test service key for local/test registry testing')
- # Add an app specific token.
- token = model.appspecifictoken.create_token(new_user_1, 'some app')
- token.token_name = 'a' * 60
- token.token_secret = 'b' * 60
- token.save()
+ model.log.log_action('org_create_team', org.username, performer=new_user_1,
+ timestamp=week_ago, metadata={'team': 'readers'})
- logs_model.log_action('org_create_team', org.username, performer=new_user_1,
- timestamp=week_ago, metadata={'team': 'readers'})
+ model.log.log_action('org_set_team_role', org.username, performer=new_user_1,
+ timestamp=week_ago,
+ metadata={'team': 'readers', 'role': 'read'})
- logs_model.log_action('org_set_team_role', org.username, performer=new_user_1,
- timestamp=week_ago,
- metadata={'team': 'readers', 'role': 'read'})
+ model.log.log_action('create_repo', org.username, performer=new_user_1,
+ repository=org_repo, timestamp=week_ago,
+ metadata={'namespace': org.username, 'repo': 'orgrepo'})
- logs_model.log_action('create_repo', org.username, performer=new_user_1,
- repository=org_repo, timestamp=week_ago,
- metadata={'namespace': org.username, 'repo': 'orgrepo'})
+ model.log.log_action('change_repo_permission', org.username,
+ performer=new_user_2, repository=org_repo,
+ timestamp=six_ago,
+ metadata={'username': new_user_1.username,
+ 'repo': 'orgrepo', 'role': 'admin'})
- logs_model.log_action('change_repo_permission', org.username,
- performer=new_user_2, repository=org_repo,
- timestamp=six_ago,
- metadata={'username': new_user_1.username,
- 'repo': 'orgrepo', 'role': 'admin'})
+ model.log.log_action('change_repo_permission', org.username,
+ performer=new_user_1, repository=org_repo,
+ timestamp=six_ago,
+ metadata={'username': new_user_2.username,
+ 'repo': 'orgrepo', 'role': 'read'})
- logs_model.log_action('change_repo_permission', org.username,
- performer=new_user_1, repository=org_repo,
- timestamp=six_ago,
- metadata={'username': new_user_2.username,
- 'repo': 'orgrepo', 'role': 'read'})
+ model.log.log_action('add_repo_accesstoken', org.username, performer=new_user_1,
+ repository=org_repo, timestamp=four_ago,
+ metadata={'repo': 'orgrepo', 'token': 'deploytoken'})
- logs_model.log_action('add_repo_accesstoken', org.username, performer=new_user_1,
- repository=org_repo, timestamp=four_ago,
- metadata={'repo': 'orgrepo', 'token': 'deploytoken'})
+ model.log.log_action('push_repo', org.username, performer=new_user_2,
+ repository=org_repo, timestamp=today,
+ metadata={'username': new_user_2.username,
+ 'repo': 'orgrepo'})
- logs_model.log_action('push_repo', org.username, performer=new_user_2,
- repository=org_repo, timestamp=today,
- metadata={'username': new_user_2.username,
- 'repo': 'orgrepo'})
+ model.log.log_action('pull_repo', org.username, performer=new_user_2,
+ repository=org_repo, timestamp=today,
+ metadata={'username': new_user_2.username,
+ 'repo': 'orgrepo'})
- logs_model.log_action('pull_repo', org.username, performer=new_user_2,
- repository=org_repo, timestamp=today,
- metadata={'username': new_user_2.username,
- 'repo': 'orgrepo'})
+ model.log.log_action('pull_repo', org.username, repository=org_repo,
+ timestamp=today,
+ metadata={'token': 'sometoken', 'token_code': 'somecode',
+ 'repo': 'orgrepo'})
- logs_model.log_action('pull_repo', org.username, repository=org_repo,
- timestamp=today,
- metadata={'token': 'sometoken', 'token_code': 'somecode',
- 'repo': 'orgrepo'})
+ model.log.log_action('delete_tag', org.username, performer=new_user_2,
+ repository=org_repo, timestamp=today,
+ metadata={'username': new_user_2.username,
+ 'repo': 'orgrepo', 'tag': 'sometag'})
- logs_model.log_action('delete_tag', org.username, performer=new_user_2,
- repository=org_repo, timestamp=today,
- metadata={'username': new_user_2.username,
- 'repo': 'orgrepo', 'tag': 'sometag'})
+ model.log.log_action('pull_repo', org.username, repository=org_repo,
+ timestamp=today,
+ metadata={'token_code': 'somecode', 'repo': 'orgrepo'})
- logs_model.log_action('pull_repo', org.username, repository=org_repo,
- timestamp=today,
- metadata={'token_code': 'somecode', 'repo': 'orgrepo'})
+ model.log.log_action('pull_repo', new_user_2.username, repository=publicrepo,
+ timestamp=yesterday,
+ metadata={'token_code': 'somecode', 'repo': 'publicrepo'})
- logs_model.log_action('pull_repo', new_user_2.username, repository=publicrepo,
- timestamp=yesterday,
- metadata={'token_code': 'somecode', 'repo': 'publicrepo'})
-
- logs_model.log_action('build_dockerfile', new_user_1.username, repository=building,
- timestamp=today,
- metadata={'repo': 'building', 'namespace': new_user_1.username,
- 'trigger_id': trigger.uuid, 'config': json.loads(trigger.config),
- 'service': trigger.service.name})
+ model.log.log_action('build_dockerfile', new_user_1.username, repository=building,
+ timestamp=today,
+ metadata={'repo': 'building', 'namespace': new_user_1.username,
+ 'trigger_id': trigger.uuid, 'config': json.loads(trigger.config),
+ 'service': trigger.service.name})
model.message.create([{'content': 'We love you, Quay customers!', 'severity': 'info',
'media_type': 'text/plain'}])
@@ -966,32 +810,18 @@ def populate_database(minimal=False, with_storage=False):
model.user.create_user_prompt(new_user_4, 'confirm_username')
- while True:
- to_count = model.repositoryactioncount.find_uncounted_repository()
- if not to_count:
- break
-
- model.repositoryactioncount.count_repository_actions(to_count)
- model.repositoryactioncount.update_repository_score(to_count)
-
-
-WHITELISTED_EMPTY_MODELS = ['DeletedNamespace', 'LogEntry', 'LogEntry2', 'ManifestChild',
- 'NamespaceGeoRestriction', 'RepoMirrorConfig', 'RepoMirrorRule']
+ while repositoryactioncounter.count_repository_actions():
+ pass
def find_models_missing_data():
- # As a sanity check we are going to make sure that all db tables have some data, unless explicitly
- # whitelisted.
+ # As a sanity check we are going to make sure that all db tables have some data
models_missing_data = set()
for one_model in all_models:
- if one_model in appr_classes:
- continue
-
try:
one_model.select().get()
except one_model.DoesNotExist:
- if one_model.__name__ not in WHITELISTED_EMPTY_MODELS:
- models_missing_data.add(one_model.__name__)
+ models_missing_data.add(one_model.__name__)
return models_missing_data
@@ -1001,14 +831,13 @@ if __name__ == '__main__':
parser.add_argument('--simple', action='store_true')
args = parser.parse_args()
- log_level = os.environ.get('LOGGING_LEVEL', getattr(logging, app.config['LOGGING_LEVEL']))
+ log_level = getattr(logging, app.config['LOGGING_LEVEL'])
logging.basicConfig(level=log_level)
if not IS_TESTING_REAL_DATABASE and not isinstance(db.obj, SqliteDatabase):
raise RuntimeError('Attempted to initialize production database!')
- if os.environ.get('SKIP_DB_SCHEMA', '').lower() != 'true':
- initialize_database()
+ initialize_database()
populate_database(args.simple)
diff --git a/integration_tests/README.md b/integration_tests/README.md
deleted file mode 100644
index 3c9b3e019..000000000
--- a/integration_tests/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-Quay Integration Testing
-============================
-
-## Dependencies:
-
-1. [node.js](https://nodejs.org/) >= 8 & [yarn](https://yarnpkg.com/en/docs/install) >= 1.3.2
-2. Google Chrome/Chromium >= 60 (needs --headless flag) for integration tests
-
-### Install Dependencies
-
-To install the dependencies:
-```
-yarn install
-```
-You must run this command once, and again every time the dependencies change; the `node_modules` directory is not committed to git.
-
-## Integration Tests
-
-Integration tests are run in a headless Chrome driven by [protractor](http://www.protractortest.org/#/). Requirements: Chrome, a running Quay instance, and podman.
-
-Setup (run again any time `node_modules` changes, e.g. after `yarn add` or `yarn install`):
-```
-cd integration_tests && yarn run webdriver-update
-```
-
-Run integration tests:
-```
-yarn run test-all
-```
-
-Run integration tests against a specific test suite:
-```
-yarn run test-suite --suite <suite-name>
-```
-The available test suites are listed in `package.json`.
-
-### Required Environment Variables
-
-```
-export QUAY_APP_ADDRESS=<quay-app-address>
-export QUAY_INTERNAL_USERNAME=<username>
-export QUAY_INTERNAL_PASSWORD=<password>
-```
-
-## Supported Browsers
-
-The latest versions of the following browsers are supported:
-
-- Edge
-- Chrome
-- Safari
-- Firefox
-
diff --git a/integration_tests/package.json b/integration_tests/package.json
deleted file mode 100644
index 25bb7c325..000000000
--- a/integration_tests/package.json
+++ /dev/null
@@ -1,132 +0,0 @@
-{
- "description": "Quay Integration Testing",
- "license": "UNLICENSED",
- "repository": {
- "type" : "git",
- "url" : "https://github.com/quay/quay.git",
- "directory": "integration_tests"
- },
- "homepage": "https://github.com/quay/quay/blob/master/integration_tests/README.md",
- "private": true,
- "scripts": {
- "lint": "eslint --ext .js,.jsx,.ts,.tsx --color .",
- "webdriver-update": "webdriver-manager update --gecko=false",
- "test-all-tap": "TAP=true yarn run test-gui",
- "test-all": "yarn run test-suite --suite all",
- "test-suite": "ts-node -O '{\"module\":\"commonjs\"}' ./node_modules/.bin/protractor protractor.conf.ts",
- "debug-test-suite": "TS_NODE_COMPILER_OPTIONS='{\"module\":\"commonjs\"}' node -r ts-node/register --inspect-brk ./node_modules/.bin/protractor protractor.conf.ts"
- },
- "dependencies": {
- "@patternfly/patternfly": "2.6.7",
- "@patternfly/react-charts": "^3.4.5",
- "@patternfly/react-core": "3.16.16",
- "brace": "0.11.x",
- "classnames": "2.x",
- "core-js": "2.x",
- "file-saver": "1.3.x",
- "font-awesome": "4.7.x",
- "fuzzysearch": "1.0.x",
- "history": "4.x",
- "immutable": "3.x",
- "js-base64": "^2.5.1",
- "js-yaml": "3.x",
- "lodash-es": "4.x",
- "memoize-one": "5.x",
- "murmurhash-js": "1.0.x",
- "openshift-logos-icon": "1.7.1",
- "patternfly": "^3.59.1",
- "patternfly-react": "2.32.3",
- "patternfly-react-extensions": "2.18.4",
- "plotly.js": "1.47.3",
- "prop-types": "15.6.x",
- "react": "16.8.6",
- "react-copy-to-clipboard": "5.x",
- "react-dnd": "^2.6.0",
- "react-dnd-html5-backend": "^2.6.0",
- "react-dom": "16.8.6",
- "react-helmet": "5.x",
- "react-jsonschema-form": "^1.0.4",
- "react-lightweight-tooltip": "1.x",
- "react-modal": "3.x",
- "react-redux": "5.x",
- "react-router-dom": "4.x",
- "react-tagsinput": "3.19.x",
- "react-transition-group": "2.3.x",
- "react-virtualized": "9.x",
- "redux": "4.0.1",
- "sanitize-html": "1.x",
- "screenfull": "4.x",
- "semver": "^6.0.0",
- "showdown": "1.8.x",
- "text-encoding": "0.x",
- "typesafe-actions": "^4.2.1",
- "url-polyfill": "^1.1.5",
- "url-search-params-polyfill": "2.x",
- "whatwg-fetch": "2.x",
- "xterm": "^3.12.2"
- },
- "devDependencies": {
- "@types/classnames": "^2.2.7",
- "@types/enzyme": "3.x",
- "@types/glob": "7.x",
- "@types/immutable": "3.x",
- "@types/jasmine": "2.8.x",
- "@types/jasminewd2": "2.0.x",
- "@types/lodash-es": "4.x",
- "@types/prop-types": "15.5.6",
- "@types/react": "16.8.13",
- "@types/react-dom": "16.8.4",
- "@types/react-helmet": "5.x",
- "@types/react-jsonschema-form": "^1.0.8",
- "@types/react-redux": "6.0.2",
- "@types/react-router-dom": "4.2.7",
- "@types/react-transition-group": "2.x",
- "@types/react-virtualized": "9.x",
- "@types/webpack": "4.x",
- "@typescript-eslint/eslint-plugin": "^1.7.0",
- "@typescript-eslint/parser": "^1.7.0",
- "bootstrap-sass": "^3.3.7",
- "cache-loader": "1.x",
- "chalk": "2.3.x",
- "chromedriver": "^2.43.3",
- "circular-dependency-plugin": "5.0.2",
- "css-loader": "0.28.x",
- "enzyme": "^3.9.0",
- "enzyme-adapter-react-16": "1.12.1",
- "eslint": "5.16.0",
- "eslint-plugin-import": "2.x",
- "eslint-plugin-react": "7.x",
- "eslint-plugin-react-hooks": "^1.6.0",
- "file-loader": "1.x",
- "fork-ts-checker-webpack-plugin": "0.x",
- "glob": "7.x",
- "glslify-loader": "1.x",
- "html-webpack-plugin": "3.x",
- "jasmine-console-reporter": "2.x",
- "jasmine-core": "2.x",
- "jasmine-reporters": "2.x",
- "jest": "21.x",
- "jest-cli": "21.x",
- "mini-css-extract-plugin": "0.4.x",
- "node-sass": "4.8.x",
- "protractor": "5.4.x",
- "protractor-fail-fast": "3.x",
- "protractor-jasmine2-screenshot-reporter": "0.5.x",
- "read-pkg": "5.x",
- "resolve-url-loader": "2.x",
- "sass-loader": "6.x",
- "thread-loader": "1.x",
- "ts-jest": "21.x",
- "ts-loader": "5.3.3",
- "ts-node": "5.x",
- "typescript": "3.4.4",
- "webpack": "4.29.6",
- "webpack-bundle-analyzer": "2.x",
- "webpack-cli": "^2.0.12",
- "webpack-virtual-modules": "^0.1.10"
- },
- "engines": {
- "node": ">=8.x"
- }
-}
-
diff --git a/integration_tests/protractor.conf.ts b/integration_tests/protractor.conf.ts
deleted file mode 100644
index 0e758ad0e..000000000
--- a/integration_tests/protractor.conf.ts
+++ /dev/null
@@ -1,139 +0,0 @@
-import { Config, browser, logging } from 'protractor';
-import { execSync } from 'child_process';
-import * as HtmlScreenshotReporter from 'protractor-jasmine2-screenshot-reporter';
-import * as _ from 'lodash';
-import { TapReporter, JUnitXmlReporter } from 'jasmine-reporters';
-import * as ConsoleReporter from 'jasmine-console-reporter';
-import * as failFast from 'protractor-fail-fast';
-import { createWriteStream } from 'fs';
-import { format } from 'util';
-
-const tap = !!process.env.TAP;
-
-export const BROWSER_TIMEOUT = 15000;
-export const appHost = `${process.env.QUAY_APP_ADDRESS}${(process.env.QUAY_BASE_PATH || '/').replace(/\/$/, '')}`;
-export const testName = `test-${Math.random().toString(36).replace(/[^a-z]+/g, '').substr(0, 5)}`;
-
-const htmlReporter = new HtmlScreenshotReporter({ dest: './gui_test_screenshots', inlineImages: true, captureOnlyFailedSpecs: true, filename: 'test-gui-report.html' });
-const junitReporter = new JUnitXmlReporter({ savePath: './gui_test_screenshots', consolidateAll: true });
-const browserLogs: logging.Entry[] = [];
-
-//const suite = (tests: string[]) => (!_.isNil(process.env.BRIDGE_KUBEADMIN_PASSWORD) ? ['tests/login.scenario.ts'] : []).concat(['tests/base.scenario.ts', ...tests]);
-const suite = (tests: string[]) => (['tests/base.scenario.ts', ...tests]);
-
-
-export const config: Config = {
- framework: 'jasmine',
- directConnect: true,
- skipSourceMapSupport: true,
- jasmineNodeOpts: {
- print: () => null,
- defaultTimeoutInterval: 40000,
- },
- logLevel: tap ? 'ERROR' : 'INFO',
- plugins: [failFast.init()],
- capabilities: {
- browserName: 'chrome',
- acceptInsecureCerts: true,
- chromeOptions: {
- args: [
- '--disable-gpu',
-// '--headless',
- '--no-sandbox',
- '--window-size=1920,1200',
- '--disable-background-timer-throttling',
- '--disable-renderer-backgrounding',
- '--disable-raf-throttling',
- // Avoid crashes when running in a container due to small /dev/shm size
- // https://bugs.chromium.org/p/chromium/issues/detail?id=715363
- '--disable-dev-shm-usage',
- ],
- prefs: {
- 'profile.password_manager_enabled': false,
- 'credentials_enable_service': false,
- 'password_manager_enabled': false,
- },
- },
-/**
- * 'browserName': 'firefox',
- * 'moz:firefoxOptions': {
- * 'args': ['--safe-mode']
- * }
- */
- },
- beforeLaunch: () => new Promise(resolve => htmlReporter.beforeLaunch(resolve)),
- onPrepare: () => {
- browser.waitForAngularEnabled(false);
- jasmine.getEnv().addReporter(htmlReporter);
- jasmine.getEnv().addReporter(junitReporter);
- if (tap) {
- jasmine.getEnv().addReporter(new TapReporter());
- } else {
- jasmine.getEnv().addReporter(new ConsoleReporter());
- }
- },
- onComplete: async() => {
- const consoleLogStream = createWriteStream('gui_test_screenshots/browser.log', { flags: 'a' });
- browserLogs.forEach(log => {
- const { level, message } = log;
- const messageStr = _.isArray(message) ? message.join(' ') : message;
- consoleLogStream.write(`${format.apply(null, [`[${level.name}]`, messageStr])}\n`);
- });
-
- const url = await browser.getCurrentUrl();
- console.log('Last browser URL: ', url);
-
- await browser.close();
-
- // should add a step to clean up organizations and repos created during
- // testing
-// execSync(`if kubectl get ${resource} ${testName} 2> /dev/null; then kubectl delete ${resource} ${testName}; fi`);
- },
- onCleanUp: (exitCode) => {
- return console.log('Cleaning up completed');
- },
- afterLaunch: (exitCode) => {
- failFast.clean();
- return new Promise(resolve => htmlReporter.afterLaunch(resolve.bind(this, exitCode)));
- },
- suites: {
- filter: suite([
- 'tests/filter.scenario.ts',
- ]),
- login: [
- 'tests/login.scenario.ts',
- ],
- quaylogin: [
- 'tests/quay-login.scenario.ts',
- ],
- },
-};
-
-export const checkLogs = async() => (await browser.manage().logs().get('browser'))
- .map(log => {
- browserLogs.push(log);
- return log;
- });
-
-function hasError() {
- return window.windowError;
-}
-export const checkErrors = async() => await browser.executeScript(hasError).then(err => {
- if (err) {
- fail(`omg js error: ${err}`);
- }
-});
-
-export const waitForCount = (elementArrayFinder, expectedCount) => {
- return async() => {
- const actualCount = await elementArrayFinder.count();
- return expectedCount >= actualCount;
- };
-};
-
-export const waitForNone = (elementArrayFinder) => {
- return async() => {
- const count = await elementArrayFinder.count();
- return count === 0;
- };
-};
diff --git a/integration_tests/tests/quay-login.scenario.ts b/integration_tests/tests/quay-login.scenario.ts
deleted file mode 100644
index 143d41535..000000000
--- a/integration_tests/tests/quay-login.scenario.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-//import { $, browser, ExpectedConditions as until } from 'protractor';
-import { $, browser } from 'protractor';
-
-import { appHost } from '../protractor.conf';
-import * as loginView from '../views/quay-login.view';
-//import * as sidenavView from '../views/sidenav.view';
-//import * as clusterSettingsView from '../views/cluster-settings.view';
-
-const JASMINE_DEFAULT_TIMEOUT_INTERVAL = jasmine.DEFAULT_TIMEOUT_INTERVAL;
-const JASMINE_EXTENDED_TIMEOUT_INTERVAL = 1000 * 60 * 3;
-//const KUBEADMIN_IDP = 'kube:admin';
-//const KUBEADMIN_USERNAME = 'kubeadmin';
-const {
-// BRIDGE_HTPASSWD_IDP = 'test',
- QUAY_INTERNAL_USERNAME = 'test',
- QUAY_INTERNAL_PASSWORD = 'test',
-// BRIDGE_KUBEADMIN_PASSWORD,
-} = process.env;
-
-describe('Auth test', () => {
- beforeAll(async() => {
- await browser.get(appHost);
- await browser.sleep(3000); // Wait long enough for the login redirect to complete
- });
-
- describe('Login test', async() => {
- beforeAll(() => {
- // Extend the default jasmine timeout interval just in case it takes a while for the htpasswd idp to be ready
- jasmine.DEFAULT_TIMEOUT_INTERVAL = JASMINE_EXTENDED_TIMEOUT_INTERVAL;
- });
-
- afterAll(() => {
- // Set jasmine timeout interval back to the original value after these tests are done
- jasmine.DEFAULT_TIMEOUT_INTERVAL = JASMINE_DEFAULT_TIMEOUT_INTERVAL;
- });
-
- it('logs in', async() => {
- await loginView.login(QUAY_INTERNAL_USERNAME, QUAY_INTERNAL_PASSWORD);
- expect(browser.getCurrentUrl()).toContain(appHost);
- expect(loginView.userDropdown.getText()).toContain(QUAY_INTERNAL_USERNAME);
- });
-
- it('logs out', async() => {
- await loginView.logout();
- expect(browser.getCurrentUrl()).toContain('repository');
- expect($('.user-view').isPresent()).toBeTruthy();
- });
- });
-});
diff --git a/integration_tests/tsconfig.json b/integration_tests/tsconfig.json
deleted file mode 100644
index 75be14031..000000000
--- a/integration_tests/tsconfig.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- "compilerOptions": {
- "baseUrl": ".",
- "module": "esnext",
- "moduleResolution": "node",
- "outDir": "./build/",
- "target": "es5",
- "lib": ["dom", "es2015", "es2016.array.include", "es2017.string"],
- "jsx": "react",
- "allowJs": true,
- "downlevelIteration": true,
- "experimentalDecorators": true,
- "sourceMap": true,
- "noUnusedLocals": true
- },
- "exclude": [
- ".yarn",
- "**/node_modules"
- ],
- "include": [
- "**/*.js",
- "**/*.jsx",
- "**/*.ts",
- "**/*.tsx"
- ]
-}
diff --git a/integration_tests/views/quay-login.view.ts b/integration_tests/views/quay-login.view.ts
deleted file mode 100644
index 02202e098..000000000
--- a/integration_tests/views/quay-login.view.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-import { $, browser, ExpectedConditions as until, by, element } from 'protractor';
-//import { appHost } from '../protractor.conf';
-
-export const nameInput = $('#signin-username'); // signin-username
-export const passwordInput = $('#signin-password'); // signin-password
-//export const submitButton = $('button[type=submit]');
-export const submitButton = element(by.partialButtonText('Sign in to')); //.$('button[type=submit]');
-export const logOutLink = element(by.linkText('Sign out all sessions'));
-export const userDropdown = $('.dropdown-toggle.user-dropdown.user-view'); //$('[data-toggle=dropdown] .pf-c-dropdown__toggle');
-
-export const login = async(username: string, password: string) => {
-/* if (providerName) {
- await selectProvider(providerName);
-} */
- await browser.wait(until.visibilityOf(nameInput));
- await nameInput.sendKeys(username);
- await passwordInput.sendKeys(password);
- await submitButton.click();
- await browser.wait(until.presenceOf(userDropdown));
-};
-
-export const logout = async() => {
- await browser.wait(until.presenceOf(userDropdown));
- await userDropdown.click();
- await browser.wait(until.presenceOf(logOutLink));
- await logOutLink.click();
- await browser.wait(until.presenceOf($('.user-view')));
-};
diff --git a/karma.conf.js b/karma.conf.js
index 22b081f0c..3a917a709 100644
--- a/karma.conf.js
+++ b/karma.conf.js
@@ -1,7 +1,4 @@
-var webpackConfig = require('./webpack.config');
-
-
-module.exports = function(config) {
+module.exports = function (config) {
config.set({
basePath: '',
frameworks: ['jasmine'],
@@ -24,25 +21,23 @@ module.exports = function(config) {
'node_modules/raven-js/dist/raven.js',
'node_modules/cal-heatmap/cal-heatmap.js',
- // Polyfills
- 'node_modules/core-js/index.js',
-
// static/lib resources
'static/lib/**/*.js',
- // Single entrypoint for all tests
- 'static/test/test-index.ts',
+ // Application resources
+ 'static/js/**/*.js',
- // Tests utils
+ // Tests
'static/test/**/*.js',
],
- exclude: [],
+ exclude: [
+ 'static/js/build/bundle.js',
+ ],
preprocessors: {
+ 'static/lib/ngReact/react.ngReact.min.js': ['webpack'],
'static/lib/angular-moment.min.js': ['webpack'],
- 'node_modules/core-js/index.js': ['webpack'],
- 'static/test/test-index.ts': ['webpack'],
},
- webpack: webpackConfig,
+ webpack: {},
webpackMiddleware: {
stats: 'errors-only'
},
@@ -58,17 +53,8 @@ module.exports = function(config) {
colors: true,
logLevel: config.LOG_INFO,
autoWatch: true,
- browsers: ['ChromeNoSandbox'],
- customLaunchers: {
- ChromeNoSandbox: {
- base: 'ChromeHeadless',
- flags: ['--no-sandbox']
- }
- },
+ browsers: ['PhantomJS', 'Chrome'],
singleRun: false,
- concurrency: Infinity,
- mime: {
- 'text/x-typescript': ['ts','tsx']
- }
+ concurrency: Infinity
});
};
diff --git a/local-config-app.sh b/local-config-app.sh
deleted file mode 100755
index 9c6192200..000000000
--- a/local-config-app.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-cat << "EOF"
- __ __
- / \ / \ ______ _ _ __ __ __ _____ ____ _ _ _____ _____ _____
- / /\ / /\ \ / __ \ | | | | / \ \ \ / / / ____| / __ \ | \ | | | ___| |_ _| / ____|
-/ / / / \ \ | | | | | | | | / /\ \ \ / | | | | | | | \| | | |__ | | | | _
-\ \ \ \ / / | |__| | | |__| | / ____ \ | | | |____ | |__| | | . ` | | __| _| |_ | |__| |
- \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| \_____| \____/ |_| \_| |_| |_____| \_____|
- \__/ \__/ \ \__
- \___\ by Red Hat
-
- Build, Store, and Distribute your Containers
-
-
-EOF
-
-goreman -basedir "config_app" start
diff --git a/local-run.sh b/local-run.sh
index 4f4ee7d98..84679d83d 100755
--- a/local-run.sh
+++ b/local-run.sh
@@ -1,16 +1 @@
-cat << "EOF"
- __ __
- / \ / \ ______ _ _ __ __ __
- / /\ / /\ \ / __ \ | | | | / \ \ \ / /
-/ / / / \ \ | | | | | | | | / /\ \ \ /
-\ \ \ \ / / | |__| | | |__| | / ____ \ | |
- \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_|
- \__/ \__/ \ \__
- \___\ by Red Hat
-
- Build, Store, and Distribute your Containers
-
-
-EOF
-
goreman start
diff --git a/local-test.sh b/local-test.sh
new file mode 100755
index 000000000..b7da1c3f3
--- /dev/null
+++ b/local-test.sh
@@ -0,0 +1,8 @@
+set -e
+
+export TEST=true
+export TROLLIUSDEBUG=1
+
+python -m unittest discover -f
+python -m test.registry_tests -f
+#python -m test.queue_threads -f
diff --git a/loghandler.py b/loghandler.py
deleted file mode 100755
index d3d9948cb..000000000
--- a/loghandler.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-
-import datetime
-import json
-import logging
-import re
-import traceback
-
-
-LOG_FORMAT_REGEXP = re.compile(r'\((.+?)\)', re.IGNORECASE)
-
-
-def _json_default(obj):
- """
- Coerce everything to strings.
- All objects representing time get output as ISO8601.
- """
- if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)):
- return obj.isoformat()
-
- elif isinstance(obj, Exception):
- return "Exception: %s" % str(obj)
-
- return str(obj)
-
-
-# skip natural LogRecord attributes
-# http://docs.python.org/library/logging.html#logrecord-attributes
-RESERVED_ATTRS = set([
- 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'levelname',
- 'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
- 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName'
-])
-
-
-class JsonFormatter(logging.Formatter):
- """
- A custom formatter to format logging records as json strings.
- Extra values are formatted with str() if they are not supported by the
- json default encoder.
- """
-
- def __init__(self, *args, **kwargs):
- """
- :param json_default: a function for encoding non-standard objects
- as outlined in http://docs.python.org/2/library/json.html
- :param json_encoder: optional custom encoder
- :param json_serializer: a :meth:`json.dumps`-compatible callable
- that will be used to serialize the log record.
- :param prefix: an optional key prefix to nest logs
- """
- self.json_default = kwargs.pop("json_default", _json_default)
- self.json_encoder = kwargs.pop("json_encoder", None)
- self.json_serializer = kwargs.pop("json_serializer", json.dumps)
- self.default_values = kwargs.pop("default_extra", {})
- self.prefix_key = kwargs.pop("prefix_key", "data")
-
- logging.Formatter.__init__(self, *args, **kwargs)
-
- self._fmt_parameters = self._parse_format_string()
- self._skip_fields = set(self._fmt_parameters)
- self._skip_fields.update(RESERVED_ATTRS)
-
- def _parse_format_string(self):
- """Parses format string looking for substitutions"""
- standard_formatters = LOG_FORMAT_REGEXP
- return standard_formatters.findall(self._fmt)
-
- def add_fields(self, log_record, record, message_dict):
- """
- Override this method to implement custom logic for adding fields.
- """
-
- target = log_record
- if self.prefix_key:
- log_record[self.prefix_key] = {}
- target = log_record[self.prefix_key]
-
- for field, value in record.__dict__.iteritems():
- if field in self._fmt_parameters and field in RESERVED_ATTRS:
- log_record[field] = value
- elif field not in RESERVED_ATTRS:
- target[field] = value
-
- target.update(message_dict)
- target.update(self.default_values)
-
- def format(self, record):
- """Formats a log record and serializes to json"""
- message_dict = {}
- if isinstance(record.msg, dict):
- message_dict = record.msg
- record.message = None
- if "message" in message_dict:
- record.message = message_dict.pop("message", "")
- else:
- record.message = record.getMessage()
-
- # only format time if needed
- if "asctime" in self._fmt_parameters:
- record.asctime = self.formatTime(record, self.datefmt)
-
- # Display formatted exception, but allow overriding it in the
- # user-supplied dict.
- if record.exc_info and not message_dict.get('exc_info'):
- message_dict['exc_info'] = traceback.format_list(traceback.extract_tb(record.exc_info[2]))
- log_record = {}
-
- self.add_fields(log_record, record, message_dict)
-
- return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder)
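
For context, here is a minimal sketch of wiring this formatter into the stdlib logging module; the format string and logger name are illustrative, and the module targets Python 2 (as the use of `iteritems` above implies):

```python
import logging

from loghandler import JsonFormatter  # the module deleted above

handler = logging.StreamHandler()
# Parenthesized names in the format string become top-level JSON fields;
# other extra record attributes are nested under the "data" prefix key.
handler.setFormatter(JsonFormatter('(asctime) (levelname) (message)'))

logger = logging.getLogger('example')
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# The extra dict ends up under the configured prefix key in the JSON output.
logger.info('push received', extra={'repository': 'devtable/simple'})
```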
diff --git a/notifications/models_interface.py b/notifications/models_interface.py
deleted file mode 100644
index 734977c81..000000000
--- a/notifications/models_interface.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from collections import namedtuple
-
-
-class Repository(namedtuple('Repository', ['namespace_name', 'name'])):
- """
- Repository represents a repository.
- """
-
-
-class Notification(
- namedtuple('Notification', [
- 'uuid', 'event_name', 'method_name', 'event_config_dict', 'method_config_dict',
- 'repository'])):
- """
- Notification represents a registered notification of some kind.
- """
diff --git a/notifications/test/test_notificationevent.py b/notifications/test/test_notificationevent.py
deleted file mode 100644
index a3d533820..000000000
--- a/notifications/test/test_notificationevent.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import pytest
-
-from notifications.notificationevent import (BuildSuccessEvent, NotificationEvent,
- VulnerabilityFoundEvent)
-from util.morecollections import AttrDict
-
-from test.fixtures import *
-
-
-@pytest.mark.parametrize('event_kind', NotificationEvent.event_names())
-def test_create_notifications(event_kind):
- assert NotificationEvent.get_event(event_kind) is not None
-
-
-@pytest.mark.parametrize('event_name', NotificationEvent.event_names())
-def test_build_notification(event_name, initialized_db):
- # Create the notification event.
- found = NotificationEvent.get_event(event_name)
- sample_data = found.get_sample_data('foo', 'bar', {'level': 'low'})
-
- # Make sure all calls succeed.
- notification_data = {
- 'performer_data': {},
- }
-
- found.get_level(sample_data, notification_data)
- found.get_summary(sample_data, notification_data)
- found.get_message(sample_data, notification_data)
-
-
-def test_build_emptyjson():
- notification_data = AttrDict({
- 'event_config_dict': None,
- })
-
- # No build data at all.
- assert BuildSuccessEvent().should_perform({}, notification_data)
-
-def test_build_nofilter():
- notification_data = AttrDict({
- 'event_config_dict': {},
- })
-
- # No build data at all.
- assert BuildSuccessEvent().should_perform({}, notification_data)
-
- # With trigger metadata but no ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {},
- }, notification_data)
-
- # With trigger metadata and a ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/somebranch',
- },
- }, notification_data)
-
-
-def test_build_emptyfilter():
- notification_data = AttrDict({
- 'event_config_dict': {"ref-regex": ""},
- })
-
- # No build data at all.
- assert BuildSuccessEvent().should_perform({}, notification_data)
-
- # With trigger metadata but no ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {},
- }, notification_data)
-
- # With trigger metadata and a ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/somebranch',
- },
- }, notification_data)
-
-
-def test_build_invalidfilter():
- notification_data = AttrDict({
- 'event_config_dict': {"ref-regex": "]["},
- })
-
- # No build data at all.
- assert not BuildSuccessEvent().should_perform({}, notification_data)
-
- # With trigger metadata but no ref.
- assert not BuildSuccessEvent().should_perform({
- 'trigger_metadata': {},
- }, notification_data)
-
- # With trigger metadata and a ref.
- assert not BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/somebranch',
- },
- }, notification_data)
-
-
-def test_build_withfilter():
- notification_data = AttrDict({
- 'event_config_dict': {"ref-regex": "refs/heads/master"},
- })
-
- # No build data at all.
- assert not BuildSuccessEvent().should_perform({}, notification_data)
-
- # With trigger metadata but no ref.
- assert not BuildSuccessEvent().should_perform({
- 'trigger_metadata': {},
- }, notification_data)
-
- # With trigger metadata and a not-matching ref.
- assert not BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/somebranch',
- },
- }, notification_data)
-
- # With trigger metadata and a matching ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/master',
- },
- }, notification_data)
-
-
-def test_build_withwildcardfilter():
- notification_data = AttrDict({
- 'event_config_dict': {"ref-regex": "refs/heads/.+"},
- })
-
- # No build data at all.
- assert not BuildSuccessEvent().should_perform({}, notification_data)
-
- # With trigger metadata but no ref.
- assert not BuildSuccessEvent().should_perform({
- 'trigger_metadata': {},
- }, notification_data)
-
- # With trigger metadata and a not-matching ref.
- assert not BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/tags/sometag',
- },
- }, notification_data)
-
- # With trigger metadata and a matching ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/master',
- },
- }, notification_data)
-
- # With trigger metadata and another matching ref.
- assert BuildSuccessEvent().should_perform({
- 'trigger_metadata': {
- 'ref': 'refs/heads/somebranch',
- },
- }, notification_data)
-
-
-def test_vulnerability_notification_nolevel():
- notification_data = AttrDict({
- 'event_config_dict': {},
- })
-
- # No level specified.
- assert VulnerabilityFoundEvent().should_perform({}, notification_data)
-
-
-def test_vulnerability_notification_nopvulninfo():
- notification_data = AttrDict({
- 'event_config_dict': {"level": 3},
- })
-
- # No vuln info.
- assert not VulnerabilityFoundEvent().should_perform({}, notification_data)
-
-
-def test_vulnerability_notification_normal():
- notification_data = AttrDict({
- 'event_config_dict': {"level": 3},
- })
-
- info = {"vulnerability": {"priority": "Critical"}}
- assert VulnerabilityFoundEvent().should_perform(info, notification_data)
diff --git a/notifications/test/test_notificationmethod.py b/notifications/test/test_notificationmethod.py
deleted file mode 100644
index bde33e574..000000000
--- a/notifications/test/test_notificationmethod.py
+++ /dev/null
@@ -1,156 +0,0 @@
-import pytest
-
-from mock import patch, Mock
-from httmock import urlmatch, HTTMock
-
-from data import model
-from notifications.notificationmethod import (QuayNotificationMethod, EmailMethod, WebhookMethod,
- FlowdockMethod, HipchatMethod, SlackMethod,
- CannotValidateNotificationMethodException)
-from notifications.notificationevent import NotificationEvent
-from notifications.models_interface import Repository, Notification
-
-from test.fixtures import *
-
-def assert_validated(method, method_config, error_message, namespace_name, repo_name):
- if error_message is None:
- method.validate(namespace_name, repo_name, method_config)
- else:
- with pytest.raises(CannotValidateNotificationMethodException) as ipe:
- method.validate(namespace_name, repo_name, method_config)
- assert str(ipe.value) == error_message
-
-
-@pytest.mark.parametrize('method_config,error_message', [
- ({}, 'Missing target'),
- ({'target': {'name': 'invaliduser', 'kind': 'user'}}, 'Unknown user invaliduser'),
- ({'target': {'name': 'invalidorg', 'kind': 'org'}}, 'Unknown organization invalidorg'),
- ({'target': {'name': 'invalidteam', 'kind': 'team'}}, 'Unknown team invalidteam'),
-
- ({'target': {'name': 'devtable', 'kind': 'user'}}, None),
- ({'target': {'name': 'buynlarge', 'kind': 'org'}}, None),
- ({'target': {'name': 'owners', 'kind': 'team'}}, None),
-])
-def test_validate_quay_notification(method_config, error_message, initialized_db):
- method = QuayNotificationMethod()
- assert_validated(method, method_config, error_message, 'buynlarge', 'orgrepo')
-
-
-@pytest.mark.parametrize('method_config,error_message', [
- ({}, 'Missing e-mail address'),
- ({'email': 'a@b.com'}, 'The specified e-mail address is not authorized to receive '
- 'notifications for this repository'),
-
- ({'email': 'jschorr@devtable.com'}, None),
-])
-def test_validate_email(method_config, error_message, initialized_db):
- method = EmailMethod()
- assert_validated(method, method_config, error_message, 'devtable', 'simple')
-
-
-@pytest.mark.parametrize('method_config,error_message', [
- ({}, 'Missing webhook URL'),
- ({'url': 'http://example.com'}, None),
-])
-def test_validate_webhook(method_config, error_message, initialized_db):
- method = WebhookMethod()
- assert_validated(method, method_config, error_message, 'devtable', 'simple')
-
-
-@pytest.mark.parametrize('method_config,error_message', [
- ({}, 'Missing Flowdock API Token'),
- ({'flow_api_token': 'sometoken'}, None),
-])
-def test_validate_flowdock(method_config, error_message, initialized_db):
- method = FlowdockMethod()
- assert_validated(method, method_config, error_message, 'devtable', 'simple')
-
-
-@pytest.mark.parametrize('method_config,error_message', [
- ({}, 'Missing Hipchat Room Notification Token'),
- ({'notification_token': 'sometoken'}, 'Missing Hipchat Room ID'),
- ({'notification_token': 'sometoken', 'room_id': 'foo'}, None),
-])
-def test_validate_hipchat(method_config, error_message, initialized_db):
- method = HipchatMethod()
- assert_validated(method, method_config, error_message, 'devtable', 'simple')
-
-
-@pytest.mark.parametrize('method_config,error_message', [
- ({}, 'Missing Slack Callback URL'),
- ({'url': 'http://example.com'}, None),
-])
-def test_validate_slack(method_config, error_message, initialized_db):
- method = SlackMethod()
- assert_validated(method, method_config, error_message, 'devtable', 'simple')
-
-
-@pytest.mark.parametrize('target,expected_users', [
- ({'name': 'devtable', 'kind': 'user'}, ['devtable']),
- ({'name': 'buynlarge', 'kind': 'org'}, ['buynlarge']),
- ({'name': 'creators', 'kind': 'team'}, ['creator']),
-])
-def test_perform_quay_notification(target, expected_users, initialized_db):
- repository = Repository('buynlarge', 'orgrepo')
- notification = Notification(uuid='fake', event_name='repo_push', method_name='quay',
- event_config_dict={}, method_config_dict={'target': target},
- repository=repository)
-
- event_handler = NotificationEvent.get_event('repo_push')
-
- sample_data = event_handler.get_sample_data(repository.namespace_name, repository.name, {})
-
- method = QuayNotificationMethod()
- method.perform(notification, event_handler, {'event_data': sample_data})
-
- # Ensure that the notification was written for all the expected users.
- if target['kind'] != 'team':
- user = model.user.get_namespace_user(target['name'])
- assert len(model.notification.list_notifications(user, kind_name='repo_push')) > 0
-
-
-def test_perform_email(initialized_db):
- repository = Repository('buynlarge', 'orgrepo')
- notification = Notification(uuid='fake', event_name='repo_push', method_name='email',
- event_config_dict={}, method_config_dict={'email': 'test@example.com'},
- repository=repository)
-
- event_handler = NotificationEvent.get_event('repo_push')
- sample_data = event_handler.get_sample_data(repository.namespace_name, repository.name, {})
-
- mock = Mock()
- def get_mock(*args, **kwargs):
- return mock
-
- with patch('notifications.notificationmethod.Message', get_mock):
- method = EmailMethod()
- method.perform(notification, event_handler, {'event_data': sample_data, 'performer_data': {}})
-
- mock.send.assert_called_once()
-
-
-@pytest.mark.parametrize('method, method_config, netloc', [
- (WebhookMethod, {'url': 'http://testurl'}, 'testurl'),
- (FlowdockMethod, {'flow_api_token': 'token'}, 'api.flowdock.com'),
- (HipchatMethod, {'notification_token': 'token', 'room_id': 'foo'}, 'api.hipchat.com'),
- (SlackMethod, {'url': 'http://example.com'}, 'example.com'),
-])
-def test_perform_http_call(method, method_config, netloc, initialized_db):
- repository = Repository('buynlarge', 'orgrepo')
- notification = Notification(uuid='fake', event_name='repo_push', method_name=method.method_name(),
- event_config_dict={}, method_config_dict=method_config,
- repository=repository)
-
- event_handler = NotificationEvent.get_event('repo_push')
- sample_data = event_handler.get_sample_data(repository.namespace_name, repository.name, {})
-
- url_hit = [False]
- @urlmatch(netloc=netloc)
- def url_handler(_, __):
- url_hit[0] = True
- return ''
-
- with HTTMock(url_handler):
- method().perform(notification, event_handler, {'event_data': sample_data, 'performer_data': {}})
-
- assert url_hit[0]
diff --git a/oauth/__init__.py b/oauth/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/oauth/base.py b/oauth/base.py
deleted file mode 100644
index 2ed0af706..000000000
--- a/oauth/base.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import copy
-import logging
-import urllib
-import urlparse
-
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-from util.config import URLSchemeAndHostname
-
-logger = logging.getLogger(__name__)
-
-
-class OAuthEndpoint(object):
- def __init__(self, base_url, params=None):
- self.base_url = base_url
- self.params = params or {}
-
- def with_param(self, name, value):
- params_copy = copy.copy(self.params)
- params_copy[name] = value
- return OAuthEndpoint(self.base_url, params_copy)
-
- def with_params(self, parameters):
- params_copy = copy.copy(self.params)
- params_copy.update(parameters)
- return OAuthEndpoint(self.base_url, params_copy)
-
- def to_url(self):
- (scheme, netloc, path, _, fragment) = urlparse.urlsplit(self.base_url)
- updated_query = urllib.urlencode(self.params)
- return urlparse.urlunsplit((scheme, netloc, path, updated_query, fragment))
-
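
As a rough illustration of the endpoint builder above (assuming `oauth.base` is importable; the parameter values are made up):

```python
from oauth.base import OAuthEndpoint

endpoint = OAuthEndpoint('https://github.com/login/oauth/authorize')
url = endpoint.with_params({'client_id': 'someid', 'state': 'csrf123'}).to_url()
# Produces https://github.com/login/oauth/authorize?client_id=someid&state=csrf123
# (query-parameter order depends on dict iteration order).
```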
-class OAuthExchangeCodeException(Exception):
- """ Exception raised if a code exchange fails. """
- pass
-
-class OAuthGetUserInfoException(Exception):
- """ Exception raised if a call to get user information fails. """
- pass
-
-@add_metaclass(ABCMeta)
-class OAuthService(object):
- """ A base class for defining an external service, exposed via OAuth. """
- def __init__(self, config, key_name):
- self.key_name = key_name
- self.config = config.get(key_name) or {}
-
- @abstractmethod
- def service_id(self):
- """ The internal ID for this service. Must match the URL portion for the service, e.g. `github`
- """
- pass
-
- @abstractmethod
- def service_name(self):
- """ The user-readable name for the service, e.g. `GitHub`"""
- pass
-
- @abstractmethod
- def token_endpoint(self):
- """ Returns the endpoint at which the OAuth code can be exchanged for a token. """
- pass
-
- @abstractmethod
- def user_endpoint(self):
- """ Returns the endpoint at which user information can be looked up. """
- pass
-
- @abstractmethod
- def authorize_endpoint(self):
- """ Returns the for authorization of the OAuth service. """
- pass
-
- @abstractmethod
- def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
- """ Performs validation of the client ID and secret, raising an exception on failure. """
- pass
-
- def requires_form_encoding(self):
- """ Returns True if form encoding is necessary for the exchange_code_for_token call. """
- return False
-
- def client_id(self):
- return self.config.get('CLIENT_ID')
-
- def client_secret(self):
- return self.config.get('CLIENT_SECRET')
-
- def login_binding_field(self):
- """ Returns the name of the field (`username` or `email`) used for auto binding an external
- login service account to an *internal* login service account. For example, if the external
- login service is GitHub and the internal login service is LDAP, a value of `email` here
- will cause login-with-GitHub to conduct a search (via email) in LDAP for a user, and
- auto-bind the external and internal users together. May return None, in which case no binding
- is performed, and login with this external account will simply create a new account in the
- database.
- """
- return self.config.get('LOGIN_BINDING_FIELD', None)
-
- def get_auth_url(self, url_scheme_and_hostname, redirect_suffix, csrf_token, scopes):
- """ Retrieves the authorization URL for this login service. """
- redirect_uri = '%s/oauth2/%s/callback%s' % (url_scheme_and_hostname.get_url(),
- self.service_id(),
- redirect_suffix)
- params = {
- 'client_id': self.client_id(),
- 'redirect_uri': redirect_uri,
- 'scope': ' '.join(scopes),
- 'state': csrf_token,
- }
-
- return self.authorize_endpoint().with_params(params).to_url()
-
- def get_redirect_uri(self, url_scheme_and_hostname, redirect_suffix=''):
- return '%s://%s/oauth2/%s/callback%s' % (url_scheme_and_hostname.url_scheme,
- url_scheme_and_hostname.hostname,
- self.service_id(),
- redirect_suffix)
-
- def get_user_info(self, http_client, token):
- token_param = {
- 'alt': 'json',
- }
-
- headers = {
- 'Authorization': 'Bearer %s' % token,
- }
-
- got_user = http_client.get(self.user_endpoint().to_url(), params=token_param, headers=headers)
- if got_user.status_code // 100 != 2:
- raise OAuthGetUserInfoException('Non-2XX response code for user_info call: %s' %
- got_user.status_code)
-
- user_info = got_user.json()
- if user_info is None:
- raise OAuthGetUserInfoException()
-
- return user_info
-
- def exchange_code_for_token(self, app_config, http_client, code, form_encode=False,
- redirect_suffix='', client_auth=False):
- """ Exchanges an OAuth access code for the associated OAuth token. """
- json_data = self.exchange_code(app_config, http_client, code, form_encode, redirect_suffix,
- client_auth)
-
- access_token = json_data.get('access_token', None)
- if access_token is None:
- logger.debug('Got successful get_access_token response with missing token: %s', json_data)
- raise OAuthExchangeCodeException('Missing `access_token` in OAuth response')
-
- return access_token
-
- def exchange_code(self, app_config, http_client, code, form_encode=False, redirect_suffix='',
- client_auth=False):
- """ Exchanges an OAuth access code for associated OAuth token and other data. """
- url_scheme_and_hostname = URLSchemeAndHostname.from_app_config(app_config)
- payload = {
- 'code': code,
- 'grant_type': 'authorization_code',
- 'redirect_uri': self.get_redirect_uri(url_scheme_and_hostname, redirect_suffix)
- }
-
- headers = {
- 'Accept': 'application/json'
- }
-
- auth = None
- if client_auth:
- auth = (self.client_id(), self.client_secret())
- else:
- payload['client_id'] = self.client_id()
- payload['client_secret'] = self.client_secret()
-
- token_url = self.token_endpoint().to_url()
- if form_encode:
- get_access_token = http_client.post(token_url, data=payload, headers=headers, auth=auth)
- else:
- get_access_token = http_client.post(token_url, params=payload, headers=headers, auth=auth)
-
- if get_access_token.status_code // 100 != 2:
- logger.debug('Got get_access_token response %s', get_access_token.text)
- raise OAuthExchangeCodeException('Got non-2XX response for code exchange: %s' %
- get_access_token.status_code)
-
- json_data = get_access_token.json()
- if not json_data:
- raise OAuthExchangeCodeException('Got non-JSON response for code exchange')
-
- if 'error' in json_data:
- raise OAuthExchangeCodeException(json_data.get('error_description', json_data['error']))
-
- return json_data
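
For reference, a condensed sketch of the same code-for-token exchange performed with `requests` directly; the endpoint, client credentials, and code are placeholders:

```python
import requests

payload = {
    'code': 'received-oauth-code',
    'grant_type': 'authorization_code',
    'redirect_uri': 'https://quay.example.com/oauth2/github/callback',
    'client_id': 'someid',
    'client_secret': 'somesecret',
}
# GitHub's token endpoint accepts the payload as query parameters;
# services that require form encoding would pass data=payload instead.
resp = requests.post('https://github.com/login/oauth/access_token',
                     params=payload, headers={'Accept': 'application/json'})
resp.raise_for_status()
access_token = resp.json()['access_token']
```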
diff --git a/oauth/login.py b/oauth/login.py
deleted file mode 100644
index 12bae420b..000000000
--- a/oauth/login.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import logging
-
-from abc import ABCMeta, abstractmethod
-from six import add_metaclass
-
-import features
-
-from oauth.base import OAuthService, OAuthExchangeCodeException, OAuthGetUserInfoException
-
-logger = logging.getLogger(__name__)
-
-class OAuthLoginException(Exception):
- """ Exception raised if a login operation fails. """
- pass
-
-
-@add_metaclass(ABCMeta)
-class OAuthLoginService(OAuthService):
- """ A base class for defining an OAuth-compliant service that can be used for, amongst other
- things, login and authentication. """
-
- @abstractmethod
- def login_enabled(self):
- """ Returns true if the login service is enabled. """
- pass
-
- @abstractmethod
- def get_login_service_id(self, user_info):
- """ Returns the internal ID for the given user under this login service. """
- pass
-
- @abstractmethod
- def get_login_service_username(self, user_info):
- """ Returns the username for the given user under this login service. """
- pass
-
- @abstractmethod
- def get_verified_user_email(self, app_config, http_client, token, user_info):
- """ Returns the verified email address for the given user, if any or None if none. """
- pass
-
- @abstractmethod
- def get_icon(self):
- """ Returns the icon to display for this login service. """
- pass
-
- @abstractmethod
- def get_login_scopes(self):
- """ Returns the list of scopes for login for this service. """
- pass
-
- def service_verify_user_info_for_login(self, app_config, http_client, token, user_info):
- """ Performs service-specific verification of user information for login. On failure, a service
- should raise an OAuthLoginException.
- """
- # By default, does nothing.
- pass
-
- def exchange_code_for_login(self, app_config, http_client, code, redirect_suffix):
- """ Exchanges the given OAuth access code for user information on behalf of a user trying to
- login or attach their account. Raises an OAuthLoginException on failure. Returns
- a tuple consisting of (service_id, service_username, email)
- """
-
- # Retrieve the token for the OAuth code.
- try:
- token = self.exchange_code_for_token(app_config, http_client, code,
- redirect_suffix=redirect_suffix,
- form_encode=self.requires_form_encoding())
- except OAuthExchangeCodeException as oce:
- raise OAuthLoginException(str(oce))
-
- # Retrieve the user's information with the token.
- try:
- user_info = self.get_user_info(http_client, token)
- except OAuthGetUserInfoException as oge:
- raise OAuthLoginException(str(oge))
-
- if user_info.get('id', None) is None:
- logger.debug('Got user info response %s', user_info)
- raise OAuthLoginException('Missing `id` column in returned user information')
-
- # Perform any custom verification for this login service.
- self.service_verify_user_info_for_login(app_config, http_client, token, user_info)
-
- # Retrieve the user's email address (if necessary).
- email_address = self.get_verified_user_email(app_config, http_client, token, user_info)
- if features.MAILING and email_address is None:
- raise OAuthLoginException('A verified email address is required to login with this service')
-
- service_user_id = self.get_login_service_id(user_info)
- service_username = self.get_login_service_username(user_info)
-
- logger.debug('Completed successful exchange for service %s: %s, %s, %s',
- self.service_id(), service_user_id, service_username, email_address)
- return (service_user_id, service_username, email_address)
diff --git a/oauth/loginmanager.py b/oauth/loginmanager.py
deleted file mode 100644
index ea45890ea..000000000
--- a/oauth/loginmanager.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from oauth.services.github import GithubOAuthService
-from oauth.services.google import GoogleOAuthService
-from oauth.oidc import OIDCLoginService
-
-CUSTOM_LOGIN_SERVICES = {
- 'GITHUB_LOGIN_CONFIG': GithubOAuthService,
- 'GOOGLE_LOGIN_CONFIG': GoogleOAuthService,
-}
-
-PREFIX_BLACKLIST = ['ldap', 'jwt', 'keystone']
-
-class OAuthLoginManager(object):
- """ Helper class which manages all registered OAuth login services. """
- def __init__(self, config, client=None):
- self.services = []
-
- # Register the endpoints for each of the OAuth login services.
- for key in config.keys():
- # All keys which end in _LOGIN_CONFIG setup a login service.
- if key.endswith('_LOGIN_CONFIG'):
- if key in CUSTOM_LOGIN_SERVICES:
- custom_service = CUSTOM_LOGIN_SERVICES[key](config, key)
- if custom_service.login_enabled(config):
- self.services.append(custom_service)
- else:
- prefix = key[:-len('_LOGIN_CONFIG')].lower()
- if prefix in PREFIX_BLACKLIST:
- raise Exception('Cannot use reserved config name %s' % key)
-
- self.services.append(OIDCLoginService(config, key, client=client))
-
- def get_service(self, service_id):
- for service in self.services:
- if service.service_id() == service_id:
- return service
-
- return None
\ No newline at end of file
diff --git a/oauth/oidc.py b/oauth/oidc.py
deleted file mode 100644
index c7273035b..000000000
--- a/oauth/oidc.py
+++ /dev/null
@@ -1,328 +0,0 @@
-import time
-import json
-import logging
-import urlparse
-
-import jwt
-
-from cachetools.func import lru_cache
-from cachetools.ttl import TTLCache
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.serialization import load_der_public_key
-from jwkest.jwk import KEYS
-
-from oauth.base import (OAuthService, OAuthExchangeCodeException, OAuthGetUserInfoException,
- OAuthEndpoint)
-from oauth.login import OAuthLoginException
-from util.security.jwtutil import decode, InvalidTokenError
-
-logger = logging.getLogger(__name__)
-
-
-OIDC_WELLKNOWN = ".well-known/openid-configuration"
-PUBLIC_KEY_CACHE_TTL = 3600 # 1 hour
-ALLOWED_ALGORITHMS = ['RS256']
-JWT_CLOCK_SKEW_SECONDS = 30
-
-class DiscoveryFailureException(Exception):
- """ Exception raised when OIDC discovery fails. """
- pass
-
-class PublicKeyLoadException(Exception):
- """ Exception raised if loading the OIDC public key fails. """
- pass
-
-
-class OIDCLoginService(OAuthService):
- """ Defines a generic service for all OpenID-connect compatible login services. """
- def __init__(self, config, key_name, client=None):
- super(OIDCLoginService, self).__init__(config, key_name)
-
- self._id = key_name[0:key_name.find('_')].lower()
- self._http_client = client or config.get('HTTPCLIENT')
- self._mailing = config.get('FEATURE_MAILING', False)
- self._public_key_cache = _PublicKeyCache(self, 1, PUBLIC_KEY_CACHE_TTL)
-
- def service_id(self):
- return self._id
-
- def service_name(self):
- return self.config.get('SERVICE_NAME', self.service_id())
-
- def get_icon(self):
- return self.config.get('SERVICE_ICON', 'fa-user-circle')
-
- def get_login_scopes(self):
- default_scopes = ['openid']
-
- if self.user_endpoint() is not None:
- default_scopes.append('profile')
-
- if self._mailing:
- default_scopes.append('email')
-
- supported_scopes = self._oidc_config().get('scopes_supported', default_scopes)
- login_scopes = self.config.get('LOGIN_SCOPES') or supported_scopes
- return list(set(login_scopes) & set(supported_scopes))
-
- def authorize_endpoint(self):
- return self._get_endpoint('authorization_endpoint').with_param('response_type', 'code')
-
- def token_endpoint(self):
- return self._get_endpoint('token_endpoint')
-
- def user_endpoint(self):
- return self._get_endpoint('userinfo_endpoint')
-
- def _get_endpoint(self, endpoint_key, **kwargs):
- """ Returns the OIDC endpoint with the given key found in the OIDC discovery
- document, with the given kwargs added as query parameters. Additionally,
- any defined parameters found in the OIDC configuration block are also
- added.
- """
- endpoint = self._oidc_config().get(endpoint_key, '')
- if not endpoint:
- return None
-
- (scheme, netloc, path, query, fragment) = urlparse.urlsplit(endpoint)
-
- # Add the query parameters from the kwargs and the config.
- custom_parameters = self.config.get('OIDC_ENDPOINT_CUSTOM_PARAMS', {}).get(endpoint_key, {})
-
- query_params = urlparse.parse_qs(query, keep_blank_values=True)
- query_params.update(kwargs)
- query_params.update(custom_parameters)
- return OAuthEndpoint(urlparse.urlunsplit((scheme, netloc, path, '', fragment)), query_params)
-
- def validate(self):
- return bool(self.get_login_scopes())
-
- def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
- # TODO: find a way to verify client secret too.
- check_auth_url = http_client.get(self.get_auth_url(url_scheme_and_hostname, '', '', []))
- if check_auth_url.status_code // 100 != 2:
- raise Exception('Got non-2XX status code for authorization endpoint')
-
- def requires_form_encoding(self):
- return True
-
- def get_public_config(self):
- return {
- 'CLIENT_ID': self.client_id(),
- 'OIDC': True,
- }
-
- def exchange_code_for_tokens(self, app_config, http_client, code, redirect_suffix):
- # Exchange the code for the access token and id_token
- try:
- json_data = self.exchange_code(app_config, http_client, code,
- redirect_suffix=redirect_suffix,
- form_encode=self.requires_form_encoding())
- except OAuthExchangeCodeException as oce:
- raise OAuthLoginException(str(oce))
-
- # Make sure we received both.
- access_token = json_data.get('access_token', None)
- if access_token is None:
- logger.debug('Missing access_token in response: %s', json_data)
- raise OAuthLoginException('Missing `access_token` in OIDC response')
-
- id_token = json_data.get('id_token', None)
- if id_token is None:
- logger.debug('Missing id_token in response: %s', json_data)
- raise OAuthLoginException('Missing `id_token` in OIDC response')
-
- return id_token, access_token
-
- def exchange_code_for_login(self, app_config, http_client, code, redirect_suffix):
- # Exchange the code for the access token and id_token
- id_token, access_token = self.exchange_code_for_tokens(app_config, http_client, code,
- redirect_suffix)
-
- # Decode the id_token.
- try:
- decoded_id_token = self.decode_user_jwt(id_token)
- except InvalidTokenError as ite:
- logger.exception('Got invalid token error on OIDC decode: %s', ite)
- raise OAuthLoginException('Could not decode OIDC token')
- except PublicKeyLoadException as pke:
- logger.exception('Could not load public key during OIDC decode: %s', pke)
- raise OAuthLoginException('Could not find public OIDC key')
-
- # If there is a user endpoint, use it to retrieve the user's information. Otherwise, we use
- # the decoded ID token.
- if self.user_endpoint():
- # Retrieve the user information.
- try:
- user_info = self.get_user_info(http_client, access_token)
- except OAuthGetUserInfoException as oge:
- raise OAuthLoginException(str(oge))
- else:
- user_info = decoded_id_token
-
- # Verify subs.
- if user_info['sub'] != decoded_id_token['sub']:
- logger.debug('Mismatch in `sub` returned by OIDC user info endpoint: %s vs %s',
- user_info['sub'], decoded_id_token['sub'])
- raise OAuthLoginException('Mismatch in `sub` returned by OIDC user info endpoint')
-
- # Check if we have a verified email address.
- if self.config.get('VERIFIED_EMAIL_CLAIM_NAME'):
- email_address = user_info.get(self.config['VERIFIED_EMAIL_CLAIM_NAME'])
- else:
- email_address = user_info.get('email') if user_info.get('email_verified') else None
-
- logger.debug('Found e-mail address `%s` for sub `%s`', email_address, user_info['sub'])
- if self._mailing:
- if email_address is None:
- raise OAuthLoginException('A verified email address is required to login with this service')
-
- # Check for a preferred username.
- if self.config.get('PREFERRED_USERNAME_CLAIM_NAME'):
- lusername = user_info.get(self.config['PREFERRED_USERNAME_CLAIM_NAME'])
- else:
- lusername = user_info.get('preferred_username')
- if lusername is None:
- # Note: Active Directory provides `unique_name` and `upn`.
- # https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-id-and-access-tokens
- lusername = user_info.get('unique_name', user_info.get('upn'))
-
- if lusername is None:
- lusername = user_info['sub']
-
- if lusername.find('@') >= 0:
- lusername = lusername[0:lusername.find('@')]
-
- return decoded_id_token['sub'], lusername, email_address
-
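
The preferred-username selection above boils down to a fallback chain like this sketch (claim names are taken from the code; `user_info` is a placeholder dict):

```python
def pick_username(user_info, preferred_claim=None):
    # Fallback order: configured claim -> preferred_username ->
    # unique_name/upn (Active Directory) -> sub; strip any @domain suffix.
    username = ((preferred_claim and user_info.get(preferred_claim))
                or user_info.get('preferred_username')
                or user_info.get('unique_name', user_info.get('upn'))
                or user_info['sub'])
    return username.split('@', 1)[0]
```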
- @property
- def _issuer(self):
- # Read the issuer from the OIDC config, falling back to the configured OIDC server.
- issuer = self._oidc_config().get('issuer', self.config['OIDC_SERVER'])
-
- # If specified, use the overridden OIDC issuer.
- return self.config.get('OIDC_ISSUER', issuer)
-
- @lru_cache(maxsize=1)
- def _oidc_config(self):
- if self.config.get('OIDC_SERVER'):
- return self._load_oidc_config_via_discovery(self.config.get('DEBUGGING', False))
- else:
- return {}
-
- def _load_oidc_config_via_discovery(self, is_debugging):
- """ Attempts to load the OIDC config via the OIDC discovery mechanism. If is_debugging is True,
- non-secure connections are allowed. Raises a DiscoveryFailureException on failure.
- """
- oidc_server = self.config['OIDC_SERVER']
- if not oidc_server.startswith('https://') and not is_debugging:
- raise DiscoveryFailureException('OIDC server must be accessed over SSL')
-
- discovery_url = urlparse.urljoin(oidc_server, OIDC_WELLKNOWN)
- discovery = self._http_client.get(discovery_url, timeout=5, verify=is_debugging is False)
- if discovery.status_code // 100 != 2:
- logger.debug('Got %s response for OIDC discovery: %s', discovery.status_code, discovery.text)
- raise DiscoveryFailureException("Could not load OIDC discovery information")
-
- try:
- return json.loads(discovery.text)
- except ValueError:
- logger.exception('Could not parse OIDC discovery for url: %s', discovery_url)
- raise DiscoveryFailureException("Could not parse OIDC discovery information")
-
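
For reference, the discovery step reduces to a single well-known request; a sketch with a placeholder issuer:

```python
import requests

OIDC_WELLKNOWN = '.well-known/openid-configuration'
oidc_server = 'https://issuer.example.com/'  # placeholder; must end with a slash

# The discovery document advertises the authorization, token, userinfo,
# and JWKS endpoints used elsewhere in this service.
discovery = requests.get(oidc_server + OIDC_WELLKNOWN, timeout=5)
discovery.raise_for_status()
oidc_config = discovery.json()
print(oidc_config['token_endpoint'], oidc_config['jwks_uri'])
```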
- def decode_user_jwt(self, token):
- """ Decodes the given JWT under the given provider and returns it. Raises an InvalidTokenError
- exception on an invalid token or a PublicKeyLoadException if the public key could not be
- loaded for decoding.
- """
- # Find the key to use.
- headers = jwt.get_unverified_header(token)
- kid = headers.get('kid', None)
- if kid is None:
- raise InvalidTokenError('Missing `kid` header')
-
- logger.debug('Using key `%s`, attempting to decode token `%s` with aud `%s` and iss `%s`',
- kid, token, self.client_id(), self._issuer)
- try:
- return decode(token, self._get_public_key(kid), algorithms=ALLOWED_ALGORITHMS,
- audience=self.client_id(),
- issuer=self._issuer,
- leeway=JWT_CLOCK_SKEW_SECONDS,
- options=dict(require_nbf=False))
- except InvalidTokenError as ite:
- logger.warning('Could not decode token `%s` for OIDC: %s. Will attempt again after ' +
- 'retrieving public keys.', token, ite)
-
- # Public key may have expired. Try to retrieve an updated public key and use it to decode.
- try:
- return decode(token, self._get_public_key(kid, force_refresh=True),
- algorithms=ALLOWED_ALGORITHMS,
- audience=self.client_id(),
- issuer=self._issuer,
- leeway=JWT_CLOCK_SKEW_SECONDS,
- options=dict(require_nbf=False))
- except InvalidTokenError as ite:
- logger.warning('Could not decode token `%s` for OIDC: %s. Attempted again after ' +
- 'retrieving public keys.', token, ite)
-
- # Decode again with verify=False, and log the decoded token to allow for easier debugging.
- nonverified = decode(token, self._get_public_key(kid, force_refresh=True),
- algorithms=ALLOWED_ALGORITHMS,
- audience=self.client_id(),
- issuer=self._issuer,
- leeway=JWT_CLOCK_SKEW_SECONDS,
- options=dict(require_nbf=False, verify=False))
- logger.debug('Got an error when trying to verify OIDC JWT: %s', nonverified)
- raise ite
-
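
A condensed sketch of the verification performed above, using PyJWT directly (the token, key, and issuer values are placeholders; the deleted code routes through `util.security.jwtutil.decode` instead):

```python
import jwt  # PyJWT

def verify_oidc_token(token, public_key, client_id, issuer):
    # Tokens are matched to signing keys via the kid header.
    header = jwt.get_unverified_header(token)
    if header.get('kid') is None:
        raise jwt.InvalidTokenError('Missing `kid` header')

    # Verify signature, audience, and issuer, allowing 30s of clock skew.
    return jwt.decode(token, public_key, algorithms=['RS256'],
                      audience=client_id, issuer=issuer, leeway=30)
```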
- def _get_public_key(self, kid, force_refresh=False):
- """ Retrieves the public key for this handler with the given kid. Raises a
- PublicKeyLoadException on failure. """
-
- # If force_refresh is true, we expire all the items in the cache by setting the time to
- # the current time + the expiration TTL.
- if force_refresh:
- self._public_key_cache.expire(time=time.time() + PUBLIC_KEY_CACHE_TTL)
-
- # Retrieve the public key from the cache. If the cache does not contain the public key,
- # it will internally call _load_public_key to retrieve it and then save it.
- return self._public_key_cache[kid]
-
-
-class _PublicKeyCache(TTLCache):
- def __init__(self, login_service, *args, **kwargs):
- super(_PublicKeyCache, self).__init__(*args, **kwargs)
-
- self._login_service = login_service
-
- def __missing__(self, kid):
- """ Loads the public key for this handler from the OIDC service. Raises PublicKeyLoadException
- on failure.
- """
- keys_url = self._login_service._oidc_config()['jwks_uri']
-
- # Load the keys.
- try:
- keys = KEYS()
- keys.load_from_url(keys_url, verify=not self._login_service.config.get('DEBUGGING', False))
- except Exception as ex:
- logger.exception('Exception loading public key')
- raise PublicKeyLoadException(str(ex))
-
- # Find the matching key.
- keys_found = keys.by_kid(kid)
- if len(keys_found) == 0:
- raise PublicKeyLoadException('Public key %s not found' % kid)
-
- rsa_keys = [key for key in keys_found if key.kty == 'RSA']
- if len(rsa_keys) == 0:
- raise PublicKeyLoadException('No RSA form of public key %s found' % kid)
-
- matching_key = rsa_keys[0]
- matching_key.deserialize()
-
- # Reload the key so that we can give a key *instance* to PyJWT to work around its weird parsing
- # issues.
- final_key = load_der_public_key(matching_key.key.exportKey('DER'), backend=default_backend())
- self[kid] = final_key
- return final_key
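
The cache-miss pattern used by `_PublicKeyCache` generalizes to this small sketch (the loader function is a stand-in):

```python
from cachetools.ttl import TTLCache

class LoadingCache(TTLCache):
    """TTL cache whose __missing__ hook loads, stores, and returns the value,
    so plain indexing transparently populates the cache."""

    def __init__(self, loader, maxsize, ttl):
        super(LoadingCache, self).__init__(maxsize, ttl)
        self._loader = loader

    def __missing__(self, key):
        value = self._loader(key)
        self[key] = value
        return value

cache = LoadingCache(lambda kid: 'key-material-for-%s' % kid, 1, 3600)
print(cache['some-kid'])  # loaded on first access, then cached for the TTL
```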
diff --git a/oauth/services/__init__.py b/oauth/services/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/oauth/services/github.py b/oauth/services/github.py
deleted file mode 100644
index f503a75f2..000000000
--- a/oauth/services/github.py
+++ /dev/null
@@ -1,180 +0,0 @@
-import logging
-
-from oauth.base import OAuthEndpoint
-from oauth.login import OAuthLoginService, OAuthLoginException
-from util import slash_join
-
-logger = logging.getLogger(__name__)
-
-class GithubOAuthService(OAuthLoginService):
- def __init__(self, config, key_name):
- super(GithubOAuthService, self).__init__(config, key_name)
-
- def login_enabled(self, config):
- return config.get('FEATURE_GITHUB_LOGIN', False)
-
- def service_id(self):
- return 'github'
-
- def service_name(self):
- if self.is_enterprise():
- return 'GitHub Enterprise'
-
- return 'GitHub'
-
- def get_icon(self):
- return 'fa-github'
-
- def get_login_scopes(self):
- if self.config.get('ORG_RESTRICT'):
- return ['user:email', 'read:org']
-
- return ['user:email']
-
- def allowed_organizations(self):
- if not self.config.get('ORG_RESTRICT', False):
- return None
-
- allowed = self.config.get('ALLOWED_ORGANIZATIONS', None)
- if allowed is None:
- return None
-
- return [org.lower() for org in allowed]
-
- def get_public_url(self, suffix):
- return slash_join(self._endpoint(), suffix)
-
- def _endpoint(self):
- return self.config.get('GITHUB_ENDPOINT', 'https://github.com')
-
- def is_enterprise(self):
- return self._api_endpoint().find('.github.com') < 0
-
- def authorize_endpoint(self):
- return OAuthEndpoint(slash_join(self._endpoint(), '/login/oauth/authorize'))
-
- def token_endpoint(self):
- return OAuthEndpoint(slash_join(self._endpoint(), '/login/oauth/access_token'))
-
- def user_endpoint(self):
- return OAuthEndpoint(slash_join(self._api_endpoint(), 'user'))
-
- def _api_endpoint(self):
- return self.config.get('API_ENDPOINT', slash_join(self._endpoint(), '/api/v3/'))
-
- def api_endpoint(self):
- endpoint = self._api_endpoint()
- if endpoint.endswith('/'):
- return endpoint[0:-1]
-
- return endpoint
-
- def email_endpoint(self):
- return slash_join(self._api_endpoint(), 'user/emails')
-
- def orgs_endpoint(self):
- return slash_join(self._api_endpoint(), 'user/orgs')
-
- def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
- # First: Verify that the github endpoint is actually Github by checking for the
- # X-GitHub-Request-Id here.
- api_endpoint = self._api_endpoint()
- result = http_client.get(api_endpoint, auth=(self.client_id(), self.client_secret()), timeout=5)
-    if 'X-GitHub-Request-Id' not in result.headers:
-      raise Exception('Endpoint is not a GitHub (Enterprise) installation')
-
- # Next: Verify the client ID and secret.
-    # Note: The following code is a hack until GitHub officially adds an API endpoint
-    # for verifying a {client_id, client_secret} pair. This workaround was given to us
-    # *by a GitHub engineer* (Jan 8, 2015).
- #
- # TODO: Replace with the real API call once added.
- #
- # Hitting the endpoint applications/{client_id}/tokens/foo will result in the following
- # behavior IF the client_id is given as the HTTP username and the client_secret as the HTTP
- # password:
- # - If the {client_id, client_secret} pair is invalid in some way, we get a 401 error.
-    #  - If the pair is valid, then we get a 404 because the 'foo' token does not exist.
- validate_endpoint = slash_join(api_endpoint, 'applications/%s/tokens/foo' % self.client_id())
- result = http_client.get(validate_endpoint, auth=(self.client_id(), self.client_secret()),
- timeout=5)
- return result.status_code == 404
-
- def validate_organization(self, organization_id, http_client):
- org_endpoint = slash_join(self._api_endpoint(), 'orgs/%s' % organization_id.lower())
-
- result = http_client.get(org_endpoint,
- headers={'Accept': 'application/vnd.github.moondragon+json'},
- timeout=5)
-
- return result.status_code == 200
-
-
- def get_public_config(self):
- return {
- 'CLIENT_ID': self.client_id(),
- 'AUTHORIZE_ENDPOINT': self.authorize_endpoint().to_url(),
- 'GITHUB_ENDPOINT': self._endpoint(),
- 'ORG_RESTRICT': self.config.get('ORG_RESTRICT', False)
- }
-
- def get_login_service_id(self, user_info):
- return user_info['id']
-
- def get_login_service_username(self, user_info):
- return user_info['login']
-
- def get_verified_user_email(self, app_config, http_client, token, user_info):
- v3_media_type = {
- 'Accept': 'application/vnd.github.v3'
- }
-
- token_param = {
- 'access_token': token,
- }
-
- # Find the e-mail address for the user: we will accept any email, but we prefer the primary
- get_email = http_client.get(self.email_endpoint(), params=token_param, headers=v3_media_type)
- if get_email.status_code // 100 != 2:
- raise OAuthLoginException('Got non-2XX status code for emails endpoint: %s' %
- get_email.status_code)
-
- verified_emails = [email for email in get_email.json() if email['verified']]
- primary_emails = [email for email in get_email.json() if email['primary']]
-
- # Special case: We don't care about whether an e-mail address is "verified" under GHE.
- if self.is_enterprise() and not verified_emails:
- verified_emails = primary_emails
-
- allowed_emails = (primary_emails or verified_emails or [])
- return allowed_emails[0]['email'] if len(allowed_emails) > 0 else None
-
- def service_verify_user_info_for_login(self, app_config, http_client, token, user_info):
-    # Retrieve the user's organizations (if organization filtering is turned on)
- if self.allowed_organizations() is None:
- return
-
- moondragon_media_type = {
- 'Accept': 'application/vnd.github.moondragon+json'
- }
-
- token_param = {
- 'access_token': token,
- }
-
- get_orgs = http_client.get(self.orgs_endpoint(), params=token_param,
- headers=moondragon_media_type)
-
- if get_orgs.status_code // 100 != 2:
- logger.debug('get_orgs response: %s', get_orgs.json())
- raise OAuthLoginException('Got non-2XX response for org lookup: %s' %
- get_orgs.status_code)
-
- organizations = set([org.get('login').lower() for org in get_orgs.json()])
- matching_organizations = organizations & set(self.allowed_organizations())
- if not matching_organizations:
- logger.debug('Found organizations %s, but expected one of %s', organizations,
- self.allowed_organizations())
- err = """You are not a member of an allowed GitHub organization.
- Please contact your system administrator if you believe this is in error."""
- raise OAuthLoginException(err)
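
Reviewer note: the credential-validation workaround deleted above reads as the following standalone sketch; `requests` usage is standard, the function name is mine, and the URL shape mirrors the deleted code (GitHub has since replaced this endpoint, per the TODO):

```python
import requests

def github_credentials_valid(api_endpoint, client_id, client_secret):
    # A 401 means the {client_id, client_secret} pair is invalid; a 404 means
    # the pair is valid but the probe token 'foo' does not exist.
    url = '%s/applications/%s/tokens/foo' % (api_endpoint.rstrip('/'), client_id)
    result = requests.get(url, auth=(client_id, client_secret), timeout=5)
    return result.status_code == 404
```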
diff --git a/oauth/services/gitlab.py b/oauth/services/gitlab.py
deleted file mode 100644
index 9b0dcc2ec..000000000
--- a/oauth/services/gitlab.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from oauth.base import OAuthService, OAuthEndpoint
-from util import slash_join
-
-class GitLabOAuthService(OAuthService):
- def __init__(self, config, key_name):
- super(GitLabOAuthService, self).__init__(config, key_name)
-
- def service_id(self):
- return 'gitlab'
-
- def service_name(self):
- return 'GitLab'
-
- def _endpoint(self):
- return self.config.get('GITLAB_ENDPOINT', 'https://gitlab.com')
-
- def user_endpoint(self):
- raise NotImplementedError
-
- def api_endpoint(self):
- return self._endpoint()
-
- def get_public_url(self, suffix):
- return slash_join(self._endpoint(), suffix)
-
- def authorize_endpoint(self):
- return OAuthEndpoint(slash_join(self._endpoint(), '/oauth/authorize'))
-
- def token_endpoint(self):
- return OAuthEndpoint(slash_join(self._endpoint(), '/oauth/token'))
-
- def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
- # We validate the client ID and secret by hitting the OAuth token exchange endpoint with
-    # the real client ID and secret, but a fake auth code to exchange. GitLab's implementation will
- # return `invalid_client` as the `error` if the client ID or secret is invalid; otherwise, it
- # will return another error.
- url = self.token_endpoint().to_url()
- redirect_uri = self.get_redirect_uri(url_scheme_and_hostname, redirect_suffix='trigger')
- data = {
- 'code': 'fakecode',
- 'client_id': self.client_id(),
- 'client_secret': self.client_secret(),
- 'grant_type': 'authorization_code',
- 'redirect_uri': redirect_uri
- }
-
- # We validate by checking the error code we receive from this call.
- result = http_client.post(url, data=data, timeout=5)
- value = result.json()
- if not value:
- return False
-
- return value.get('error', '') != 'invalid_client'
-
- def get_public_config(self):
- return {
- 'CLIENT_ID': self.client_id(),
- 'AUTHORIZE_ENDPOINT': self.authorize_endpoint().to_url(),
- 'GITLAB_ENDPOINT': self._endpoint(),
- }
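
Reviewer note: the GitLab check above, as a self-contained sketch; the token URL and redirect URI are placeholders:

```python
import requests

def gitlab_credentials_valid(token_url, client_id, client_secret,
                             redirect_uri='http://localhost/oauth/callback'):
    data = {
        'code': 'fakecode',
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'authorization_code',
        'redirect_uri': redirect_uri,
    }
    value = requests.post(token_url, data=data, timeout=5).json()
    if not value:
        return False
    # 'invalid_client' comes back only for a bad id/secret pair; the fake code
    # produces a different error when the pair is valid.
    return value.get('error', '') != 'invalid_client'
```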
diff --git a/oauth/services/google.py b/oauth/services/google.py
deleted file mode 100644
index 515a5b5dd..000000000
--- a/oauth/services/google.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from oauth.base import OAuthEndpoint
-from oauth.login import OAuthLoginService
-
-def _get_email_username(email_address):
- username = email_address
- at = username.find('@')
- if at > 0:
- username = username[0:at]
-
- return username
-
-class GoogleOAuthService(OAuthLoginService):
- def __init__(self, config, key_name):
- super(GoogleOAuthService, self).__init__(config, key_name)
-
- def login_enabled(self, config):
- return config.get('FEATURE_GOOGLE_LOGIN', False)
-
- def service_id(self):
- return 'google'
-
- def service_name(self):
- return 'Google'
-
- def get_icon(self):
- return 'fa-google'
-
- def get_login_scopes(self):
- return ['openid', 'email']
-
- def authorize_endpoint(self):
- return OAuthEndpoint('https://accounts.google.com/o/oauth2/auth',
- params=dict(response_type='code'))
-
- def token_endpoint(self):
- return OAuthEndpoint('https://accounts.google.com/o/oauth2/token')
-
- def user_endpoint(self):
- return OAuthEndpoint('https://www.googleapis.com/oauth2/v1/userinfo')
-
- def requires_form_encoding(self):
- return True
-
- def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
- # To verify the Google client ID and secret, we hit the
- # https://www.googleapis.com/oauth2/v3/token endpoint with an invalid request. If the client
-    # ID or secret is invalid, we get a 401 Unauthorized response. Otherwise, we get a
-    # different status code.
- url = 'https://www.googleapis.com/oauth2/v3/token'
- data = {
- 'code': 'fakecode',
- 'client_id': self.client_id(),
- 'client_secret': self.client_secret(),
- 'grant_type': 'authorization_code',
- 'redirect_uri': 'http://example.com'
- }
-
- result = http_client.post(url, data=data, timeout=5)
- return result.status_code != 401
-
- def get_public_config(self):
- return {
- 'CLIENT_ID': self.client_id(),
- 'AUTHORIZE_ENDPOINT': self.authorize_endpoint().to_url()
- }
-
- def get_login_service_id(self, user_info):
- return user_info['id']
-
- def get_login_service_username(self, user_info):
- return _get_email_username(user_info['email'])
-
- def get_verified_user_email(self, app_config, http_client, token, user_info):
- if not user_info.get('verified_email', False):
- return None
-
- return user_info['email']
-
- def service_verify_user_info_for_login(self, app_config, http_client, token, user_info):
- # Nothing to do.
- pass
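
Reviewer note: `_get_email_username` simply strips the domain from an address; a doctest-style sketch with assumed values:

```python
def _get_email_username(email_address):
    # Mirrors the deleted helper: keep everything before the first '@'.
    at = email_address.find('@')
    return email_address[:at] if at > 0 else email_address

assert _get_email_username('jane@example.com') == 'jane'
assert _get_email_username('no-at-sign') == 'no-at-sign'
```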
diff --git a/oauth/services/test/test_github.py b/oauth/services/test/test_github.py
deleted file mode 100644
index b14ac4952..000000000
--- a/oauth/services/test/test_github.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import pytest
-
-from oauth.services.github import GithubOAuthService
-
-@pytest.mark.parametrize('trigger_config, domain, api_endpoint, is_enterprise', [
- ({
- 'CLIENT_ID': 'someclientid',
- 'CLIENT_SECRET': 'someclientsecret',
- 'API_ENDPOINT': 'https://api.github.com/v3',
- }, 'https://github.com', 'https://api.github.com/v3', False),
- ({
- 'GITHUB_ENDPOINT': 'https://github.somedomain.com/',
- 'CLIENT_ID': 'someclientid',
- 'CLIENT_SECRET': 'someclientsecret',
- }, 'https://github.somedomain.com', 'https://github.somedomain.com/api/v3', True),
- ({
- 'GITHUB_ENDPOINT': 'https://github.somedomain.com/',
- 'API_ENDPOINT': 'http://somedomain.com/api/',
- 'CLIENT_ID': 'someclientid',
- 'CLIENT_SECRET': 'someclientsecret',
- }, 'https://github.somedomain.com', 'http://somedomain.com/api', True),
-])
-def test_basic_enterprise_config(trigger_config, domain, api_endpoint, is_enterprise):
- config = {
- 'GITHUB_TRIGGER_CONFIG': trigger_config
- }
-
- github_trigger = GithubOAuthService(config, 'GITHUB_TRIGGER_CONFIG')
- assert github_trigger.is_enterprise() == is_enterprise
-
- assert github_trigger.authorize_endpoint().to_url() == '%s/login/oauth/authorize' % domain
-
- assert github_trigger.token_endpoint().to_url() == '%s/login/oauth/access_token' % domain
-
- assert github_trigger.api_endpoint() == api_endpoint
- assert github_trigger.user_endpoint().to_url() == '%s/user' % api_endpoint
- assert github_trigger.email_endpoint() == '%s/user/emails' % api_endpoint
- assert github_trigger.orgs_endpoint() == '%s/user/orgs' % api_endpoint
diff --git a/oauth/test/__init__.py b/oauth/test/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/oauth/test/test_loginmanager.py b/oauth/test/test_loginmanager.py
deleted file mode 100644
index 491216104..000000000
--- a/oauth/test/test_loginmanager.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from oauth.loginmanager import OAuthLoginManager
-from oauth.services.github import GithubOAuthService
-from oauth.services.google import GoogleOAuthService
-from oauth.oidc import OIDCLoginService
-
-def test_login_manager_github():
- config = {
- 'FEATURE_GITHUB_LOGIN': True,
- 'GITHUB_LOGIN_CONFIG': {},
- }
-
- loginmanager = OAuthLoginManager(config)
- assert len(loginmanager.services) == 1
- assert isinstance(loginmanager.services[0], GithubOAuthService)
-
-def test_github_disabled():
- config = {
- 'GITHUB_LOGIN_CONFIG': {},
- }
-
- loginmanager = OAuthLoginManager(config)
- assert len(loginmanager.services) == 0
-
-def test_login_manager_google():
- config = {
- 'FEATURE_GOOGLE_LOGIN': True,
- 'GOOGLE_LOGIN_CONFIG': {},
- }
-
- loginmanager = OAuthLoginManager(config)
- assert len(loginmanager.services) == 1
- assert isinstance(loginmanager.services[0], GoogleOAuthService)
-
-def test_google_disabled():
- config = {
- 'GOOGLE_LOGIN_CONFIG': {},
- }
-
- loginmanager = OAuthLoginManager(config)
- assert len(loginmanager.services) == 0
-
-def test_oidc():
- config = {
- 'SOMECOOL_LOGIN_CONFIG': {},
- 'HTTPCLIENT': None,
- }
-
- loginmanager = OAuthLoginManager(config)
- assert len(loginmanager.services) == 1
- assert isinstance(loginmanager.services[0], OIDCLoginService)
-
-def test_multiple_oidc():
- config = {
- 'SOMECOOL_LOGIN_CONFIG': {},
- 'ANOTHER_LOGIN_CONFIG': {},
- 'HTTPCLIENT': None,
- }
-
- loginmanager = OAuthLoginManager(config)
- assert len(loginmanager.services) == 2
- assert isinstance(loginmanager.services[0], OIDCLoginService)
- assert isinstance(loginmanager.services[1], OIDCLoginService)
diff --git a/oauth/test/test_oidc.py b/oauth/test/test_oidc.py
deleted file mode 100644
index 3ab184593..000000000
--- a/oauth/test/test_oidc.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# pylint: disable=redefined-outer-name, unused-argument, invalid-name, missing-docstring, too-many-arguments
-
-import json
-import time
-import urlparse
-
-import jwt
-import pytest
-import requests
-
-from httmock import urlmatch, HTTMock
-from Crypto.PublicKey import RSA
-from jwkest.jwk import RSAKey
-
-from oauth.oidc import OIDCLoginService, OAuthLoginException
-from util.config import URLSchemeAndHostname
-
-
-@pytest.fixture(scope='module') # Slow to generate, only do it once.
-def signing_key():
- private_key = RSA.generate(2048)
- jwk = RSAKey(key=private_key.publickey()).serialize()
- return {
- 'id': 'somekey',
- 'private_key': private_key.exportKey('PEM'),
- 'jwk': jwk,
- }
-
-@pytest.fixture(scope="module")
-def http_client():
- sess = requests.Session()
- adapter = requests.adapters.HTTPAdapter(pool_connections=100,
- pool_maxsize=100)
- sess.mount('http://', adapter)
- sess.mount('https://', adapter)
- return sess
-
-@pytest.fixture(scope="module")
-def valid_code():
- return 'validcode'
-
-@pytest.fixture(params=[True, False])
-def mailing_feature(request):
- return request.param
-
-@pytest.fixture(params=[True, False])
-def email_verified(request):
- return request.param
-
-@pytest.fixture(params=[True, False])
-def userinfo_supported(request):
- return request.param
-
-@pytest.fixture(params=["someusername", "foo@bar.com", None])
-def preferred_username(request):
- return request.param
-
-@pytest.fixture()
-def app_config(http_client, mailing_feature):
- return {
- 'PREFERRED_URL_SCHEME': 'http',
- 'SERVER_HOSTNAME': 'localhost',
- 'FEATURE_MAILING': mailing_feature,
-
- 'SOMEOIDC_LOGIN_CONFIG': {
- 'CLIENT_ID': 'foo',
- 'CLIENT_SECRET': 'bar',
- 'SERVICE_NAME': 'Some Cool Service',
- 'SERVICE_ICON': 'http://some/icon',
- 'OIDC_SERVER': 'http://fakeoidc',
- 'DEBUGGING': True,
- },
-
- 'ANOTHEROIDC_LOGIN_CONFIG': {
- 'CLIENT_ID': 'foo',
- 'CLIENT_SECRET': 'bar',
- 'SERVICE_NAME': 'Some Other Service',
- 'SERVICE_ICON': 'http://some/icon',
- 'OIDC_SERVER': 'http://fakeoidc',
- 'LOGIN_SCOPES': ['openid'],
- 'DEBUGGING': True,
- },
-
- 'OIDCWITHPARAMS_LOGIN_CONFIG': {
- 'CLIENT_ID': 'foo',
- 'CLIENT_SECRET': 'bar',
- 'SERVICE_NAME': 'Some Other Service',
- 'SERVICE_ICON': 'http://some/icon',
- 'OIDC_SERVER': 'http://fakeoidc',
- 'DEBUGGING': True,
- 'OIDC_ENDPOINT_CUSTOM_PARAMS': {
- 'authorization_endpoint': {
- 'some': 'param',
- },
- },
- },
-
- 'HTTPCLIENT': http_client,
- }
-
-@pytest.fixture()
-def oidc_service(app_config):
- return OIDCLoginService(app_config, 'SOMEOIDC_LOGIN_CONFIG')
-
-@pytest.fixture()
-def another_oidc_service(app_config):
- return OIDCLoginService(app_config, 'ANOTHEROIDC_LOGIN_CONFIG')
-
-@pytest.fixture()
-def oidc_withparams_service(app_config):
- return OIDCLoginService(app_config, 'OIDCWITHPARAMS_LOGIN_CONFIG')
-
-@pytest.fixture()
-def discovery_content(userinfo_supported):
- return {
- 'scopes_supported': ['openid', 'profile', 'somescope'],
- 'authorization_endpoint': 'http://fakeoidc/authorize',
- 'token_endpoint': 'http://fakeoidc/token',
- 'userinfo_endpoint': 'http://fakeoidc/userinfo' if userinfo_supported else None,
- 'jwks_uri': 'http://fakeoidc/jwks',
- }
-
-@pytest.fixture()
-def userinfo_content(preferred_username, email_verified):
- return {
- 'sub': 'cooluser',
- 'preferred_username': preferred_username,
- 'email': 'foo@example.com',
- 'email_verified': email_verified,
- }
-
-@pytest.fixture()
-def id_token(oidc_service, signing_key, userinfo_content, app_config):
- token_data = {
- 'iss': oidc_service.config['OIDC_SERVER'],
- 'aud': oidc_service.client_id(),
- 'nbf': int(time.time()),
- 'iat': int(time.time()),
- 'exp': int(time.time() + 600),
- 'sub': 'cooluser',
- }
-
- token_data.update(userinfo_content)
-
- token_headers = {
- 'kid': signing_key['id'],
- }
-
- return jwt.encode(token_data, signing_key['private_key'], 'RS256', headers=token_headers)
-
-@pytest.fixture()
-def discovery_handler(discovery_content):
- @urlmatch(netloc=r'fakeoidc', path=r'.+openid.+')
- def handler(_, __):
- return json.dumps(discovery_content)
-
- return handler
-
-@pytest.fixture()
-def authorize_handler(discovery_content):
- @urlmatch(netloc=r'fakeoidc', path=r'/authorize')
- def handler(_, request):
- parsed = urlparse.urlparse(request.url)
- params = urlparse.parse_qs(parsed.query)
- return json.dumps({'authorized': True, 'scope': params['scope'][0], 'state': params['state'][0]})
-
- return handler
-
-@pytest.fixture()
-def token_handler(oidc_service, id_token, valid_code):
- @urlmatch(netloc=r'fakeoidc', path=r'/token')
- def handler(_, request):
- params = urlparse.parse_qs(request.body)
- if params.get('redirect_uri')[0] != 'http://localhost/oauth2/someoidc/callback':
- return {'status_code': 400, 'content': 'Invalid redirect URI'}
-
- if params.get('client_id')[0] != oidc_service.client_id():
- return {'status_code': 401, 'content': 'Invalid client id'}
-
- if params.get('client_secret')[0] != oidc_service.client_secret():
- return {'status_code': 401, 'content': 'Invalid client secret'}
-
- if params.get('code')[0] != valid_code:
- return {'status_code': 401, 'content': 'Invalid code'}
-
- if params.get('grant_type')[0] != 'authorization_code':
- return {'status_code': 400, 'content': 'Invalid authorization type'}
-
- content = {
- 'access_token': 'sometoken',
- 'id_token': id_token,
- }
- return {'status_code': 200, 'content': json.dumps(content)}
-
- return handler
-
-@pytest.fixture()
-def jwks_handler(signing_key):
- def jwk_with_kid(kid, jwk):
- jwk = jwk.copy()
- jwk.update({'kid': kid})
- return jwk
-
- @urlmatch(netloc=r'fakeoidc', path=r'/jwks')
- def handler(_, __):
- content = {'keys': [jwk_with_kid(signing_key['id'], signing_key['jwk'])]}
- return {'status_code': 200, 'content': json.dumps(content)}
-
- return handler
-
-@pytest.fixture()
-def emptykeys_jwks_handler():
- @urlmatch(netloc=r'fakeoidc', path=r'/jwks')
- def handler(_, __):
- content = {'keys': []}
- return {'status_code': 200, 'content': json.dumps(content)}
-
- return handler
-
-@pytest.fixture
-def userinfo_handler(oidc_service, userinfo_content):
- @urlmatch(netloc=r'fakeoidc', path=r'/userinfo')
- def handler(_, req):
- if req.headers.get('Authorization') != 'Bearer sometoken':
- return {'status_code': 401, 'content': 'Missing expected header'}
-
- return {'status_code': 200, 'content': json.dumps(userinfo_content)}
-
- return handler
-
-@pytest.fixture()
-def invalidsub_userinfo_handler(oidc_service):
- @urlmatch(netloc=r'fakeoidc', path=r'/userinfo')
- def handler(_, __):
- content = {
- 'sub': 'invalidsub',
- }
-
- return {'status_code': 200, 'content': json.dumps(content)}
-
- return handler
-
-
-def test_basic_config(oidc_service):
- assert oidc_service.service_id() == 'someoidc'
- assert oidc_service.service_name() == 'Some Cool Service'
- assert oidc_service.get_icon() == 'http://some/icon'
-
-def test_discovery(oidc_service, http_client, discovery_content, discovery_handler):
- with HTTMock(discovery_handler):
- auth = discovery_content['authorization_endpoint'] + '?response_type=code'
- assert oidc_service.authorize_endpoint().to_url() == auth
- assert oidc_service.token_endpoint().to_url() == discovery_content['token_endpoint']
-
- if discovery_content['userinfo_endpoint'] is None:
- assert oidc_service.user_endpoint() is None
- else:
- assert oidc_service.user_endpoint().to_url() == discovery_content['userinfo_endpoint']
-
- assert set(oidc_service.get_login_scopes()) == set(discovery_content['scopes_supported'])
-
-def test_discovery_with_params(oidc_withparams_service, http_client, discovery_content, discovery_handler):
- with HTTMock(discovery_handler):
- assert 'some=param' in oidc_withparams_service.authorize_endpoint().to_url()
-
-def test_filtered_discovery(another_oidc_service, http_client, discovery_content, discovery_handler):
- with HTTMock(discovery_handler):
- assert another_oidc_service.get_login_scopes() == ['openid']
-
-def test_public_config(oidc_service, discovery_handler):
- with HTTMock(discovery_handler):
- assert oidc_service.get_public_config()['OIDC']
- assert oidc_service.get_public_config()['CLIENT_ID'] == 'foo'
-
- assert 'CLIENT_SECRET' not in oidc_service.get_public_config()
- assert 'bar' not in oidc_service.get_public_config().values()
-
-def test_auth_url(oidc_service, discovery_handler, http_client, authorize_handler):
- config = {'PREFERRED_URL_SCHEME': 'https', 'SERVER_HOSTNAME': 'someserver'}
-
- with HTTMock(discovery_handler, authorize_handler):
- url_scheme_and_hostname = URLSchemeAndHostname.from_app_config(config)
- auth_url = oidc_service.get_auth_url(url_scheme_and_hostname, '', 'some csrf token', ['one', 'two'])
-
- # Hit the URL and ensure it works.
- result = http_client.get(auth_url).json()
- assert result['state'] == 'some csrf token'
- assert result['scope'] == 'one two'
-
-def test_exchange_code_invalidcode(oidc_service, discovery_handler, app_config, http_client,
- token_handler):
- with HTTMock(token_handler, discovery_handler):
- with pytest.raises(OAuthLoginException):
- oidc_service.exchange_code_for_login(app_config, http_client, 'testcode', '')
-
-def test_exchange_code_invalidsub(oidc_service, discovery_handler, app_config, http_client,
- token_handler, invalidsub_userinfo_handler, jwks_handler,
- valid_code, userinfo_supported):
- # Skip when userinfo is not supported.
- if not userinfo_supported:
- return
-
- with HTTMock(jwks_handler, token_handler, invalidsub_userinfo_handler, discovery_handler):
- # Should fail because the sub of the user info doesn't match that returned by the id_token.
- with pytest.raises(OAuthLoginException):
- oidc_service.exchange_code_for_login(app_config, http_client, valid_code, '')
-
-def test_exchange_code_missingkey(oidc_service, discovery_handler, app_config, http_client,
- token_handler, userinfo_handler, emptykeys_jwks_handler,
- valid_code):
- with HTTMock(emptykeys_jwks_handler, token_handler, userinfo_handler, discovery_handler):
- # Should fail because the key is missing.
- with pytest.raises(OAuthLoginException):
- oidc_service.exchange_code_for_login(app_config, http_client, valid_code, '')
-
-def test_exchange_code_validcode(oidc_service, discovery_handler, app_config, http_client,
- token_handler, userinfo_handler, jwks_handler, valid_code,
- preferred_username, mailing_feature, email_verified):
- with HTTMock(jwks_handler, token_handler, userinfo_handler, discovery_handler):
- if mailing_feature and not email_verified:
- # Should fail because there isn't a verified email address.
- with pytest.raises(OAuthLoginException):
- oidc_service.exchange_code_for_login(app_config, http_client, valid_code, '')
- else:
- # Should succeed.
- lid, lusername, lemail = oidc_service.exchange_code_for_login(app_config, http_client,
- valid_code, '')
-
- assert lid == 'cooluser'
-
- if email_verified:
- assert lemail == 'foo@example.com'
- else:
- assert lemail is None
-
- if preferred_username is not None:
- if preferred_username.find('@') >= 0:
- preferred_username = preferred_username[0:preferred_username.find('@')]
-
- assert lusername == preferred_username
- else:
- assert lusername == lid
diff --git a/package.json b/package.json
index 4d9ce6da0..36de3d949 100644
--- a/package.json
+++ b/package.json
@@ -4,18 +4,9 @@
"private": true,
"version": "1.0.0",
"scripts": {
- "dev": "karma start --browsers ChromeHeadless",
- "test": "karma start --single-run --browsers ChromeNoSandbox",
- "test:node": "JASMINE_CONFIG_PATH=static/test/jasmine.json jasmine-ts './static/js/**/*.spec.ts'",
- "e2e": "ts-node ./node_modules/.bin/protractor static/test/protractor.conf.ts",
- "build": "npm run clean && NODE_ENV=production webpack --mode=production --progress",
- "watch": "npm run clean && webpack --watch --mode=development",
- "lint": "tslint --type-check -p tsconfig.json -e **/*.spec.ts",
- "analyze": "npm run clean && NODE_ENV=production webpack --profile --mode=production --json > static/build/stats.json && webpack-bundle-analyzer --mode static -r static/build/report.html static/build/stats.json",
- "clean": "rm -f static/build/*",
- "clean-config-app": "rm -f config_app/static/build/*",
- "watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch",
- "build-config-app": "npm run clean-config-app && cd config_app && NODE_ENV=production webpack --mode=production --progress"
+ "test": "./node_modules/.bin/karma start --single-run --browsers PhantomJS",
+ "build": "./node_modules/.bin/webpack --progress -p -v",
+ "watch": "./node_modules/.bin/webpack --watch"
},
"repository": {
"type": "git",
@@ -23,65 +14,46 @@
},
"homepage": "https://github.com/coreos-inc/quay#readme",
"dependencies": {
- "angular": "1.6.2",
- "angular-animate": "1.6.2",
- "angular-cookies": "1.6.2",
- "angular-route": "1.6.2",
- "angular-sanitize": "1.6.2",
+ "@types/angular": "1.5.16",
+ "@types/react": "0.14.39",
+ "@types/react-dom": "0.14.17",
+ "angular": "1.5.3",
+ "angular-animate": "^1.5.3",
+ "angular-cookies": "^1.5.3",
+ "angular-route": "^1.5.3",
+ "angular-sanitize": "^1.5.3",
"bootbox": "^4.1.0",
"bootstrap": "^3.3.2",
"bootstrap-datepicker": "^1.6.4",
"cal-heatmap": "^3.3.10",
- "clipboard": "^1.6.1",
- "core-js": "^2.4.1",
"d3": "^3.3.3",
"eonasdan-bootstrap-datetimepicker": "^4.17.43",
- "file-saver": "^1.3.3",
- "highlight.js": "^9.12.0",
"jquery": "1.12.4",
- "ng-metadata": "^4.0.1",
"raven-js": "^3.1.0",
+ "react": "^15.3.2",
+ "react-dom": "^15.3.2",
"restangular": "^1.2.0",
- "rxjs": "5.5.7",
- "showdown": "^1.6.4",
- "underscore": "^1.5.2",
- "urijs": "^1.18.10",
- "url-parse": "^1.4.0",
- "zeroclipboard": "^2.3.0"
+ "underscore": "^1.5.2"
},
"devDependencies": {
- "@types/angular": "1.6.2",
- "@types/angular-mocks": "^1.5.8",
- "@types/angular-route": "^1.3.3",
- "@types/angular-sanitize": "^1.3.4",
- "@types/core-js": "^0.9.39",
- "@types/jasmine": "^2.5.41",
- "@types/jquery": "^2.0.40",
- "@types/showdown": "^1.4.32",
- "angular-mocks": "1.6.2",
- "css-loader": "0.25.0",
- "html-loader": "^0.4.5",
+ "angular-mocks": "^1.5.3",
"jasmine-core": "^2.5.2",
- "jasmine-ts": "0.0.3",
- "karma": "^1.7.0",
- "karma-chrome-launcher": "^2.1.1",
+ "karma": "^0.13.22",
+ "karma-chrome-launcher": "^2.0.0",
"karma-coverage": "^0.5.5",
"karma-es6-shim": "^1.0.0",
"karma-jasmine": "^0.3.8",
+ "karma-phantomjs-launcher": "^1.0.0",
"karma-webpack": "^1.8.1",
- "ngtemplate-loader": "^1.3.1",
- "protractor": "^5.1.2",
- "script-loader": "^0.7.0",
+ "css-loader": "0.25.0",
+ "node-sass": "3.10.1",
+ "sass-loader": "4.0.2",
"source-map-loader": "0.1.5",
"style-loader": "0.13.1",
- "terser-webpack-plugin": "^2.1.2",
- "ts-loader": "6.2.0",
- "ts-mocks": "^0.2.2",
- "ts-node": "^3.0.6",
- "tslint": "^5.4.3",
- "typescript": "3.6.3",
- "webpack": "4.41.0",
- "webpack-bundle-analyzer": "3.5.2",
- "webpack-cli": "3.3.9"
+ "phantomjs-prebuilt": "^2.1.7",
+ "ts-loader": "0.9.5",
+ "typescript": "2.0.3",
+ "typings": "1.4.0",
+ "webpack": "1.13.3"
}
}
diff --git a/path_converters.py b/path_converters.py
deleted file mode 100644
index cd5740186..000000000
--- a/path_converters.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from werkzeug.routing import BaseConverter
-
-import features
-
-
-class APIRepositoryPathConverter(BaseConverter):
- """ Converter for handling repository paths. Does not handle library paths.
- """
- def __init__(self, url_map):
- super(APIRepositoryPathConverter, self).__init__(url_map)
- self.weight = 200
- self.regex = r'([^/]+/[^/]+)'
-
-
-class RepositoryPathConverter(BaseConverter):
- """ Converter for handling repository paths. Handles both library and non-library paths (if
- configured).
- """
- def __init__(self, url_map):
- super(RepositoryPathConverter, self).__init__(url_map)
- self.weight = 200
-
- if features.LIBRARY_SUPPORT:
- # Allow names without namespaces.
- self.regex = r'[^/]+(/[^/]+)?'
- else:
- self.regex = r'([^/]+/[^/]+)'
-
-
-class RegexConverter(BaseConverter):
- """ Converter for handling custom regular expression patterns in paths. """
- def __init__(self, url_map, regex_value):
- super(RegexConverter, self).__init__(url_map)
- self.regex = regex_value
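
Reviewer note: these converters only take effect once registered on Flask's URL map; a minimal sketch under assumed names (the converter key and route are illustrative, not from this repo):

```python
from flask import Flask
from werkzeug.routing import BaseConverter

class RepositoryPathConverter(BaseConverter):
    # Matches 'namespace/name' repository paths, as in the deleted module.
    def __init__(self, url_map):
        super(RepositoryPathConverter, self).__init__(url_map)
        self.regex = r'([^/]+/[^/]+)'

app = Flask(__name__)
app.url_map.converters['repopath'] = RepositoryPathConverter

@app.route('/api/v1/repository/<repopath:repository>')
def get_repository(repository):
    return 'repository: %s' % repository
```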
diff --git a/project_quay_logo.png b/project_quay_logo.png
deleted file mode 100644
index 8ad64a11b..000000000
Binary files a/project_quay_logo.png and /dev/null differ
diff --git a/quay-base.dockerfile b/quay-base.dockerfile
deleted file mode 100644
index f23d12d81..000000000
--- a/quay-base.dockerfile
+++ /dev/null
@@ -1,104 +0,0 @@
-# vim:ft=dockerfile
-
-FROM phusion/baseimage:0.10.0
-
-ENV DEBIAN_FRONTEND noninteractive
-ENV HOME /root
-ENV QUAYDIR /quay-registry
-ENV QUAYCONF /quay-registry/conf
-ENV QUAYPATH "."
-
-RUN mkdir $QUAYDIR
-WORKDIR $QUAYDIR
-
-# Use the nginx development PPA so we don't hit golang/go#17066.
-# Once Ubuntu ships nginx >= 1.11.0 we can switch back.
-RUN add-apt-repository ppa:nginx/development
-
-# Add Yarn repository until it is officially added to Ubuntu
-RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
- && echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
-RUN curl -sL https://deb.nodesource.com/setup_8.x | bash -
-# Install system packages
-RUN apt-get update && apt-get upgrade -y \
- && apt-get install -y \
- dnsmasq \
- g++ \
- gdb \
- gdebi-core \
- git \
- jpegoptim \
- libevent-2.0.5 \
- libevent-dev \
- libffi-dev \
- libfreetype6-dev \
- libgpgme11 \
- libgpgme11-dev \
- libjpeg62 \
- libjpeg62-dev \
- libjpeg8 \
- libldap-2.4-2 \
- libldap2-dev \
- libmagic1 \
- libpq-dev \
- libpq5 \
- libsasl2-dev \
- libsasl2-modules \
- memcached \
- nginx \
- nodejs \
- optipng \
- openssl \
- python-dbg \
- python-dev \
- python-pip \
- python-virtualenv \
- yarn=0.22.0-1 \
- w3m # 27MAR2018
-
-# Install cfssl
-RUN mkdir /gocode
-ENV GOPATH /gocode
-RUN curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz && \
- tar -xvf go1.10.linux-amd64.tar.gz && \
- mv go /usr/local && \
- rm -rf go1.10.linux-amd64.tar.gz && \
- /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \
- /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \
- cp /gocode/bin/cfssljson /bin/cfssljson && \
- cp /gocode/bin/cfssl /bin/cfssl && \
- rm -rf /gocode && rm -rf /usr/local/go
-
-# Install jwtproxy
-RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64 \
- && chmod +x /usr/local/bin/jwtproxy
-
-# Install prometheus-aggregator
-RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator \
- && chmod +x /usr/local/bin/prometheus-aggregator
-
-# Install python dependencies
-COPY requirements.txt requirements-tests.txt ./
-RUN virtualenv --distribute venv \
- && venv/bin/pip install -r requirements.txt \
- && venv/bin/pip install -r requirements-tests.txt \
- && venv/bin/pip freeze
-
-# Install front-end dependencies
-COPY static/ package.json tsconfig.json webpack.config.js tslint.json yarn.lock ./
-RUN yarn install --ignore-engines
-
-
-RUN mkdir -p /etc/my_init.d /etc/syslog-ng /usr/local/bin $QUAYDIR/static/fonts $QUAYDIR/static/ldn /usr/local/nginx/logs/
-
-COPY external_libraries.py _init.py ./
-
-RUN venv/bin/python -m external_libraries
-
-RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache
-VOLUME ["/var/log", "/datastorage", "/tmp"]
-
-RUN mkdir scripts
-ADD scripts/detect-config.sh scripts/.
-RUN ./scripts/detect-config.sh
-EXPOSE 443 8443 80
diff --git a/quay-entrypoint.sh b/quay-entrypoint.sh
deleted file mode 100755
index 59573b83a..000000000
--- a/quay-entrypoint.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env bash
-
-QUAYENTRY=${QUAYENTRY:=$1}
-QUAYENTRY=${QUAYENTRY:=registry}
-
-if ! whoami &> /dev/null; then
- if [ -w /etc/passwd ]; then
- echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> /etc/passwd
- fi
-fi
-
-display_usage() {
-    echo "Usage: ${0} <registry|config|migrate|repomirror|shell|help>"
- echo
- echo "If the first argument isn't one of the above modes,"
- echo "the arguments will be exec'd directly, i.e.:"
- echo
- echo " ${0} uptime"
-}
-
-if [[ "${QUAYENTRY}" = "help" ]]
-then
- display_usage
- exit 0
-fi
-
-
-cat << "EOF"
- __ __
- / \ / \ ______ _ _ __ __ __
- / /\ / /\ \ / __ \ | | | | / \ \ \ / /
-/ / / / \ \ | | | | | | | | / /\ \ \ /
-\ \ \ \ / / | |__| | | |__| | / ____ \ | |
- \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_|
- \__/ \__/ \ \__
- \___\ by Red Hat
-
- Build, Store, and Distribute your Containers
-
-
-EOF
-
-# Custom environment variables for use in conf/supervisord.conf
-# The gunicorn-registry process DB_CONNECTION_POOLING must default to true
-export DB_CONNECTION_POOLING_REGISTRY=${DB_CONNECTION_POOLING:-"true"}
-
-case "$QUAYENTRY" in
- "shell")
- echo "Entering shell mode"
- exec /usr/bin/scl enable python27 rh-nginx112 /bin/bash
- ;;
- "config")
- echo "Entering config mode, only copying config-app entrypoints"
- if [ -z "$2" ]
- then
- if [ -z "${CONFIG_APP_PASSWORD}" ]
- then
- echo "Missing password for configuration tool"
- exit
- else
- openssl passwd -apr1 "${CONFIG_APP_PASSWORD}" >> $QUAYDIR/config_app/conf/htpasswd
- fi
- else
- openssl passwd -apr1 "$2" >> $QUAYDIR/config_app/conf/htpasswd
- fi
-
- /usr/bin/scl enable python27 rh-nginx112 "${QUAYPATH}/config_app/init/certs_create.sh"
- /usr/bin/scl enable python27 rh-nginx112 "supervisord -c ${QUAYPATH}/config_app/conf/supervisord.conf 2>&1"
- ;;
- "migrate")
- echo "Entering migration mode to version: ${2}"
- exec /usr/bin/scl enable python27 rh-nginx112 "PYTHONPATH=${QUAYPATH} ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade ${2}"
- ;;
- "repomirror")
- echo "Entering repository mirroring mode"
- if [ -z "${QUAY_SERVICES}" ]
- then
- export QUAY_SERVICES=repomirrorworker,prometheus-aggregator
- else
- export QUAY_SERVICES=${QUAY_SERVICES},repomirrorworker,prometheus-aggregator
- fi
- ;&
- "registry")
- if [ -z "${QUAY_SERVICES}" ]
- then
- echo "Running all default registry services"
- else
- echo "Running services ${QUAY_SERVICES}"
- fi
- for f in $(ls ${QUAYCONF}/init/*.sh); do
- echo "Running init script '$f'"
- /usr/bin/scl enable python27 rh-nginx112 "$f" || exit -1;
- done
- /usr/bin/scl enable python27 rh-nginx112 "supervisord -c ${QUAYCONF}/supervisord.conf 2>&1"
- ;;
- *)
- echo "Running '$QUAYENTRY'"
- /usr/bin/scl enable python27 rh-nginx112 "$QUAYENTRY" || exit -1;
- ;;
-esac
-
diff --git a/registry.py b/registry.py
index 0947f00c9..df868242c 100644
--- a/registry.py
+++ b/registry.py
@@ -1,19 +1,17 @@
-# NOTE: Must be before we import or call anything that may be synchronous.
-from gevent import monkey
-monkey.patch_all()
-
-import endpoints.decorated # Note: We need to import this module to make sure the decorators are registered.
-import features
+import logging
+import logging.config
+import os
from app import app as application
-from endpoints.appr import appr_bp, registry # registry needed to ensure routes registered
+# Note: We need to import this module to make sure the decorators are registered.
+import endpoints.decorated
+
from endpoints.v1 import v1_bp
from endpoints.v2 import v2_bp
+if os.environ.get('DEBUGLOG') == 'true':
+ logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False)
application.register_blueprint(v1_bp, url_prefix='/v1')
application.register_blueprint(v2_bp, url_prefix='/v2')
-
-if features.APP_REGISTRY:
- application.register_blueprint(appr_bp, url_prefix='/cnr')
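
Reviewer note: the removed `monkey.patch_all()` lines follow the general gevent rule that patching must happen before anything imports the modules it rewrites; a sketch:

```python
# Patch first: patch_all() swaps in cooperative versions of socket, ssl,
# threading, etc. Modules imported before this keep the blocking originals.
from gevent import monkey
monkey.patch_all()

import requests  # now uses gevent-cooperative sockets
```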
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 6165eb863..916966dbc 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,5 +2,3 @@ ipython
pylint
ipdb
tqdm
-yapf==0.15.2
-ffctl>=0.1.2
diff --git a/requirements-nover.txt b/requirements-nover.txt
index 6ec23a485..80a9efcb0 100644
--- a/requirements-nover.txt
+++ b/requirements-nover.txt
@@ -5,64 +5,51 @@
-e git+https://github.com/NateFerrero/oauth2lib.git#egg=oauth2lib
-e git+https://github.com/coreos/mockldap.git@v0.1.x#egg=mockldap
-e git+https://github.com/coreos/py-bitbucket.git#egg=py-bitbucket
+-e git+https://github.com/coreos/pyapi-gitlab.git@timeout#egg=pyapi-gitlab
-e git+https://github.com/coreos/resumablehashlib.git#egg=resumablehashlib
-e git+https://github.com/jepcastelein/marketo-rest-python.git#egg=marketorestpython
--e git+https://github.com/app-registry/appr-server.git@c2ef3b88afe926a92ef5f2e11e7d4a259e286a17#egg=cnr_server # naming has changed
--e git+https://github.com/DevTable/boto.git@a6a5c00bd199b1492e99199251b10451970b5b08#egg=boto
-APScheduler
+APScheduler==3.0.5
Flask-Login
Flask-Mail
Flask-Principal
-Flask-Testing
+-e git+https://github.com/jarus/flask-testing.git#egg=Flask-Testing
PyGithub
aiowsgi
alembic
autobahn==0.9.3-3
-azure-common
-azure-storage-blob
beautifulsoup4
bencode
bintrees
-bitmath
-boto3
-cachetools
+boto
+cachetools==1.1.6
cryptography
-elasticsearch>=7.0.4,<8.0.0
-elasticsearch-dsl>=7.0.0,<8.0.0
flask
-flask-restful
-geoip2
+flask-restful==0.2.12
gevent
gipc
-gunicorn
+gunicorn<19.0
hiredis
html5lib==0.9999999 # pinned due to xhtml2pdf
httmock
jsonpath-rw
jsonschema
-kafka-python
marisa-trie
mixpanel
mock
-moto
+moto==0.4.25 # remove when 0.4.28+ is out
namedlist
-netaddr
pathvalidate
-peewee
-prometheus-client
+peewee==2.8.1
psutil
-psycopg2-binary
-pyasn1
+psycopg2
py-bcrypt
pyOpenSSL
pycryptodome
pygpgme
pyjwkest
pyjwt
-pymemcache
-pymysql
+pymysql==0.6.7 # Remove version when baseimage has Python 2.7.9+
python-dateutil
-python-gitlab
python-keystoneclient
python-ldap
python-magic
@@ -72,18 +59,12 @@ raven
redis
redlock
reportlab==2.7
-requests-aws4auth
semantic-version
sqlalchemy
stringscore
stripe
-supervisor
-supervisor-stdout
-tldextract
toposort
trollius
tzlocal
xhtml2pdf
recaptcha2
-mockredispy
-yapf
diff --git a/requirements-tests.txt b/requirements-tests.txt
index 87db9ed2a..bb613f9cc 100644
--- a/requirements-tests.txt
+++ b/requirements-tests.txt
@@ -1,11 +1,8 @@
--e git+https://github.com/ant31/pytest-sugar.git#egg=pytest-sugar
-backports.tempfile
-freezegun==0.3.12
-mockldap
pytest
pytest-cov
+python-coveralls
pytest-flask
pytest-runner
-pytest-timeout
pytest-xdist
-python-coveralls
+pytest-timeout
+-e git+https://github.com/ant31/pytest-sugar.git#egg=pytest-sugar
diff --git a/requirements.txt b/requirements.txt
index 4a3e317d0..3ff9ac3fc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,187 +1,125 @@
--e git+https://github.com/app-registry/appr-server.git@c2ef3b88afe926a92ef5f2e11e7d4a259e286a17#egg=cnr_server
--e git+https://github.com/coreos/mockldap.git@59a46efbe8c7cd8146a87a7c4f2b09746b953e11#egg=mockldap
--e git+https://github.com/coreos/py-bitbucket.git@55a1ada645f2fb6369147996ec71edd7828d91c8#egg=py_bitbucket
--e git+https://github.com/coreos/resumablehashlib.git@b1b631249589b07adf40e0ee545b323a501340b4#egg=resumablehashlib
+aiowsgi==0.6
+alembic==0.8.8
-e git+https://github.com/DevTable/aniso8601-fake.git@bd7762c7dea0498706d3f57db60cd8a8af44ba90#egg=aniso8601
-e git+https://github.com/DevTable/anunidecode.git@d59236a822e578ba3a0e5e5abbd3855873fa7a88#egg=anunidecode
--e git+https://github.com/DevTable/boto.git@a6a5c00bd199b1492e99199251b10451970b5b08#egg=boto
--e git+https://github.com/DevTable/container-cloud-config.git@44e06879a710661f01054b300dc78a12a268b6b5#egg=container_cloud_config
--e git+https://github.com/DevTable/python-etcd.git@f1168cb02a2a8c83bec1108c6fcd8615ef463b14#egg=python_etcd
--e git+https://github.com/jarus/flask-testing.git@17f19d7fee0e1e176703fc7cb04917a77913ba1a#egg=Flask_Testing
--e git+https://github.com/jepcastelein/marketo-rest-python.git@d151205aaa70fa69c14cf3ed5d8f3162e99b8236#egg=marketorestpython
--e git+https://github.com/NateFerrero/oauth2lib.git@d161b010f8a596826050a09e5e94d59443cc12d9#egg=oauth2lib
-aiowsgi==0.7
-alembic==1.1.0
-APScheduler==3.6.1
-asn1crypto==0.24.0
-attrs==19.1.0
+APScheduler==3.0.5
autobahn==0.9.3.post3
-aws-sam-translator==1.14.0
-aws-xray-sdk==2.4.2
-azure-common==1.1.23
-azure-nspkg==3.0.2
-azure-storage-blob==2.1.0
-azure-storage-common==2.1.0
-azure-storage-nspkg==3.1.0
-Babel==2.7.0
-backoff==1.8.0
-backports.functools-lru-cache==1.5
-backports.ssl-match-hostname==3.7.0.1
-backports.tempfile==1.0
-backports.weakref==1.0.post1
-beautifulsoup4==4.8.0
+Babel==2.3.4
+beautifulsoup4==4.5.1
bencode==1.0
-bintrees==2.0.7
-bitmath==1.3.3.1
+bintrees==2.0.4
blinker==1.4
-boto3==1.9.228
-botocore==1.12.228
-cachetools==3.1.1
-certifi==2019.6.16
-cffi==1.12.3
-cfn-lint==0.24.1
-chardet==3.0.4
-Click==7.0
-contextlib2==0.5.5
-cookies==2.2.1
-cryptography==2.7
-DateTime==4.3
-debtcollector==1.22.0
-decorator==4.4.0
-Deprecated==1.2.6
-docker==4.0.2
-# docutils==0.15.2 # Disabled due to build conflict and GPL
-ecdsa==0.13.2
-elasticsearch-dsl==7.0.0
-elasticsearch==7.0.4
+boto==2.43.0
+cachetools==1.1.6
+cffi==1.8.3
+click==6.6
+-e git+https://github.com/DevTable/container-cloud-config.git@bce675537904175f6975024a4c89269027ea6792#egg=container_cloud_config
+contextlib2==0.5.4
+cryptography==1.5.2
+debtcollector==1.8.0
+decorator==4.0.10
enum34==1.1.6
-Flask-Cors==3.0.8
-Flask-Login==0.4.1
+Flask==0.11.1
+Flask-Login==0.3.2
Flask-Mail==0.9.1
Flask-Principal==0.4.0
-Flask-RESTful==0.3.7
-Flask==1.1.1
+Flask-RESTful==0.2.12
+-e git+https://github.com/jarus/flask-testing.git@d60d431b3f2bc1b4b335579633e65978efa0a755#egg=Flask-Testing
funcparserlib==0.3.6
funcsigs==1.0.2
functools32==3.2.3.post2
-furl==2.0.0
-future==0.17.1
-futures==3.3.0
-geoip2==2.9.0
-gevent==1.4.0
-gipc==1.0.1
-greenlet==0.4.15
-gunicorn==19.9.0
-hiredis==1.0.0
+future==0.15.2
+futures==3.0.5
+gevent==1.1.2
+gipc==0.6.0
+greenlet==0.4.10
+gunicorn==18.0
+hiredis==0.2.0
html5lib==0.9999999
-httmock==1.3.0
+httmock==1.2.5
httpretty==0.8.10
-idna==2.8
-ipaddress==1.0.22
-iso8601==0.1.12
-itsdangerous==1.1.0
-Jinja2==2.10.1
-jmespath==0.9.4
-jsondiff==1.1.2
-jsonpatch==1.24
+idna==2.1
+ipaddress==1.0.17
+iso8601==0.1.11
+itsdangerous==0.24
+Jinja2==2.8
jsonpath-rw==1.4.0
-jsonpickle==1.2
-jsonpointer==2.0
-jsonschema==3.0.2
-kafka-python==1.4.6
-keystoneauth1==3.17.1
-Mako==1.1.0
-marisa-trie==0.7.5
-MarkupSafe==1.1.1
-maxminddb==1.4.1
-meld3==2.0.0
-mixpanel==4.5.0
-mock==3.0.5
-mockredispy==2.9.3
-monotonic==1.5
-moto==1.3.13
-msgpack==0.6.1
+jsonschema==2.5.1
+keystoneauth1==2.14.0
+Mako==1.0.4
+marisa-trie==0.7.2
+-e git+https://github.com/jepcastelein/marketo-rest-python.git@1ba6dfee030b192f0930dd8c3b6d53b52d886c65#egg=marketorestpython-master
+MarkupSafe==0.23
+mixpanel==4.3.1
+mock==2.0.0
+-e git+https://github.com/coreos/mockldap.git@59a46efbe8c7cd8146a87a7c4f2b09746b953e11#egg=mockldap
+monotonic==1.2
+moto==0.4.25
+msgpack-python==0.4.8
namedlist==1.7
-ndg-httpsclient==0.5.1
-netaddr==0.7.19
-netifaces==0.10.9
-oauthlib==3.1.0
-orderedmultidict==1.0.1
-os-service-types==1.7.0
-oslo.config==6.11.1
-oslo.i18n==3.24.0
-oslo.serialization==2.29.2
-oslo.utils==3.41.1
-pathlib2==2.3.4
-pathvalidate==0.29.0
-pbr==5.4.3
-peewee==3.11.2
-Pillow==6.1.0
-ply==3.11
-prometheus-client==0.7.1
-psutil==5.6.3
-psycopg2-binary==2.8.3
+ndg-httpsclient==0.4.2
+netaddr==0.7.18
+netifaces==0.10.5
+-e git+https://github.com/NateFerrero/oauth2lib.git@d161b010f8a596826050a09e5e94d59443cc12d9#egg=oauth2lib
+oauthlib==2.0.0
+oslo.config==3.17.0
+oslo.i18n==3.9.0
+oslo.serialization==2.13.0
+oslo.utils==3.16.0
+pathvalidate==0.13.0
+pbr==1.10.0
+peewee==2.8.1
+Pillow==3.4.2
+ply==3.9
+positional==1.1.1
+psutil==4.3.1
+psycopg2==2.6.2
py-bcrypt==0.4
-pyasn1-modules==0.2.6
-pyasn1==0.4.7
-pycparser==2.19
-pycryptodome==3.9.0
-pycryptodomex==3.9.0
-PyGithub==1.43.8
+-e git+https://github.com/coreos/py-bitbucket.git@07a80f63388d004f05f58441983bdf195f9b666e#egg=py_bitbucket
+-e git+https://github.com/coreos/pyapi-gitlab.git@136c3970d591136a4f766a846c5d22aad52e124f#egg=pyapi_gitlab
+pyasn1==0.1.9
+pycparser==2.16
+pycryptodome==3.4.3
+pycryptodomex==3.4.3
+PyGithub==1.29
pygpgme==0.3
-pyjwkest==1.4.2
-PyJWT==1.7.1
-pymemcache==2.2.2
-PyMySQL==0.9.3
-pyOpenSSL==19.0.0
-pyparsing==2.4.2
+pyjwkest==1.3.1
+PyJWT==1.4.2
+PyMySQL==0.6.7
+pyOpenSSL==16.2.0
+pyparsing==2.1.10
PyPDF2==1.26.0
-pyrsistent==0.15.4
-python-dateutil==2.8.0
-python-editor==1.0.4
-python-gitlab==1.11.0
-python-jose==3.0.1
-python-keystoneclient==3.21.0
-python-ldap==3.2.0
-python-magic==0.4.15
-python-swiftclient==3.8.0
-pytz==2019.2
-PyYAML==5.1.2
-raven==6.10.0
+python-dateutil==2.5.3
+python-editor==1.0.1
+-e git+https://github.com/DevTable/python-etcd.git@f1168cb02a2a8c83bec1108c6fcd8615ef463b14#egg=python_etcd
+python-keystoneclient==3.6.0
+python-ldap==2.4.27
+python-magic==0.4.12
+python-swiftclient==3.1.0
+pytz==2016.7
+PyYAML==3.12
+raven==5.29.0
recaptcha2==0.1
-redis==3.3.8
+redis==2.10.5
redlock==1.2.0
reportlab==2.7
-requests-aws4auth==0.9
-requests-file==1.4.3
-requests-oauthlib==1.2.0
-requests==2.22.0
-responses==0.10.6
-rfc3986==1.3.2
-rsa==4.0
-s3transfer==0.2.1
-scandir==1.10.0
-semantic-version==2.8.2
-six==1.12.0
-soupsieve==1.9.3
-SQLAlchemy==1.3.8
-sshpubkeys==3.1.0
-stevedore==1.31.0
+requests==2.11.1
+requests-oauthlib==0.7.0
+-e git+https://github.com/coreos/resumablehashlib.git@b1b631249589b07adf40e0ee545b323a501340b4#egg=resumablehashlib
+rfc3986==0.4.1
+semantic-version==2.6.0
+six==1.10.0
+SQLAlchemy==1.1.2
+stevedore==1.17.1
stringscore==0.1.0
-stripe==2.36.2
-supervisor-stdout==0.1.1
-supervisor==4.0.4
-tldextract==2.2.1
-toposort==1.5
-trollius==2.2.post1
-tzlocal==2.0.0
-urllib3==1.25.3
-waitress==1.3.1
-WebOb==1.8.5
-websocket-client==0.56.0
-Werkzeug==0.15.6
-wrapt==1.11.2
-xhtml2pdf==0.2.3
-xmltodict==0.12.0
-yapf==0.28.0
-zope.interface==4.6.0
+stripe==1.41.0
+toposort==1.4
+trollius==2.1
+tzlocal==1.3
+urllib3==1.18
+waitress==1.0.0
+WebOb==1.6.2
+Werkzeug==0.11.11
+wrapt==1.10.8
+xhtml2pdf==0.0.6
+xmltodict==0.10.2
diff --git a/scripts/app_sre_build_deploy.sh b/scripts/app_sre_build_deploy.sh
deleted file mode 100755
index d4b350aa1..000000000
--- a/scripts/app_sre_build_deploy.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# AppSRE team CD
-
-set -exv
-
-CURRENT_DIR=$(dirname $0)
-
-BASE_IMG="quay"
-QUAY_IMAGE="quay.io/app-sre/${BASE_IMG}"
-IMG="${BASE_IMG}:latest"
-
-GIT_HASH=`git rev-parse --short=7 HEAD`
-
-# build the image
-BUILD_CMD="docker build" IMG="$IMG" make app-sre-docker-build
-
-# push the image
-skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \
- "docker-daemon:${IMG}" \
- "docker://${QUAY_IMAGE}:latest"
-
-skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \
- "docker-daemon:${IMG}" \
- "docker://${QUAY_IMAGE}:${GIT_HASH}"
diff --git a/scripts/app_sre_pr_check.sh b/scripts/app_sre_pr_check.sh
deleted file mode 100755
index 9ede1ec38..000000000
--- a/scripts/app_sre_pr_check.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-set -exv
-
-BASE_IMG="quay"
-
-IMG="${BASE_IMG}:latest"
-
-BUILD_CMD="docker build" IMG="$IMG" make app-sre-docker-build
diff --git a/scripts/ci b/scripts/ci
deleted file mode 100755
index 80f803736..000000000
--- a/scripts/ci
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-
-IMAGE="quay-ci"
-
-CACHE_DIR="${HOME}/docker"
-SHORT_SHA="${TRAVIS_COMMIT:0:7}"
-IMAGE_TAG="${SHORT_SHA}-${TRAVIS_BUILD_NUMBER}"
-IMAGE_TAR="${CACHE_DIR}/${IMAGE}-${IMAGE_TAG}.tar.gz"
-
-MYSQL_IMAGE="mysql:5.7"
-POSTGRES_IMAGE="postgres:9.6"
-POSTGRES_CONTAINER="test_postgres"
-
-export MYSQL_ROOT_PASSWORD="quay"
-export MYSQL_USER="quay"
-export MYSQL_PASSWORD="quay"
-export MYSQL_DATABASE="quay_ci"
-
-export POSTGRES_USER="quay"
-export POSTGRES_PASSWORD="quay"
-export POSTGRES_DB="quay_ci"
-
-
-build_image() {
- # Build the image and save it to the shared cache.
- docker build -t "${IMAGE}:${IMAGE_TAG}" .
-
- echo "Exporting Docker image to cache..."
- time (docker save "${IMAGE}:${IMAGE_TAG}" | gzip -2 > "${IMAGE_TAR}")
-}
-
-
-load_image() {
- # Load our cached Docker image.
- echo "Loading Docker image from cache..."
- time (zcat "${IMAGE_TAR}" | docker load)
-}
-
-
-clean_cache() {
- rm -f "${IMAGE_TAR}"
-}
-
-fail_clean() {
- if [[ $TRAVIS_TEST_RESULT -ne 0 ]]; then
- echo "Job failed. Cleaning cache..."
- clean_cache
- fi
-}
-
-quay_run() {
- docker run --net=host -e TEST_DATABASE_URI -ti "${IMAGE}:${IMAGE_TAG}" "$@"
-}
-
-
-unit() {
- MARK=${1:-"shard_1_of_1"}
- load_image && quay_run "make -f Makefile.ci unit-test PYTEST_MARK=$MARK"
-}
-
-
-registry() {
- MARK=${1:-"shard_1_of_1"}
- load_image && quay_run "make -f Makefile.ci registry-test PYTEST_MARK=$MARK"
-}
-
-
-registry_old() {
- load_image && quay_run "make -f Makefile.ci registry-test-old"
-}
-
-
-quay_ping() {
- curl --fail http://localhost:8080/v1/_internal_ping
-}
-
-gunicorn_test() {
- load_image
- docker tag "${IMAGE}:${IMAGE_TAG}" quay-ci-base
-
- echo "Building CI run image"
- docker build -t "${IMAGE}:${IMAGE_TAG}-ci-run" -f Dockerfile.cirun .
-
- echo "Running CI run image"
- docker run -d --name=quay-gunicorn-test --net=host -ti "${IMAGE}:${IMAGE_TAG}-ci-run"
-
- echo "Sleeping for CI image start"
- if ! (sleep 120 && quay_ping); then
- echo "Quay container logs:"
-    docker logs quay-gunicorn-test
- echo "Quay failed to respond in time."
- exit 1
- fi
-
- echo ""
- echo "Testing gunicorn"
- echo ""
- ./test/test_gunicorn_running.sh
-
- echo "Done testing gunicorn"
- echo ""
- docker kill quay-gunicorn-test
-}
-
-
-certs_test() {
- load_image && quay_run "make -f Makefile.ci certs-test"
-}
-
-
-mysql_ping() {
- mysqladmin --connect-timeout=2 --wait=60 --host=127.0.0.1 \
- --user=root --password="${MYSQL_ROOT_PASSWORD}" ping
-}
-
-
-mysql_start() {
- docker run --net=host -d -e MYSQL_ROOT_PASSWORD -e MYSQL_USER \
- -e MYSQL_PASSWORD -e MYSQL_DATABASE "${MYSQL_IMAGE}"
-
- if ! (sleep 20 && mysql_ping); then
- echo "MySQL failed to respond in time."
- exit 1
- fi
-}
-
-
-mysql() {
- MARK=${1:-"shard_1_of_1"}
- TEST_DATABASE_URI="mysql+pymysql://"
- TEST_DATABASE_URI+="${MYSQL_USER}:${MYSQL_PASSWORD}"
- TEST_DATABASE_URI+="@127.0.0.1/${MYSQL_DATABASE}"
-
- export TEST_DATABASE_URI
-
- load_image
- mysql_start
- quay_run "make -f Makefile.ci full-db-test PYTEST_MARK=$MARK"
-}
-
-
-postgres_ping() {
- pg_isready --timeout=30 --dbname="${TEST_DATABASE_URI}"
-}
-
-
-postgres_start() {
- docker run --name="${POSTGRES_CONTAINER}" --net=host -d -e POSTGRES_USER -e POSTGRES_PASSWORD \
- -e POSTGRES_DB "${POSTGRES_IMAGE}"
-
- if ! (sleep 10 && postgres_ping); then
- echo "PostgreSQL failed to respond in time."
- exit 1
- fi
-}
-
-
-postgres_init() {
- docker exec "${POSTGRES_CONTAINER}" psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm;'
-}
-
-
-postgres() {
- MARK=${1:-"shard_1_of_1"}
- TEST_DATABASE_URI="postgresql://"
- TEST_DATABASE_URI+="${POSTGRES_USER}:${POSTGRES_PASSWORD}"
- TEST_DATABASE_URI+="@127.0.0.1/${POSTGRES_DB}"
-
- export TEST_DATABASE_URI
-
- load_image
- postgres_start
- postgres_init
- quay_run "make -f Makefile.ci full-db-test PYTEST_MARK=$MARK"
-}
-
-
-case "$1" in
- build)
- build_image
- ;;
-
- unit)
- shift
- unit "$@"
- ;;
-
- registry)
- shift
- registry "$@"
- ;;
-
- registry_old)
- registry_old
- ;;
-
- certs_test)
- certs_test
- ;;
-
- gunicorn_test)
- gunicorn_test
- ;;
-
- mysql)
- shift
- mysql "$@"
- ;;
-
- postgres)
- shift
- postgres "$@"
- ;;
-
- fail-clean)
- fail_clean
- ;;
-
- clean)
- clean_cache
- ;;
-
- *)
- echo "Usage: $0 {build|unit|registry|registry_old|mysql|postgres|clean}"
- exit 1
- ;;
-esac
diff --git a/scripts/detect-config.sh b/scripts/detect-config.sh
deleted file mode 100755
index f426396c0..000000000
--- a/scripts/detect-config.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-if find . -name "config.yaml" -exec false {} +
-then
- exit 0
-else
- echo '!!! config.yaml found in container !!!'
- find . -name "config.yaml"
- exit -1
-fi
\ No newline at end of file
diff --git a/secscan.py b/secscan.py
index 0059547c7..a2ea9753f 100644
--- a/secscan.py
+++ b/secscan.py
@@ -1,9 +1,11 @@
-# NOTE: Must be before we import or call anything that may be synchronous.
-from gevent import monkey
-monkey.patch_all()
+import os
+import logging.config
from app import app as application
+
from endpoints.secscan import secscan
+if os.environ.get('DEBUGLOG') == 'true':
+ logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False)
application.register_blueprint(secscan, url_prefix='/secscan')
diff --git a/setup.cfg b/setup.cfg
index 9e47e9a61..2710b40f0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
[tool:pytest]
-norecursedirs = .* *.egg build dist docs bin boot core dev etc home lib lib64 media mnt node_modules opt proc root run sbin src srv sys tmp usr var venv
+norecursedirs = .* *.egg build dist docs bin boot core dev etc home lib lib64 media mnt node_modules opt proc root run sbin srv sys tmp usr var venv
testpaths = ./
python_files = **/test/test*.py
confcutdir = test
@@ -9,14 +9,4 @@ branch = True
[coverage:report]
omit =
- test/**
- venv/**
- **/test/**
-
-[pep8]
-ignore = E111,E114
-max-line-length = 100
-
-[flake8]
-ignore = E111,E114
-max-line-length = 100
+ test/*
diff --git a/static/502.html b/static/502.html
index 920c68dc8..ab9680ed4 100644
--- a/static/502.html
+++ b/static/502.html
@@ -17,6 +17,7 @@
|