diff --git a/_init.py b/_init.py
index 84a574b0f..216f47e15 100644
--- a/_init.py
+++ b/_init.py
@@ -7,40 +7,45 @@ from util.config.provider import get_config_provider
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
-STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
-STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
-STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
-STATIC_WEBFONTS_DIR = os.path.join(STATIC_DIR, 'webfonts/')
-TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')
+STATIC_DIR = os.path.join(ROOT_DIR, "static/")
+STATIC_LDN_DIR = os.path.join(STATIC_DIR, "ldn/")
+STATIC_FONTS_DIR = os.path.join(STATIC_DIR, "fonts/")
+STATIC_WEBFONTS_DIR = os.path.join(STATIC_DIR, "webfonts/")
+TEMPLATE_DIR = os.path.join(ROOT_DIR, "templates/")

-IS_TESTING = 'TEST' in os.environ
-IS_BUILDING = 'BUILDING' in os.environ
-IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ
-OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')
+IS_TESTING = "TEST" in os.environ
+IS_BUILDING = "BUILDING" in os.environ
+IS_KUBERNETES = "KUBERNETES_SERVICE_HOST" in os.environ
+OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, "stack/")

-config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
-                                      testing=IS_TESTING, kubernetes=IS_KUBERNETES)
+config_provider = get_config_provider(
+    OVERRIDE_CONFIG_DIRECTORY,
+    "config.yaml",
+    "config.py",
+    testing=IS_TESTING,
+    kubernetes=IS_KUBERNETES,
+)


 def _get_version_number_changelog():
-  try:
-    with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f:
-      return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0)
-  except IOError:
-    return ''
+    try:
+        with open(os.path.join(ROOT_DIR, "CHANGELOG.md")) as f:
+            return re.search(r"(v[0-9]+\.[0-9]+\.[0-9]+)", f.readline()).group(0)
+    except IOError:
+        return ""


 def _get_git_sha():
-  if os.path.exists("GIT_HEAD"):
-    with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f:
-      return f.read()
-  else:
-    try:
-      return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
-    except (OSError, subprocess.CalledProcessError, Exception):
-      pass
-  return "unknown"
+    if os.path.exists("GIT_HEAD"):
+        with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f:
+            return f.read()
+    else:
+        try:
+            return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
+        except (OSError, subprocess.CalledProcessError, Exception):
+            pass
+    return "unknown"


 __version__ = _get_version_number_changelog()
diff --git a/active_migration.py b/active_migration.py
index 693bcaac6..c80e239ed 100644
--- a/active_migration.py
+++ b/active_migration.py
@@ -1,22 +1,30 @@
 from enum import Enum, unique
 from data.migrationutil import DefinedDataMigration, MigrationPhase

+
 @unique
 class ERTMigrationFlags(Enum):
-  """ Flags for the encrypted robot token migration. """
-  READ_OLD_FIELDS = 'read-old'
-  WRITE_OLD_FIELDS = 'write-old'
+    """ Flags for the encrypted robot token migration. """
+
+    READ_OLD_FIELDS = "read-old"
+    WRITE_OLD_FIELDS = "write-old"

 ActiveDataMigration = DefinedDataMigration(
-  'encrypted_robot_tokens',
-  'ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE',
-  [
-    MigrationPhase('add-new-fields', 'c13c8052f7a6', [ERTMigrationFlags.READ_OLD_FIELDS,
-                                                      ERTMigrationFlags.WRITE_OLD_FIELDS]),
-    MigrationPhase('backfill-then-read-only-new',
-                   '703298a825c2', [ERTMigrationFlags.WRITE_OLD_FIELDS]),
-    MigrationPhase('stop-writing-both', '703298a825c2', []),
-    MigrationPhase('remove-old-fields', 'c059b952ed76', []),
-  ]
+    "encrypted_robot_tokens",
+    "ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE",
+    [
+        MigrationPhase(
+            "add-new-fields",
+            "c13c8052f7a6",
+            [ERTMigrationFlags.READ_OLD_FIELDS, ERTMigrationFlags.WRITE_OLD_FIELDS],
+        ),
+        MigrationPhase(
+            "backfill-then-read-only-new",
+            "703298a825c2",
+            [ERTMigrationFlags.WRITE_OLD_FIELDS],
+        ),
+        MigrationPhase("stop-writing-both", "703298a825c2", []),
+        MigrationPhase("remove-old-fields", "c059b952ed76", []),
+    ],
 )
diff --git a/app.py b/app.py
index 33245bee1..677ab9f95 100644
--- a/app.py
+++ b/app.py
@@ -16,8 +16,14 @@ from werkzeug.exceptions import HTTPException

 import features

-from _init import (config_provider, CONF_DIR, IS_KUBERNETES, IS_TESTING, OVERRIDE_CONFIG_DIRECTORY,
-                   IS_BUILDING)
+from _init import (
+    config_provider,
+    CONF_DIR,
+    IS_KUBERNETES,
+    IS_TESTING,
+    OVERRIDE_CONFIG_DIRECTORY,
+    IS_BUILDING,
+)
 from auth.auth_context import get_authenticated_user
 from avatars.avatars import Avatar
@@ -35,7 +41,11 @@ from data.userevent import UserEventsBuilderModule
 from data.userfiles import Userfiles
 from data.users import UserAuthentication
 from data.registry_model import registry_model
-from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter
+from path_converters import (
+    RegexConverter,
+    RepositoryPathConverter,
+    APIRepositoryPathConverter,
+)
 from oauth.services.github import GithubOAuthService
 from oauth.services.gitlab import GitLabOAuthService
 from oauth.loginmanager import OAuthLoginManager
@@ -62,13 +72,13 @@ from util.security.instancekeys import InstanceKeys
 from util.security.signing import Signer

-OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml')
-OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py')
+OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, "stack/config.yaml")
+OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, "stack/config.py")

-OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
+OVERRIDE_CONFIG_KEY = "QUAY_OVERRIDE_CONFIG"

-DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem'
-INIT_SCRIPTS_LOCATION = '/conf/init/'
+DOCKER_V2_SIGNINGKEY_FILENAME = "docker_v2.pem"
+INIT_SCRIPTS_LOCATION = "/conf/init/"

 app = Flask(__name__)
 logger = logging.getLogger(__name__)
@@ -79,62 +89,75 @@ is_kubernetes = IS_KUBERNETES
 is_building = IS_BUILDING

 if is_testing:
-  from test.testconfig import TestConfig
-  logger.debug('Loading test config.')
-  app.config.from_object(TestConfig())
+    from test.testconfig import TestConfig
+
+    logger.debug("Loading test config.")
+    app.config.from_object(TestConfig())
 else:
-  from config import DefaultConfig
-  logger.debug('Loading default config.')
-  app.config.from_object(DefaultConfig())
-  app.teardown_request(database.close_db_filter)
+    from config import DefaultConfig
+
+    logger.debug("Loading default config.")
+    app.config.from_object(DefaultConfig())
+    app.teardown_request(database.close_db_filter)

 # Load the override config via the provider.
config_provider.update_app_config(app.config) # Update any configuration found in the override environment variable. -environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) +environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, "{}")) app.config.update(environ_config) # Fix remote address handling for Flask. -if app.config.get('PROXY_COUNT', 1): - app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=app.config.get('PROXY_COUNT', 1)) +if app.config.get("PROXY_COUNT", 1): + app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=app.config.get("PROXY_COUNT", 1)) # Ensure the V3 upgrade key is specified correctly. If not, simply fail. # TODO: Remove for V3.1. -if not is_testing and not is_building and app.config.get('SETUP_COMPLETE', False): - v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE') - if v3_upgrade_mode is None: - raise Exception('Configuration flag `V3_UPGRADE_MODE` must be set. Please check the upgrade docs') +if not is_testing and not is_building and app.config.get("SETUP_COMPLETE", False): + v3_upgrade_mode = app.config.get("V3_UPGRADE_MODE") + if v3_upgrade_mode is None: + raise Exception( + "Configuration flag `V3_UPGRADE_MODE` must be set. Please check the upgrade docs" + ) - if (v3_upgrade_mode != 'background' - and v3_upgrade_mode != 'complete' - and v3_upgrade_mode != 'production-transition' - and v3_upgrade_mode != 'post-oci-rollout' - and v3_upgrade_mode != 'post-oci-roll-back-compat'): - raise Exception('Invalid value for config `V3_UPGRADE_MODE`. Please check the upgrade docs') + if ( + v3_upgrade_mode != "background" + and v3_upgrade_mode != "complete" + and v3_upgrade_mode != "production-transition" + and v3_upgrade_mode != "post-oci-rollout" + and v3_upgrade_mode != "post-oci-roll-back-compat" + ): + raise Exception( + "Invalid value for config `V3_UPGRADE_MODE`. Please check the upgrade docs" + ) # Split the registry model based on config. # TODO: Remove once we are fully on the OCI data model. -registry_model.setup_split(app.config.get('OCI_NAMESPACE_PROPORTION') or 0, - app.config.get('OCI_NAMESPACE_WHITELIST') or set(), - app.config.get('V22_NAMESPACE_WHITELIST') or set(), - app.config.get('V3_UPGRADE_MODE')) +registry_model.setup_split( + app.config.get("OCI_NAMESPACE_PROPORTION") or 0, + app.config.get("OCI_NAMESPACE_WHITELIST") or set(), + app.config.get("V22_NAMESPACE_WHITELIST") or set(), + app.config.get("V3_UPGRADE_MODE"), +) # Allow user to define a custom storage preference for the local instance. -_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split() +_distributed_storage_preference = os.environ.get( + "QUAY_DISTRIBUTED_STORAGE_PREFERENCE", "" +).split() if _distributed_storage_preference: - app.config['DISTRIBUTED_STORAGE_PREFERENCE'] = _distributed_storage_preference + app.config["DISTRIBUTED_STORAGE_PREFERENCE"] = _distributed_storage_preference # Generate a secret key if none was specified. -if app.config['SECRET_KEY'] is None: - logger.debug('Generating in-memory secret key') - app.config['SECRET_KEY'] = generate_secret_key() +if app.config["SECRET_KEY"] is None: + logger.debug("Generating in-memory secret key") + app.config["SECRET_KEY"] = generate_secret_key() # If the "preferred" scheme is https, then http is not allowed. Therefore, ensure we have a secure # session cookie. 
-if (app.config['PREFERRED_URL_SCHEME'] == 'https' and - not app.config.get('FORCE_NONSECURE_SESSION_COOKIE', False)): - app.config['SESSION_COOKIE_SECURE'] = True +if app.config["PREFERRED_URL_SCHEME"] == "https" and not app.config.get( + "FORCE_NONSECURE_SESSION_COOKIE", False +): + app.config["SESSION_COOKIE_SECURE"] = True # Load features from config. features.import_features(app.config) @@ -145,65 +168,77 @@ logger.debug("Loaded config", extra={"config": app.config}) class RequestWithId(Request): - request_gen = staticmethod(urn_generator(['request'])) + request_gen = staticmethod(urn_generator(["request"])) - def __init__(self, *args, **kwargs): - super(RequestWithId, self).__init__(*args, **kwargs) - self.request_id = self.request_gen() + def __init__(self, *args, **kwargs): + super(RequestWithId, self).__init__(*args, **kwargs) + self.request_id = self.request_gen() @app.before_request def _request_start(): - if os.getenv('PYDEV_DEBUG', None): - import pydevd - host, port = os.getenv('PYDEV_DEBUG').split(':') - pydevd.settrace(host, port=int(port), stdoutToServer=True, stderrToServer=True, suspend=False) + if os.getenv("PYDEV_DEBUG", None): + import pydevd - logger.debug('Starting request: %s (%s)', request.request_id, request.path, - extra={"request_id": request.request_id}) + host, port = os.getenv("PYDEV_DEBUG").split(":") + pydevd.settrace( + host, + port=int(port), + stdoutToServer=True, + stderrToServer=True, + suspend=False, + ) + + logger.debug( + "Starting request: %s (%s)", + request.request_id, + request.path, + extra={"request_id": request.request_id}, + ) -DEFAULT_FILTER = lambda x: '[FILTERED]' +DEFAULT_FILTER = lambda x: "[FILTERED]" FILTERED_VALUES = [ - {'key': ['password'], 'fn': DEFAULT_FILTER}, - {'key': ['user', 'password'], 'fn': DEFAULT_FILTER}, - {'key': ['blob'], 'fn': lambda x: x[0:8]} + {"key": ["password"], "fn": DEFAULT_FILTER}, + {"key": ["user", "password"], "fn": DEFAULT_FILTER}, + {"key": ["blob"], "fn": lambda x: x[0:8]}, ] @app.after_request def _request_end(resp): - try: - jsonbody = request.get_json(force=True, silent=True) - except HTTPException: - jsonbody = None + try: + jsonbody = request.get_json(force=True, silent=True) + except HTTPException: + jsonbody = None - values = request.values.to_dict() + values = request.values.to_dict() - if jsonbody and not isinstance(jsonbody, dict): - jsonbody = {'_parsererror': jsonbody} + if jsonbody and not isinstance(jsonbody, dict): + jsonbody = {"_parsererror": jsonbody} - if isinstance(values, dict): - filter_logs(values, FILTERED_VALUES) + if isinstance(values, dict): + filter_logs(values, FILTERED_VALUES) - extra = { - "endpoint": request.endpoint, - "request_id" : request.request_id, - "remote_addr": request.remote_addr, - "http_method": request.method, - "original_url": request.url, - "path": request.path, - "parameters": values, - "json_body": jsonbody, - "confsha": CONFIG_DIGEST, - } + extra = { + "endpoint": request.endpoint, + "request_id": request.request_id, + "remote_addr": request.remote_addr, + "http_method": request.method, + "original_url": request.url, + "path": request.path, + "parameters": values, + "json_body": jsonbody, + "confsha": CONFIG_DIGEST, + } - if request.user_agent is not None: - extra["user-agent"] = request.user_agent.string - - logger.debug('Ending request: %s (%s)', request.request_id, request.path, extra=extra) - return resp + if request.user_agent is not None: + extra["user-agent"] = request.user_agent.string + logger.debug( + "Ending request: %s (%s)", 
request.request_id, request.path, extra=extra + ) + return resp root_logger = logging.getLogger() @@ -211,13 +246,13 @@ root_logger = logging.getLogger() app.request_class = RequestWithId # Register custom converters. -app.url_map.converters['regex'] = RegexConverter -app.url_map.converters['repopath'] = RepositoryPathConverter -app.url_map.converters['apirepopath'] = APIRepositoryPathConverter +app.url_map.converters["regex"] = RegexConverter +app.url_map.converters["repopath"] = RepositoryPathConverter +app.url_map.converters["apirepopath"] = APIRepositoryPathConverter Principal(app, use_sessions=False) -tf = app.config['DB_TRANSACTION_FACTORY'] +tf = app.config["DB_TRANSACTION_FACTORY"] model_cache = get_model_cache(app.config) avatar = Avatar(app) @@ -225,10 +260,14 @@ login_manager = LoginManager(app) mail = Mail(app) prometheus = PrometheusPlugin(app) metric_queue = MetricQueue(prometheus) -chunk_cleanup_queue = WorkQueue(app.config['CHUNK_CLEANUP_QUEUE_NAME'], tf, metric_queue=metric_queue) +chunk_cleanup_queue = WorkQueue( + app.config["CHUNK_CLEANUP_QUEUE_NAME"], tf, metric_queue=metric_queue +) instance_keys = InstanceKeys(app) ip_resolver = IPResolver(app) -storage = Storage(app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver) +storage = Storage( + app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver +) userfiles = Userfiles(app, storage) log_archive = LogArchive(app, storage) analytics = Analytics(app) @@ -246,55 +285,99 @@ build_canceller = BuildCanceller(app) start_cloudwatch_sender(metric_queue, app) -github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG') -gitlab_trigger = GitLabOAuthService(app.config, 'GITLAB_TRIGGER_CONFIG') +github_trigger = GithubOAuthService(app.config, "GITHUB_TRIGGER_CONFIG") +gitlab_trigger = GitLabOAuthService(app.config, "GITLAB_TRIGGER_CONFIG") oauth_login = OAuthLoginManager(app.config) oauth_apps = [github_trigger, gitlab_trigger] -image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf, - has_namespace=False, metric_queue=metric_queue) -dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf, - metric_queue=metric_queue, - reporter=BuildMetricQueueReporter(metric_queue), - has_namespace=True) -notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_namespace=True, - metric_queue=metric_queue) -secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf, - has_namespace=False, - metric_queue=metric_queue) -export_action_logs_queue = WorkQueue(app.config['EXPORT_ACTION_LOGS_QUEUE_NAME'], tf, - has_namespace=True, - metric_queue=metric_queue) +image_replication_queue = WorkQueue( + app.config["REPLICATION_QUEUE_NAME"], + tf, + has_namespace=False, + metric_queue=metric_queue, +) +dockerfile_build_queue = WorkQueue( + app.config["DOCKERFILE_BUILD_QUEUE_NAME"], + tf, + metric_queue=metric_queue, + reporter=BuildMetricQueueReporter(metric_queue), + has_namespace=True, +) +notification_queue = WorkQueue( + app.config["NOTIFICATION_QUEUE_NAME"], + tf, + has_namespace=True, + metric_queue=metric_queue, +) +secscan_notification_queue = WorkQueue( + app.config["SECSCAN_NOTIFICATION_QUEUE_NAME"], + tf, + has_namespace=False, + metric_queue=metric_queue, +) +export_action_logs_queue = WorkQueue( + app.config["EXPORT_ACTION_LOGS_QUEUE_NAME"], + tf, + has_namespace=True, + metric_queue=metric_queue, +) # Note: We set `has_namespace` to `False` here, as we explicitly want this queue 
to not be emptied # when a namespace is marked for deletion. -namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False, - metric_queue=metric_queue) +namespace_gc_queue = WorkQueue( + app.config["NAMESPACE_GC_QUEUE_NAME"], + tf, + has_namespace=False, + metric_queue=metric_queue, +) -all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue, - secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue] +all_queues = [ + image_replication_queue, + dockerfile_build_queue, + notification_queue, + secscan_notification_queue, + chunk_cleanup_queue, + namespace_gc_queue, +] -url_scheme_and_hostname = URLSchemeAndHostname(app.config['PREFERRED_URL_SCHEME'], app.config['SERVER_HOSTNAME']) -secscan_api = SecurityScannerAPI(app.config, storage, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'], - uri_creator=get_blob_download_uri_getter(app.test_request_context('/'), url_scheme_and_hostname), - instance_keys=instance_keys) +url_scheme_and_hostname = URLSchemeAndHostname( + app.config["PREFERRED_URL_SCHEME"], app.config["SERVER_HOSTNAME"] +) +secscan_api = SecurityScannerAPI( + app.config, + storage, + app.config["SERVER_HOSTNAME"], + app.config["HTTPCLIENT"], + uri_creator=get_blob_download_uri_getter( + app.test_request_context("/"), url_scheme_and_hostname + ), + instance_keys=instance_keys, +) -repo_mirror_api = RepoMirrorAPI(app.config, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'], - instance_keys=instance_keys) +repo_mirror_api = RepoMirrorAPI( + app.config, + app.config["SERVER_HOSTNAME"], + app.config["HTTPCLIENT"], + instance_keys=instance_keys, +) tuf_metadata_api = TUFMetadataAPI(app, app.config) # Check for a key in config. If none found, generate a new signing key for Docker V2 manifests. _v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME) if os.path.exists(_v2_key_path): - docker_v2_signing_key = RSAKey().load(_v2_key_path) + docker_v2_signing_key = RSAKey().load(_v2_key_path) else: - docker_v2_signing_key = RSAKey(key=RSA.generate(2048)) + docker_v2_signing_key = RSAKey(key=RSA.generate(2048)) # Configure the database. -if app.config.get('DATABASE_SECRET_KEY') is None and app.config.get('SETUP_COMPLETE', False): - raise Exception('Missing DATABASE_SECRET_KEY in config; did you perhaps forget to add it?') +if app.config.get("DATABASE_SECRET_KEY") is None and app.config.get( + "SETUP_COMPLETE", False +): + raise Exception( + "Missing DATABASE_SECRET_KEY in config; did you perhaps forget to add it?" + ) database.configure(app.config) @@ -306,8 +389,9 @@ model.config.register_repo_cleanup_callback(tuf_metadata_api.delete_metadata) @login_manager.user_loader def load_user(user_uuid): - logger.debug('User loader loading deferred user with uuid: %s', user_uuid) - return LoginWrappedDBUser(user_uuid) + logger.debug("User loader loading deferred user with uuid: %s", user_uuid) + return LoginWrappedDBUser(user_uuid) + logs_model.configure(app.config) diff --git a/application.py b/application.py index b7f478841..1a0c799fa 100644 --- a/application.py +++ b/application.py @@ -1,5 +1,6 @@ # NOTE: Must be before we import or call anything that may be synchronous. 
from gevent import monkey + monkey.patch_all() import os @@ -17,6 +18,6 @@ import registry import secscan -if __name__ == '__main__': - logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) - application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') +if __name__ == "__main__": + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) + application.run(port=5000, debug=True, threaded=True, host="0.0.0.0") diff --git a/auth/auth_context.py b/auth/auth_context.py index 8cb57f691..375d3d62a 100644 --- a/auth/auth_context.py +++ b/auth/auth_context.py @@ -1,21 +1,25 @@ from flask import _request_ctx_stack + def get_authenticated_context(): - """ Returns the auth context for the current request context, if any. """ - return getattr(_request_ctx_stack.top, 'authenticated_context', None) + """ Returns the auth context for the current request context, if any. """ + return getattr(_request_ctx_stack.top, "authenticated_context", None) + def get_authenticated_user(): - """ Returns the authenticated user, if any, or None if none. """ - context = get_authenticated_context() - return context.authed_user if context else None + """ Returns the authenticated user, if any, or None if none. """ + context = get_authenticated_context() + return context.authed_user if context else None + def get_validated_oauth_token(): - """ Returns the authenticated and validated OAuth access token, if any, or None if none. """ - context = get_authenticated_context() - return context.authed_oauth_token if context else None + """ Returns the authenticated and validated OAuth access token, if any, or None if none. """ + context = get_authenticated_context() + return context.authed_oauth_token if context else None + def set_authenticated_context(auth_context): - """ Sets the auth context for the current request context to that given. """ - ctx = _request_ctx_stack.top - ctx.authenticated_context = auth_context - return auth_context + """ Sets the auth context for the current request context to that given. """ + ctx = _request_ctx_stack.top + ctx.authenticated_context = auth_context + return auth_context diff --git a/auth/auth_context_type.py b/auth/auth_context_type.py index 012222243..c35083f8d 100644 --- a/auth/auth_context_type.py +++ b/auth/auth_context_type.py @@ -16,422 +16,446 @@ from auth.scopes import scopes_from_scope_string logger = logging.getLogger(__name__) + @add_metaclass(ABCMeta) class AuthContext(object): - """ + """ Interface that represents the current context of authentication. """ - @property - @abstractmethod - def entity_kind(self): - """ Returns the kind of the entity in this auth context. """ - pass + @property + @abstractmethod + def entity_kind(self): + """ Returns the kind of the entity in this auth context. """ + pass - @property - @abstractmethod - def is_anonymous(self): - """ Returns true if this is an anonymous context. """ - pass + @property + @abstractmethod + def is_anonymous(self): + """ Returns true if this is an anonymous context. """ + pass - @property - @abstractmethod - def authed_oauth_token(self): - """ Returns the authenticated OAuth token, if any. """ - pass + @property + @abstractmethod + def authed_oauth_token(self): + """ Returns the authenticated OAuth token, if any. """ + pass - @property - @abstractmethod - def authed_user(self): - """ Returns the authenticated user, whether directly, or via an OAuth or access token. 
Note that + @property + @abstractmethod + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that this property will also return robot accounts. """ - pass + pass - @property - @abstractmethod - def has_nonrobot_user(self): - """ Returns whether a user (not a robot) was authenticated successfully. """ - pass + @property + @abstractmethod + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + pass - @property - @abstractmethod - def identity(self): - """ Returns the identity for the auth context. """ - pass + @property + @abstractmethod + def identity(self): + """ Returns the identity for the auth context. """ + pass - @property - @abstractmethod - def description(self): - """ Returns a human-readable and *public* description of the current auth context. """ - pass + @property + @abstractmethod + def description(self): + """ Returns a human-readable and *public* description of the current auth context. """ + pass - @property - @abstractmethod - def credential_username(self): - """ Returns the username to create credentials for this context's entity, if any. """ - pass + @property + @abstractmethod + def credential_username(self): + """ Returns the username to create credentials for this context's entity, if any. """ + pass - @abstractmethod - def analytics_id_and_public_metadata(self): - """ Returns the analytics ID and public log metadata for this auth context. """ - pass + @abstractmethod + def analytics_id_and_public_metadata(self): + """ Returns the analytics ID and public log metadata for this auth context. """ + pass - @abstractmethod - def apply_to_request_context(self): - """ Applies this auth result to the auth context and Flask-Principal. """ - pass + @abstractmethod + def apply_to_request_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + pass - @abstractmethod - def to_signed_dict(self): - """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other + @abstractmethod + def to_signed_dict(self): + """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other form of signed serialization. """ - pass + pass - @property - @abstractmethod - def unique_key(self): - """ Returns a key that is unique to this auth context type and its data. For example, an + @property + @abstractmethod + def unique_key(self): + """ Returns a key that is unique to this auth context type and its data. For example, an instance of the auth context type for the user might be a string of the form `user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents for anything besides uniqueness. This is typically used by callers when they'd like to check cache but not hit the database to get a fully validated auth context. """ - pass + pass class ValidatedAuthContext(AuthContext): - """ ValidatedAuthContext represents the loaded, authenticated and validated auth information + """ ValidatedAuthContext represents the loaded, authenticated and validated auth information for the current request context. """ - def __init__(self, user=None, token=None, oauthtoken=None, robot=None, appspecifictoken=None, - signed_data=None): - # Note: These field names *MUST* match the string values of the kinds defined in - # ContextEntityKind. 
- self.user = user - self.robot = robot - self.token = token - self.oauthtoken = oauthtoken - self.appspecifictoken = appspecifictoken - self.signed_data = signed_data - def tuple(self): - return vars(self).values() + def __init__( + self, + user=None, + token=None, + oauthtoken=None, + robot=None, + appspecifictoken=None, + signed_data=None, + ): + # Note: These field names *MUST* match the string values of the kinds defined in + # ContextEntityKind. + self.user = user + self.robot = robot + self.token = token + self.oauthtoken = oauthtoken + self.appspecifictoken = appspecifictoken + self.signed_data = signed_data - def __eq__(self, other): - return self.tuple() == other.tuple() + def tuple(self): + return vars(self).values() - @property - def entity_kind(self): - """ Returns the kind of the entity in this auth context. """ - for kind in ContextEntityKind: - if hasattr(self, kind.value) and getattr(self, kind.value): - return kind + def __eq__(self, other): + return self.tuple() == other.tuple() - return ContextEntityKind.anonymous + @property + def entity_kind(self): + """ Returns the kind of the entity in this auth context. """ + for kind in ContextEntityKind: + if hasattr(self, kind.value) and getattr(self, kind.value): + return kind - @property - def authed_user(self): - """ Returns the authenticated user, whether directly, or via an OAuth token. Note that this + return ContextEntityKind.anonymous + + @property + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth token. Note that this will also return robot accounts. """ - authed_user = self._authed_user() - if authed_user is not None and not authed_user.enabled: - logger.warning('Attempt to reference a disabled user/robot: %s', authed_user.username) - return None + authed_user = self._authed_user() + if authed_user is not None and not authed_user.enabled: + logger.warning( + "Attempt to reference a disabled user/robot: %s", authed_user.username + ) + return None - return authed_user + return authed_user - @property - def authed_oauth_token(self): - return self.oauthtoken + @property + def authed_oauth_token(self): + return self.oauthtoken - def _authed_user(self): - if self.oauthtoken: - return self.oauthtoken.authorized_user + def _authed_user(self): + if self.oauthtoken: + return self.oauthtoken.authorized_user - if self.appspecifictoken: - return self.appspecifictoken.user + if self.appspecifictoken: + return self.appspecifictoken.user - if self.signed_data: - return model.user.get_user(self.signed_data['user_context']) + if self.signed_data: + return model.user.get_user(self.signed_data["user_context"]) - return self.user if self.user else self.robot + return self.user if self.user else self.robot - @property - def is_anonymous(self): - """ Returns true if this is an anonymous context. """ - return not self.authed_user and not self.token and not self.signed_data + @property + def is_anonymous(self): + """ Returns true if this is an anonymous context. """ + return not self.authed_user and not self.token and not self.signed_data - @property - def has_nonrobot_user(self): - """ Returns whether a user (not a robot) was authenticated successfully. """ - return bool(self.authed_user and not self.robot) + @property + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + return bool(self.authed_user and not self.robot) - @property - def identity(self): - """ Returns the identity for the auth context. 
""" - if self.oauthtoken: - scope_set = scopes_from_scope_string(self.oauthtoken.scope) - return QuayDeferredPermissionUser.for_user(self.oauthtoken.authorized_user, scope_set) + @property + def identity(self): + """ Returns the identity for the auth context. """ + if self.oauthtoken: + scope_set = scopes_from_scope_string(self.oauthtoken.scope) + return QuayDeferredPermissionUser.for_user( + self.oauthtoken.authorized_user, scope_set + ) - if self.authed_user: - return QuayDeferredPermissionUser.for_user(self.authed_user) + if self.authed_user: + return QuayDeferredPermissionUser.for_user(self.authed_user) - if self.token: - return Identity(self.token.get_code(), 'token') + if self.token: + return Identity(self.token.get_code(), "token") - if self.signed_data: - identity = Identity(None, 'signed_grant') - identity.provides.update(self.signed_data['grants']) - return identity + if self.signed_data: + identity = Identity(None, "signed_grant") + identity.provides.update(self.signed_data["grants"]) + return identity - return None + return None - @property - def entity_reference(self): - """ Returns the DB object reference for this context's entity. """ - if self.entity_kind == ContextEntityKind.anonymous: - return None + @property + def entity_reference(self): + """ Returns the DB object reference for this context's entity. """ + if self.entity_kind == ContextEntityKind.anonymous: + return None - return getattr(self, self.entity_kind.value) + return getattr(self, self.entity_kind.value) - @property - def description(self): - """ Returns a human-readable and *public* description of the current auth context. """ - handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() - return handler.description(self.entity_reference) + @property + def description(self): + """ Returns a human-readable and *public* description of the current auth context. """ + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + return handler.description(self.entity_reference) - @property - def credential_username(self): - """ Returns the username to create credentials for this context's entity, if any. """ - handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() - return handler.credential_username(self.entity_reference) + @property + def credential_username(self): + """ Returns the username to create credentials for this context's entity, if any. """ + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + return handler.credential_username(self.entity_reference) - def analytics_id_and_public_metadata(self): - """ Returns the analytics ID and public log metadata for this auth context. """ - handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() - return handler.analytics_id_and_public_metadata(self.entity_reference) + def analytics_id_and_public_metadata(self): + """ Returns the analytics ID and public log metadata for this auth context. """ + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + return handler.analytics_id_and_public_metadata(self.entity_reference) - def apply_to_request_context(self): - """ Applies this auth result to the auth context and Flask-Principal. """ - # Save to the request context. - set_authenticated_context(self) + def apply_to_request_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + # Save to the request context. + set_authenticated_context(self) - # Set the identity for Flask-Principal. - if self.identity: - identity_changed.send(app, identity=self.identity) + # Set the identity for Flask-Principal. 
+ if self.identity: + identity_changed.send(app, identity=self.identity) - @property - def unique_key(self): - signed_dict = self.to_signed_dict() - return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)')) + @property + def unique_key(self): + signed_dict = self.to_signed_dict() + return "%s-%s" % ( + signed_dict["entity_kind"], + signed_dict.get("entity_reference", "(anon)"), + ) - def to_signed_dict(self): - """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other + def to_signed_dict(self): + """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other form of signed serialization. """ - dict_data = { - 'version': 2, - 'entity_kind': self.entity_kind.value, - } + dict_data = {"version": 2, "entity_kind": self.entity_kind.value} - if self.entity_kind != ContextEntityKind.anonymous: - handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() - dict_data.update({ - 'entity_reference': handler.get_serialized_entity_reference(self.entity_reference), - }) + if self.entity_kind != ContextEntityKind.anonymous: + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + dict_data.update( + { + "entity_reference": handler.get_serialized_entity_reference( + self.entity_reference + ) + } + ) - # Add legacy information. - # TODO: Remove this all once the new code is fully deployed. - if self.token: - dict_data.update({ - 'kind': 'token', - 'token': self.token.code, - }) + # Add legacy information. + # TODO: Remove this all once the new code is fully deployed. + if self.token: + dict_data.update({"kind": "token", "token": self.token.code}) - if self.oauthtoken: - dict_data.update({ - 'kind': 'oauth', - 'oauth': self.oauthtoken.uuid, - 'user': self.authed_user.username, - }) + if self.oauthtoken: + dict_data.update( + { + "kind": "oauth", + "oauth": self.oauthtoken.uuid, + "user": self.authed_user.username, + } + ) - if self.user or self.robot: - dict_data.update({ - 'kind': 'user', - 'user': self.authed_user.username, - }) + if self.user or self.robot: + dict_data.update({"kind": "user", "user": self.authed_user.username}) - if self.appspecifictoken: - dict_data.update({ - 'kind': 'user', - 'user': self.authed_user.username, - }) + if self.appspecifictoken: + dict_data.update({"kind": "user", "user": self.authed_user.username}) - if self.is_anonymous: - dict_data.update({ - 'kind': 'anonymous', - }) + if self.is_anonymous: + dict_data.update({"kind": "anonymous"}) + + # End of legacy information. + return dict_data - # End of legacy information. - return dict_data class SignedAuthContext(AuthContext): - """ SignedAuthContext represents an auth context loaded from a signed token of some kind, + """ SignedAuthContext represents an auth context loaded from a signed token of some kind, such as a JWT. Unlike ValidatedAuthContext, SignedAuthContext operates lazily, only loading the actual {user, robot, token, etc} when requested. This allows registry operations that only need to check if *some* entity is present to do so, without hitting the database. """ - def __init__(self, kind, signed_data, v1_dict_format): - self.kind = kind - self.signed_data = signed_data - self.v1_dict_format = v1_dict_format - @property - def unique_key(self): - if self.v1_dict_format: - # Since V1 data format is verbose, just use the validated version to get the key. 
- return self._get_validated().unique_key + def __init__(self, kind, signed_data, v1_dict_format): + self.kind = kind + self.signed_data = signed_data + self.v1_dict_format = v1_dict_format - signed_dict = self.signed_data - return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)')) + @property + def unique_key(self): + if self.v1_dict_format: + # Since V1 data format is verbose, just use the validated version to get the key. + return self._get_validated().unique_key - @classmethod - def build_from_signed_dict(cls, dict_data, v1_dict_format=False): - if not v1_dict_format: - entity_kind = ContextEntityKind(dict_data.get('entity_kind', 'anonymous')) - return SignedAuthContext(entity_kind, dict_data, v1_dict_format) + signed_dict = self.signed_data + return "%s-%s" % ( + signed_dict["entity_kind"], + signed_dict.get("entity_reference", "(anon)"), + ) - # Legacy handling. - # TODO: Remove this all once the new code is fully deployed. - kind_string = dict_data.get('kind', 'anonymous') - if kind_string == 'oauth': - kind_string = 'oauthtoken' + @classmethod + def build_from_signed_dict(cls, dict_data, v1_dict_format=False): + if not v1_dict_format: + entity_kind = ContextEntityKind(dict_data.get("entity_kind", "anonymous")) + return SignedAuthContext(entity_kind, dict_data, v1_dict_format) - kind = ContextEntityKind(kind_string) - return SignedAuthContext(kind, dict_data, v1_dict_format) + # Legacy handling. + # TODO: Remove this all once the new code is fully deployed. + kind_string = dict_data.get("kind", "anonymous") + if kind_string == "oauth": + kind_string = "oauthtoken" - @lru_cache(maxsize=1) - def _get_validated(self): - """ Returns a ValidatedAuthContext for this signed context, resolving all the necessary + kind = ContextEntityKind(kind_string) + return SignedAuthContext(kind, dict_data, v1_dict_format) + + @lru_cache(maxsize=1) + def _get_validated(self): + """ Returns a ValidatedAuthContext for this signed context, resolving all the necessary references. """ - if not self.v1_dict_format: - if self.kind == ContextEntityKind.anonymous: - return ValidatedAuthContext() + if not self.v1_dict_format: + if self.kind == ContextEntityKind.anonymous: + return ValidatedAuthContext() - serialized_entity_reference = self.signed_data['entity_reference'] - handler = CONTEXT_ENTITY_HANDLERS[self.kind]() - entity_reference = handler.deserialize_entity_reference(serialized_entity_reference) - if entity_reference is None: - logger.debug('Could not deserialize entity reference `%s` under kind `%s`', - serialized_entity_reference, self.kind) - return ValidatedAuthContext() + serialized_entity_reference = self.signed_data["entity_reference"] + handler = CONTEXT_ENTITY_HANDLERS[self.kind]() + entity_reference = handler.deserialize_entity_reference( + serialized_entity_reference + ) + if entity_reference is None: + logger.debug( + "Could not deserialize entity reference `%s` under kind `%s`", + serialized_entity_reference, + self.kind, + ) + return ValidatedAuthContext() - return ValidatedAuthContext(**{self.kind.value: entity_reference}) + return ValidatedAuthContext(**{self.kind.value: entity_reference}) - # Legacy handling. - # TODO: Remove this all once the new code is fully deployed. - kind_string = self.signed_data.get('kind', 'anonymous') - if kind_string == 'oauth': - kind_string = 'oauthtoken' + # Legacy handling. + # TODO: Remove this all once the new code is fully deployed. 
+ kind_string = self.signed_data.get("kind", "anonymous") + if kind_string == "oauth": + kind_string = "oauthtoken" - kind = ContextEntityKind(kind_string) - if kind == ContextEntityKind.anonymous: - return ValidatedAuthContext() + kind = ContextEntityKind(kind_string) + if kind == ContextEntityKind.anonymous: + return ValidatedAuthContext() - if kind == ContextEntityKind.user or kind == ContextEntityKind.robot: - user = model.user.get_user(self.signed_data.get('user', '')) - if not user: - return None + if kind == ContextEntityKind.user or kind == ContextEntityKind.robot: + user = model.user.get_user(self.signed_data.get("user", "")) + if not user: + return None - return ValidatedAuthContext(robot=user) if user.robot else ValidatedAuthContext(user=user) + return ( + ValidatedAuthContext(robot=user) + if user.robot + else ValidatedAuthContext(user=user) + ) - if kind == ContextEntityKind.token: - token = model.token.load_token_data(self.signed_data.get('token')) - if not token: - return None + if kind == ContextEntityKind.token: + token = model.token.load_token_data(self.signed_data.get("token")) + if not token: + return None - return ValidatedAuthContext(token=token) + return ValidatedAuthContext(token=token) - if kind == ContextEntityKind.oauthtoken: - user = model.user.get_user(self.signed_data.get('user', '')) - if not user: - return None + if kind == ContextEntityKind.oauthtoken: + user = model.user.get_user(self.signed_data.get("user", "")) + if not user: + return None - token_uuid = self.signed_data.get('oauth', '') - oauthtoken = model.oauth.lookup_access_token_for_user(user, token_uuid) - if not oauthtoken: - return None + token_uuid = self.signed_data.get("oauth", "") + oauthtoken = model.oauth.lookup_access_token_for_user(user, token_uuid) + if not oauthtoken: + return None - return ValidatedAuthContext(oauthtoken=oauthtoken) + return ValidatedAuthContext(oauthtoken=oauthtoken) - raise Exception('Unknown auth context kind `%s` when deserializing %s' % (kind, - self.signed_data)) - # End of legacy handling. + raise Exception( + "Unknown auth context kind `%s` when deserializing %s" + % (kind, self.signed_data) + ) + # End of legacy handling. - @property - def entity_kind(self): - """ Returns the kind of the entity in this auth context. """ - return self.kind + @property + def entity_kind(self): + """ Returns the kind of the entity in this auth context. """ + return self.kind - @property - def is_anonymous(self): - """ Returns true if this is an anonymous context. """ - return self.kind == ContextEntityKind.anonymous + @property + def is_anonymous(self): + """ Returns true if this is an anonymous context. """ + return self.kind == ContextEntityKind.anonymous - @property - def authed_user(self): - """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that + @property + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that this property will also return robot accounts. 
""" - if self.kind == ContextEntityKind.anonymous: - return None + if self.kind == ContextEntityKind.anonymous: + return None - return self._get_validated().authed_user + return self._get_validated().authed_user - @property - def authed_oauth_token(self): - if self.kind == ContextEntityKind.anonymous: - return None + @property + def authed_oauth_token(self): + if self.kind == ContextEntityKind.anonymous: + return None - return self._get_validated().authed_oauth_token + return self._get_validated().authed_oauth_token - @property - def has_nonrobot_user(self): - """ Returns whether a user (not a robot) was authenticated successfully. """ - if self.kind == ContextEntityKind.anonymous: - return False + @property + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + if self.kind == ContextEntityKind.anonymous: + return False - return self._get_validated().has_nonrobot_user + return self._get_validated().has_nonrobot_user - @property - def identity(self): - """ Returns the identity for the auth context. """ - return self._get_validated().identity + @property + def identity(self): + """ Returns the identity for the auth context. """ + return self._get_validated().identity - @property - def description(self): - """ Returns a human-readable and *public* description of the current auth context. """ - return self._get_validated().description + @property + def description(self): + """ Returns a human-readable and *public* description of the current auth context. """ + return self._get_validated().description - @property - def credential_username(self): - """ Returns the username to create credentials for this context's entity, if any. """ - return self._get_validated().credential_username + @property + def credential_username(self): + """ Returns the username to create credentials for this context's entity, if any. """ + return self._get_validated().credential_username - def analytics_id_and_public_metadata(self): - """ Returns the analytics ID and public log metadata for this auth context. """ - return self._get_validated().analytics_id_and_public_metadata() + def analytics_id_and_public_metadata(self): + """ Returns the analytics ID and public log metadata for this auth context. """ + return self._get_validated().analytics_id_and_public_metadata() - def apply_to_request_context(self): - """ Applies this auth result to the auth context and Flask-Principal. """ - return self._get_validated().apply_to_request_context() + def apply_to_request_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + return self._get_validated().apply_to_request_context() - def to_signed_dict(self): - """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other + def to_signed_dict(self): + """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other form of signed serialization. """ - return self.signed_data + return self.signed_data diff --git a/auth/basic.py b/auth/basic.py index 926450ad6..49d0150a4 100644 --- a/auth/basic.py +++ b/auth/basic.py @@ -8,51 +8,54 @@ from auth.validateresult import ValidateResult, AuthKind logger = logging.getLogger(__name__) + def has_basic_auth(username): - """ Returns true if a basic auth header exists with a username and password pair that validates + """ Returns true if a basic auth header exists with a username and password pair that validates against the internal authentication system. 
Returns True on full success and False on any failure (missing header, invalid header, invalid credentials, etc). """ - auth_header = request.headers.get('authorization', '') - result = validate_basic_auth(auth_header) - return result.has_nonrobot_user and result.context.user.username == username + auth_header = request.headers.get("authorization", "") + result = validate_basic_auth(auth_header) + return result.has_nonrobot_user and result.context.user.username == username def validate_basic_auth(auth_header): - """ Validates the specified basic auth header, returning whether its credentials point + """ Validates the specified basic auth header, returning whether its credentials point to a valid user or token. """ - if not auth_header: - return ValidateResult(AuthKind.basic, missing=True) + if not auth_header: + return ValidateResult(AuthKind.basic, missing=True) - logger.debug('Attempt to process basic auth header') + logger.debug("Attempt to process basic auth header") - # Parse the basic auth header. - assert isinstance(auth_header, basestring) - credentials, err = _parse_basic_auth_header(auth_header) - if err is not None: - logger.debug('Got invalid basic auth header: %s', auth_header) - return ValidateResult(AuthKind.basic, missing=True) + # Parse the basic auth header. + assert isinstance(auth_header, basestring) + credentials, err = _parse_basic_auth_header(auth_header) + if err is not None: + logger.debug("Got invalid basic auth header: %s", auth_header) + return ValidateResult(AuthKind.basic, missing=True) - auth_username, auth_password_or_token = credentials - result, _ = validate_credentials(auth_username, auth_password_or_token) - return result.with_kind(AuthKind.basic) + auth_username, auth_password_or_token = credentials + result, _ = validate_credentials(auth_username, auth_password_or_token) + return result.with_kind(AuthKind.basic) def _parse_basic_auth_header(auth): - """ Parses the given basic auth header, returning the credentials found inside. + """ Parses the given basic auth header, returning the credentials found inside. 
""" - normalized = [part.strip() for part in auth.split(' ') if part] - if normalized[0].lower() != 'basic' or len(normalized) != 2: - return None, 'Invalid basic auth header' + normalized = [part.strip() for part in auth.split(" ") if part] + if normalized[0].lower() != "basic" or len(normalized) != 2: + return None, "Invalid basic auth header" - try: - credentials = [part.decode('utf-8') for part in b64decode(normalized[1]).split(':', 1)] - except (TypeError, UnicodeDecodeError, ValueError): - logger.exception('Exception when parsing basic auth header: %s', auth) - return None, 'Could not parse basic auth header' + try: + credentials = [ + part.decode("utf-8") for part in b64decode(normalized[1]).split(":", 1) + ] + except (TypeError, UnicodeDecodeError, ValueError): + logger.exception("Exception when parsing basic auth header: %s", auth) + return None, "Could not parse basic auth header" - if len(credentials) != 2: - return None, 'Unexpected number of credentials found in basic auth header' + if len(credentials) != 2: + return None, "Unexpected number of credentials found in basic auth header" - return credentials, None + return credentials, None diff --git a/auth/context_entity.py b/auth/context_entity.py index 038624b0c..7c52dbe8d 100644 --- a/auth/context_entity.py +++ b/auth/context_entity.py @@ -4,200 +4,210 @@ from enum import Enum from data import model -from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, - APP_SPECIFIC_TOKEN_USERNAME) +from auth.credential_consts import ( + ACCESS_TOKEN_USERNAME, + OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME, +) + class ContextEntityKind(Enum): - """ Defines the various kinds of entities in an auth context. Note that the string values of + """ Defines the various kinds of entities in an auth context. Note that the string values of these fields *must* match the names of the fields in the ValidatedAuthContext class, as we fill them in directly based on the string names here. """ - anonymous = 'anonymous' - user = 'user' - robot = 'robot' - token = 'token' - oauthtoken = 'oauthtoken' - appspecifictoken = 'appspecifictoken' - signed_data = 'signed_data' + + anonymous = "anonymous" + user = "user" + robot = "robot" + token = "token" + oauthtoken = "oauthtoken" + appspecifictoken = "appspecifictoken" + signed_data = "signed_data" @add_metaclass(ABCMeta) class ContextEntityHandler(object): - """ + """ Interface that represents handling specific kinds of entities under an auth context. """ - @abstractmethod - def credential_username(self, entity_reference): - """ Returns the username to create credentials for this entity, if any. """ - pass + @abstractmethod + def credential_username(self, entity_reference): + """ Returns the username to create credentials for this entity, if any. """ + pass - @abstractmethod - def get_serialized_entity_reference(self, entity_reference): - """ Returns the entity reference for this kind of auth context, serialized into a form that can + @abstractmethod + def get_serialized_entity_reference(self, entity_reference): + """ Returns the entity reference for this kind of auth context, serialized into a form that can be placed into a JSON object and put into a JWT. This is typically a DB UUID or another unique identifier for the object in the DB. """ - pass + pass - @abstractmethod - def deserialize_entity_reference(self, serialized_entity_reference): - """ Returns the deserialized reference to the entity in the database, or None if none. 
""" - pass + @abstractmethod + def deserialize_entity_reference(self, serialized_entity_reference): + """ Returns the deserialized reference to the entity in the database, or None if none. """ + pass - @abstractmethod - def description(self, entity_reference): - """ Returns a human-readable and *public* description of the current entity. """ - pass + @abstractmethod + def description(self, entity_reference): + """ Returns a human-readable and *public* description of the current entity. """ + pass - @abstractmethod - def analytics_id_and_public_metadata(self, entity_reference): - """ Returns the analyitics ID and a dict of public metadata for the current entity. """ - pass + @abstractmethod + def analytics_id_and_public_metadata(self, entity_reference): + """ Returns the analyitics ID and a dict of public metadata for the current entity. """ + pass class AnonymousEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return None + def credential_username(self, entity_reference): + return None - def get_serialized_entity_reference(self, entity_reference): - return None + def get_serialized_entity_reference(self, entity_reference): + return None - def deserialize_entity_reference(self, serialized_entity_reference): - return None + def deserialize_entity_reference(self, serialized_entity_reference): + return None - def description(self, entity_reference): - return "anonymous" + def description(self, entity_reference): + return "anonymous" - def analytics_id_and_public_metadata(self, entity_reference): - return "anonymous", {} + def analytics_id_and_public_metadata(self, entity_reference): + return "anonymous", {} class UserEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return entity_reference.username + def credential_username(self, entity_reference): + return entity_reference.username - def get_serialized_entity_reference(self, entity_reference): - return entity_reference.uuid + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.uuid - def deserialize_entity_reference(self, serialized_entity_reference): - return model.user.get_user_by_uuid(serialized_entity_reference) + def deserialize_entity_reference(self, serialized_entity_reference): + return model.user.get_user_by_uuid(serialized_entity_reference) - def description(self, entity_reference): - return "user %s" % entity_reference.username + def description(self, entity_reference): + return "user %s" % entity_reference.username - def analytics_id_and_public_metadata(self, entity_reference): - return entity_reference.username, { - 'username': entity_reference.username, - } + def analytics_id_and_public_metadata(self, entity_reference): + return entity_reference.username, {"username": entity_reference.username} class RobotEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return entity_reference.username + def credential_username(self, entity_reference): + return entity_reference.username - def get_serialized_entity_reference(self, entity_reference): - return entity_reference.username + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.username - def deserialize_entity_reference(self, serialized_entity_reference): - return model.user.lookup_robot(serialized_entity_reference) + def deserialize_entity_reference(self, serialized_entity_reference): + return model.user.lookup_robot(serialized_entity_reference) - def description(self, entity_reference): - return 
"robot %s" % entity_reference.username + def description(self, entity_reference): + return "robot %s" % entity_reference.username - def analytics_id_and_public_metadata(self, entity_reference): - return entity_reference.username, { - 'username': entity_reference.username, - 'is_robot': True, - } + def analytics_id_and_public_metadata(self, entity_reference): + return ( + entity_reference.username, + {"username": entity_reference.username, "is_robot": True}, + ) class TokenEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return ACCESS_TOKEN_USERNAME + def credential_username(self, entity_reference): + return ACCESS_TOKEN_USERNAME - def get_serialized_entity_reference(self, entity_reference): - return entity_reference.get_code() + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.get_code() - def deserialize_entity_reference(self, serialized_entity_reference): - return model.token.load_token_data(serialized_entity_reference) + def deserialize_entity_reference(self, serialized_entity_reference): + return model.token.load_token_data(serialized_entity_reference) - def description(self, entity_reference): - return "token %s" % entity_reference.friendly_name + def description(self, entity_reference): + return "token %s" % entity_reference.friendly_name - def analytics_id_and_public_metadata(self, entity_reference): - return 'token:%s' % entity_reference.id, { - 'token': entity_reference.friendly_name, - } + def analytics_id_and_public_metadata(self, entity_reference): + return ( + "token:%s" % entity_reference.id, + {"token": entity_reference.friendly_name}, + ) class OAuthTokenEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return OAUTH_TOKEN_USERNAME + def credential_username(self, entity_reference): + return OAUTH_TOKEN_USERNAME - def get_serialized_entity_reference(self, entity_reference): - return entity_reference.uuid + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.uuid - def deserialize_entity_reference(self, serialized_entity_reference): - return model.oauth.lookup_access_token_by_uuid(serialized_entity_reference) + def deserialize_entity_reference(self, serialized_entity_reference): + return model.oauth.lookup_access_token_by_uuid(serialized_entity_reference) - def description(self, entity_reference): - return "oauthtoken for user %s" % entity_reference.authorized_user.username + def description(self, entity_reference): + return "oauthtoken for user %s" % entity_reference.authorized_user.username - def analytics_id_and_public_metadata(self, entity_reference): - return 'oauthtoken:%s' % entity_reference.id, { - 'oauth_token_id': entity_reference.id, - 'oauth_token_application_id': entity_reference.application.client_id, - 'oauth_token_application': entity_reference.application.name, - 'username': entity_reference.authorized_user.username, - } + def analytics_id_and_public_metadata(self, entity_reference): + return ( + "oauthtoken:%s" % entity_reference.id, + { + "oauth_token_id": entity_reference.id, + "oauth_token_application_id": entity_reference.application.client_id, + "oauth_token_application": entity_reference.application.name, + "username": entity_reference.authorized_user.username, + }, + ) class AppSpecificTokenEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return APP_SPECIFIC_TOKEN_USERNAME + def credential_username(self, entity_reference): + return APP_SPECIFIC_TOKEN_USERNAME - 
def get_serialized_entity_reference(self, entity_reference): - return entity_reference.uuid + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.uuid - def deserialize_entity_reference(self, serialized_entity_reference): - return model.appspecifictoken.get_token_by_uuid(serialized_entity_reference) + def deserialize_entity_reference(self, serialized_entity_reference): + return model.appspecifictoken.get_token_by_uuid(serialized_entity_reference) - def description(self, entity_reference): - tpl = (entity_reference.title, entity_reference.user.username) - return "app specific token %s for user %s" % tpl + def description(self, entity_reference): + tpl = (entity_reference.title, entity_reference.user.username) + return "app specific token %s for user %s" % tpl - def analytics_id_and_public_metadata(self, entity_reference): - return 'appspecifictoken:%s' % entity_reference.id, { - 'app_specific_token': entity_reference.uuid, - 'app_specific_token_title': entity_reference.title, - 'username': entity_reference.user.username, - } + def analytics_id_and_public_metadata(self, entity_reference): + return ( + "appspecifictoken:%s" % entity_reference.id, + { + "app_specific_token": entity_reference.uuid, + "app_specific_token_title": entity_reference.title, + "username": entity_reference.user.username, + }, + ) class SignedDataEntityHandler(ContextEntityHandler): - def credential_username(self, entity_reference): - return None + def credential_username(self, entity_reference): + return None - def get_serialized_entity_reference(self, entity_reference): - raise NotImplementedError + def get_serialized_entity_reference(self, entity_reference): + raise NotImplementedError - def deserialize_entity_reference(self, serialized_entity_reference): - raise NotImplementedError + def deserialize_entity_reference(self, serialized_entity_reference): + raise NotImplementedError - def description(self, entity_reference): - return "signed" + def description(self, entity_reference): + return "signed" - def analytics_id_and_public_metadata(self, entity_reference): - return 'signed', {'signed': entity_reference} + def analytics_id_and_public_metadata(self, entity_reference): + return "signed", {"signed": entity_reference} CONTEXT_ENTITY_HANDLERS = { - ContextEntityKind.anonymous: AnonymousEntityHandler, - ContextEntityKind.user: UserEntityHandler, - ContextEntityKind.robot: RobotEntityHandler, - ContextEntityKind.token: TokenEntityHandler, - ContextEntityKind.oauthtoken: OAuthTokenEntityHandler, - ContextEntityKind.appspecifictoken: AppSpecificTokenEntityHandler, - ContextEntityKind.signed_data: SignedDataEntityHandler, + ContextEntityKind.anonymous: AnonymousEntityHandler, + ContextEntityKind.user: UserEntityHandler, + ContextEntityKind.robot: RobotEntityHandler, + ContextEntityKind.token: TokenEntityHandler, + ContextEntityKind.oauthtoken: OAuthTokenEntityHandler, + ContextEntityKind.appspecifictoken: AppSpecificTokenEntityHandler, + ContextEntityKind.signed_data: SignedDataEntityHandler, } diff --git a/auth/cookie.py b/auth/cookie.py index 68ed0f8ee..839183f32 100644 --- a/auth/cookie.py +++ b/auth/cookie.py @@ -7,31 +7,40 @@ from auth.validateresult import AuthKind, ValidateResult logger = logging.getLogger(__name__) + def validate_session_cookie(auth_header_unusued=None): - """ Attempts to load a user from a session cookie. """ - if current_user.is_anonymous: - return ValidateResult(AuthKind.cookie, missing=True) + """ Attempts to load a user from a session cookie. 
""" + if current_user.is_anonymous: + return ValidateResult(AuthKind.cookie, missing=True) - try: - # Attempt to parse the user uuid to make sure the cookie has the right value type - UUID(current_user.get_id()) - except ValueError: - logger.debug('Got non-UUID for session cookie user: %s', current_user.get_id()) - return ValidateResult(AuthKind.cookie, error_message='Invalid session cookie format') + try: + # Attempt to parse the user uuid to make sure the cookie has the right value type + UUID(current_user.get_id()) + except ValueError: + logger.debug("Got non-UUID for session cookie user: %s", current_user.get_id()) + return ValidateResult( + AuthKind.cookie, error_message="Invalid session cookie format" + ) - logger.debug('Loading user from cookie: %s', current_user.get_id()) - db_user = current_user.db_user() - if db_user is None: - return ValidateResult(AuthKind.cookie, error_message='Could not find matching user') + logger.debug("Loading user from cookie: %s", current_user.get_id()) + db_user = current_user.db_user() + if db_user is None: + return ValidateResult( + AuthKind.cookie, error_message="Could not find matching user" + ) - # Don't allow disabled users to login. - if not db_user.enabled: - logger.debug('User %s in session cookie is disabled', db_user.username) - return ValidateResult(AuthKind.cookie, error_message='User account is disabled') + # Don't allow disabled users to login. + if not db_user.enabled: + logger.debug("User %s in session cookie is disabled", db_user.username) + return ValidateResult(AuthKind.cookie, error_message="User account is disabled") - # Don't allow organizations to "login". - if db_user.organization: - logger.debug('User %s in session cookie is in-fact organization', db_user.username) - return ValidateResult(AuthKind.cookie, error_message='Cannot login to organization') + # Don't allow organizations to "login". 
+ if db_user.organization: + logger.debug( + "User %s in session cookie is in-fact organization", db_user.username + ) + return ValidateResult( + AuthKind.cookie, error_message="Cannot login to organization" + ) - return ValidateResult(AuthKind.cookie, user=db_user) + return ValidateResult(AuthKind.cookie, user=db_user) diff --git a/auth/credential_consts.py b/auth/credential_consts.py index dda9834d1..93287d833 100644 --- a/auth/credential_consts.py +++ b/auth/credential_consts.py @@ -1,3 +1,3 @@ -ACCESS_TOKEN_USERNAME = '$token' -OAUTH_TOKEN_USERNAME = '$oauthtoken' -APP_SPECIFIC_TOKEN_USERNAME = '$app' +ACCESS_TOKEN_USERNAME = "$token" +OAUTH_TOKEN_USERNAME = "$oauthtoken" +APP_SPECIFIC_TOKEN_USERNAME = "$app" diff --git a/auth/credentials.py b/auth/credentials.py index 5d8c8b4dd..f56f6a540 100644 --- a/auth/credentials.py +++ b/auth/credentials.py @@ -7,8 +7,11 @@ import features from app import authentication from auth.oauth import validate_oauth_token from auth.validateresult import ValidateResult, AuthKind -from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, - APP_SPECIFIC_TOKEN_USERNAME) +from auth.credential_consts import ( + ACCESS_TOKEN_USERNAME, + OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME, +) from data import model from util.names import parse_robot_username @@ -16,70 +19,116 @@ logger = logging.getLogger(__name__) class CredentialKind(Enum): - user = 'user' - robot = 'robot' - token = ACCESS_TOKEN_USERNAME - oauth_token = OAUTH_TOKEN_USERNAME - app_specific_token = APP_SPECIFIC_TOKEN_USERNAME + user = "user" + robot = "robot" + token = ACCESS_TOKEN_USERNAME + oauth_token = OAUTH_TOKEN_USERNAME + app_specific_token = APP_SPECIFIC_TOKEN_USERNAME def validate_credentials(auth_username, auth_password_or_token): - """ Validates a pair of auth username and password/token credentials. """ - # Check for access tokens. - if auth_username == ACCESS_TOKEN_USERNAME: - logger.debug('Found credentials for access token') - try: - token = model.token.load_token_data(auth_password_or_token) - logger.debug('Successfully validated credentials for access token %s', token.id) - return ValidateResult(AuthKind.credentials, token=token), CredentialKind.token - except model.DataModelException: - logger.warning('Failed to validate credentials for access token %s', auth_password_or_token) - return (ValidateResult(AuthKind.credentials, error_message='Invalid access token'), - CredentialKind.token) + """ Validates a pair of auth username and password/token credentials. """ + # Check for access tokens. + if auth_username == ACCESS_TOKEN_USERNAME: + logger.debug("Found credentials for access token") + try: + token = model.token.load_token_data(auth_password_or_token) + logger.debug( + "Successfully validated credentials for access token %s", token.id + ) + return ( + ValidateResult(AuthKind.credentials, token=token), + CredentialKind.token, + ) + except model.DataModelException: + logger.warning( + "Failed to validate credentials for access token %s", + auth_password_or_token, + ) + return ( + ValidateResult( + AuthKind.credentials, error_message="Invalid access token" + ), + CredentialKind.token, + ) - # Check for App Specific tokens. 
- if features.APP_SPECIFIC_TOKENS and auth_username == APP_SPECIFIC_TOKEN_USERNAME: - logger.debug('Found credentials for app specific auth token') - token = model.appspecifictoken.access_valid_token(auth_password_or_token) - if token is None: - logger.debug('Failed to validate credentials for app specific token: %s', - auth_password_or_token) - return (ValidateResult(AuthKind.credentials, error_message='Invalid token'), - CredentialKind.app_specific_token) + # Check for App Specific tokens. + if features.APP_SPECIFIC_TOKENS and auth_username == APP_SPECIFIC_TOKEN_USERNAME: + logger.debug("Found credentials for app specific auth token") + token = model.appspecifictoken.access_valid_token(auth_password_or_token) + if token is None: + logger.debug( + "Failed to validate credentials for app specific token: %s", + auth_password_or_token, + ) + return ( + ValidateResult(AuthKind.credentials, error_message="Invalid token"), + CredentialKind.app_specific_token, + ) - if not token.user.enabled: - logger.debug('Tried to use an app specific token for a disabled user: %s', - token.uuid) - return (ValidateResult(AuthKind.credentials, - error_message='This user has been disabled. Please contact your administrator.'), - CredentialKind.app_specific_token) + if not token.user.enabled: + logger.debug( + "Tried to use an app specific token for a disabled user: %s", token.uuid + ) + return ( + ValidateResult( + AuthKind.credentials, + error_message="This user has been disabled. Please contact your administrator.", + ), + CredentialKind.app_specific_token, + ) - logger.debug('Successfully validated credentials for app specific token %s', token.id) - return (ValidateResult(AuthKind.credentials, appspecifictoken=token), - CredentialKind.app_specific_token) + logger.debug( + "Successfully validated credentials for app specific token %s", token.id + ) + return ( + ValidateResult(AuthKind.credentials, appspecifictoken=token), + CredentialKind.app_specific_token, + ) - # Check for OAuth tokens. - if auth_username == OAUTH_TOKEN_USERNAME: - return validate_oauth_token(auth_password_or_token), CredentialKind.oauth_token + # Check for OAuth tokens. + if auth_username == OAUTH_TOKEN_USERNAME: + return validate_oauth_token(auth_password_or_token), CredentialKind.oauth_token - # Check for robots and users. - is_robot = parse_robot_username(auth_username) - if is_robot: - logger.debug('Found credentials header for robot %s', auth_username) - try: - robot = model.user.verify_robot(auth_username, auth_password_or_token) - logger.debug('Successfully validated credentials for robot %s', auth_username) - return ValidateResult(AuthKind.credentials, robot=robot), CredentialKind.robot - except model.InvalidRobotException as ire: - logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire) - return ValidateResult(AuthKind.credentials, error_message=str(ire)), CredentialKind.robot + # Check for robots and users. 
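+    # Robot credentials arrive as a namespaced username (for example "someorg+ci",
+    # assuming the usual <namespace>+<shortname> robot naming) together with the
+    # robot's token as the password; anything that does not parse as a robot name
+    # falls through to the ordinary username/password path below.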
+ is_robot = parse_robot_username(auth_username) + if is_robot: + logger.debug("Found credentials header for robot %s", auth_username) + try: + robot = model.user.verify_robot(auth_username, auth_password_or_token) + logger.debug( + "Successfully validated credentials for robot %s", auth_username + ) + return ( + ValidateResult(AuthKind.credentials, robot=robot), + CredentialKind.robot, + ) + except model.InvalidRobotException as ire: + logger.warning( + "Failed to validate credentials for robot %s: %s", auth_username, ire + ) + return ( + ValidateResult(AuthKind.credentials, error_message=str(ire)), + CredentialKind.robot, + ) - # Otherwise, treat as a standard user. - (authenticated, err) = authentication.verify_and_link_user(auth_username, auth_password_or_token, - basic_auth=True) - if authenticated: - logger.debug('Successfully validated credentials for user %s', authenticated.username) - return ValidateResult(AuthKind.credentials, user=authenticated), CredentialKind.user - else: - logger.warning('Failed to validate credentials for user %s: %s', auth_username, err) - return ValidateResult(AuthKind.credentials, error_message=err), CredentialKind.user + # Otherwise, treat as a standard user. + (authenticated, err) = authentication.verify_and_link_user( + auth_username, auth_password_or_token, basic_auth=True + ) + if authenticated: + logger.debug( + "Successfully validated credentials for user %s", authenticated.username + ) + return ( + ValidateResult(AuthKind.credentials, user=authenticated), + CredentialKind.user, + ) + else: + logger.warning( + "Failed to validate credentials for user %s: %s", auth_username, err + ) + return ( + ValidateResult(AuthKind.credentials, error_message=err), + CredentialKind.user, + ) diff --git a/auth/decorators.py b/auth/decorators.py index 5fc966140..6e5a0cf05 100644 --- a/auth/decorators.py +++ b/auth/decorators.py @@ -14,83 +14,101 @@ from util.http import abort logger = logging.getLogger(__name__) + def _auth_decorator(pass_result=False, handlers=None): - """ Builds an auth decorator that runs the given handlers and, if any return successfully, + """ Builds an auth decorator that runs the given handlers and, if any return successfully, sets up the auth context. The wrapped function will be invoked *regardless of success or failure of the auth handler(s)* """ - def processor(func): - @wraps(func) - def wrapper(*args, **kwargs): - auth_header = request.headers.get('authorization', '') - result = None - for handler in handlers: - result = handler(auth_header) - # If the handler was missing the necessary information, skip it and try the next one. - if result.missing: - continue + def processor(func): + @wraps(func) + def wrapper(*args, **kwargs): + auth_header = request.headers.get("authorization", "") + result = None - # Check for a valid result. - if result.auth_valid: - logger.debug('Found valid auth result: %s', result.tuple()) + for handler in handlers: + result = handler(auth_header) + # If the handler was missing the necessary information, skip it and try the next one. + if result.missing: + continue - # Set the various pieces of the auth context. - result.apply_to_context() + # Check for a valid result. + if result.auth_valid: + logger.debug("Found valid auth result: %s", result.tuple()) - # Log the metric. - metric_queue.authentication_count.Inc(labelvalues=[result.kind, True]) - break + # Set the various pieces of the auth context. + result.apply_to_context() - # Otherwise, report the error. 
- if result.error_message is not None: - # Log the failure. - metric_queue.authentication_count.Inc(labelvalues=[result.kind, False]) - break + # Log the metric. + metric_queue.authentication_count.Inc( + labelvalues=[result.kind, True] + ) + break - if pass_result: - kwargs['auth_result'] = result + # Otherwise, report the error. + if result.error_message is not None: + # Log the failure. + metric_queue.authentication_count.Inc( + labelvalues=[result.kind, False] + ) + break - return func(*args, **kwargs) - return wrapper - return processor + if pass_result: + kwargs["auth_result"] = result + + return func(*args, **kwargs) + + return wrapper + + return processor -process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie]) +process_oauth = _auth_decorator( + handlers=[validate_bearer_auth, validate_session_cookie] +) process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth]) -process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie]) +process_auth_or_cookie = _auth_decorator( + handlers=[validate_basic_auth, validate_session_cookie] +) process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True) process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth]) def require_session_login(func): - """ Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If + """ Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If a valid session cookie does exist, the authenticated user and identity are also set. """ - @wraps(func) - def wrapper(*args, **kwargs): - result = validate_session_cookie() - if result.has_nonrobot_user: - result.apply_to_context() - metric_queue.authentication_count.Inc(labelvalues=[result.kind, True]) - return func(*args, **kwargs) - elif not result.missing: - metric_queue.authentication_count.Inc(labelvalues=[result.kind, False]) - abort(401, message='Method requires login and no valid login could be loaded.') - return wrapper + @wraps(func) + def wrapper(*args, **kwargs): + result = validate_session_cookie() + if result.has_nonrobot_user: + result.apply_to_context() + metric_queue.authentication_count.Inc(labelvalues=[result.kind, True]) + return func(*args, **kwargs) + elif not result.missing: + metric_queue.authentication_count.Inc(labelvalues=[result.kind, False]) + + abort(401, message="Method requires login and no valid login could be loaded.") + + return wrapper def extract_namespace_repo_from_session(func): - """ Extracts the namespace and repository name from the current session (which must exist) + """ Extracts the namespace and repository name from the current session (which must exist) and passes them into the decorated function as the first and second arguments. If the session doesn't exist or does not contain these arugments, a 400 error is raised. 
""" - @wraps(func) - def wrapper(*args, **kwargs): - if 'namespace' not in session or 'repository' not in session: - logger.error('Unable to load namespace or repository from session: %s', session) - abort(400, message='Missing namespace in request') - return func(session['namespace'], session['repository'], *args, **kwargs) - return wrapper + @wraps(func) + def wrapper(*args, **kwargs): + if "namespace" not in session or "repository" not in session: + logger.error( + "Unable to load namespace or repository from session: %s", session + ) + abort(400, message="Missing namespace in request") + + return func(session["namespace"], session["repository"], *args, **kwargs) + + return wrapper diff --git a/auth/oauth.py b/auth/oauth.py index aaea92831..b9f3ca7bd 100644 --- a/auth/oauth.py +++ b/auth/oauth.py @@ -8,41 +8,47 @@ from data import model logger = logging.getLogger(__name__) + def validate_bearer_auth(auth_header): - """ Validates an OAuth token found inside a basic auth `Bearer` token, returning whether it + """ Validates an OAuth token found inside a basic auth `Bearer` token, returning whether it points to a valid OAuth token. """ - if not auth_header: - return ValidateResult(AuthKind.oauth, missing=True) + if not auth_header: + return ValidateResult(AuthKind.oauth, missing=True) - normalized = [part.strip() for part in auth_header.split(' ') if part] - if normalized[0].lower() != 'bearer' or len(normalized) != 2: - logger.debug('Got invalid bearer token format: %s', auth_header) - return ValidateResult(AuthKind.oauth, missing=True) + normalized = [part.strip() for part in auth_header.split(" ") if part] + if normalized[0].lower() != "bearer" or len(normalized) != 2: + logger.debug("Got invalid bearer token format: %s", auth_header) + return ValidateResult(AuthKind.oauth, missing=True) - (_, oauth_token) = normalized - return validate_oauth_token(oauth_token) + (_, oauth_token) = normalized + return validate_oauth_token(oauth_token) def validate_oauth_token(token): - """ Validates the specified OAuth token, returning whether it points to a valid OAuth token. + """ Validates the specified OAuth token, returning whether it points to a valid OAuth token. """ - validated = model.oauth.validate_access_token(token) - if not validated: - logger.warning('OAuth access token could not be validated: %s', token) - return ValidateResult(AuthKind.oauth, - error_message='OAuth access token could not be validated') + validated = model.oauth.validate_access_token(token) + if not validated: + logger.warning("OAuth access token could not be validated: %s", token) + return ValidateResult( + AuthKind.oauth, error_message="OAuth access token could not be validated" + ) - if validated.expires_at <= datetime.utcnow(): - logger.warning('OAuth access with an expired token: %s', token) - return ValidateResult(AuthKind.oauth, error_message='OAuth access token has expired') + if validated.expires_at <= datetime.utcnow(): + logger.warning("OAuth access with an expired token: %s", token) + return ValidateResult( + AuthKind.oauth, error_message="OAuth access token has expired" + ) - # Don't allow disabled users to login. - if not validated.authorized_user.enabled: - return ValidateResult(AuthKind.oauth, - error_message='Granter of the oauth access token is disabled') + # Don't allow disabled users to login. 
+ if not validated.authorized_user.enabled: + return ValidateResult( + AuthKind.oauth, + error_message="Granter of the oauth access token is disabled", + ) - # We have a valid token - scope_set = scopes_from_scope_string(validated.scope) - logger.debug('Successfully validated oauth access token with scope: %s', scope_set) - return ValidateResult(AuthKind.oauth, oauthtoken=validated) + # We have a valid token + scope_set = scopes_from_scope_string(validated.scope) + logger.debug("Successfully validated oauth access token with scope: %s", scope_set) + return ValidateResult(AuthKind.oauth, oauthtoken=validated) diff --git a/auth/permissions.py b/auth/permissions.py index c967aa046..10419acbc 100644 --- a/auth/permissions.py +++ b/auth/permissions.py @@ -14,351 +14,399 @@ from data import model logger = logging.getLogger(__name__) -_ResourceNeed = namedtuple('resource', ['type', 'namespace', 'name', 'role']) -_RepositoryNeed = partial(_ResourceNeed, 'repository') -_NamespaceWideNeed = namedtuple('namespacewide', ['type', 'namespace', 'role']) -_OrganizationNeed = partial(_NamespaceWideNeed, 'organization') -_OrganizationRepoNeed = partial(_NamespaceWideNeed, 'organizationrepo') -_TeamTypeNeed = namedtuple('teamwideneed', ['type', 'orgname', 'teamname', 'role']) -_TeamNeed = partial(_TeamTypeNeed, 'orgteam') -_UserTypeNeed = namedtuple('userspecificneed', ['type', 'username', 'role']) -_UserNeed = partial(_UserTypeNeed, 'user') -_SuperUserNeed = partial(namedtuple('superuserneed', ['type']), 'superuser') +_ResourceNeed = namedtuple("resource", ["type", "namespace", "name", "role"]) +_RepositoryNeed = partial(_ResourceNeed, "repository") +_NamespaceWideNeed = namedtuple("namespacewide", ["type", "namespace", "role"]) +_OrganizationNeed = partial(_NamespaceWideNeed, "organization") +_OrganizationRepoNeed = partial(_NamespaceWideNeed, "organizationrepo") +_TeamTypeNeed = namedtuple("teamwideneed", ["type", "orgname", "teamname", "role"]) +_TeamNeed = partial(_TeamTypeNeed, "orgteam") +_UserTypeNeed = namedtuple("userspecificneed", ["type", "username", "role"]) +_UserNeed = partial(_UserTypeNeed, "user") +_SuperUserNeed = partial(namedtuple("superuserneed", ["type"]), "superuser") -REPO_ROLES = [None, 'read', 'write', 'admin'] -TEAM_ROLES = [None, 'member', 'creator', 'admin'] -USER_ROLES = [None, 'read', 'admin'] +REPO_ROLES = [None, "read", "write", "admin"] +TEAM_ROLES = [None, "member", "creator", "admin"] +USER_ROLES = [None, "read", "admin"] -TEAM_ORGWIDE_REPO_ROLES = { - 'admin': 'admin', - 'creator': None, - 'member': None, -} +TEAM_ORGWIDE_REPO_ROLES = {"admin": "admin", "creator": None, "member": None} SCOPE_MAX_REPO_ROLES = defaultdict(lambda: None) -SCOPE_MAX_REPO_ROLES.update({ - scopes.READ_REPO: 'read', - scopes.WRITE_REPO: 'write', - scopes.ADMIN_REPO: 'admin', - scopes.DIRECT_LOGIN: 'admin', -}) +SCOPE_MAX_REPO_ROLES.update( + { + scopes.READ_REPO: "read", + scopes.WRITE_REPO: "write", + scopes.ADMIN_REPO: "admin", + scopes.DIRECT_LOGIN: "admin", + } +) SCOPE_MAX_TEAM_ROLES = defaultdict(lambda: None) -SCOPE_MAX_TEAM_ROLES.update({ - scopes.CREATE_REPO: 'creator', - scopes.DIRECT_LOGIN: 'admin', - scopes.ORG_ADMIN: 'admin', -}) +SCOPE_MAX_TEAM_ROLES.update( + { + scopes.CREATE_REPO: "creator", + scopes.DIRECT_LOGIN: "admin", + scopes.ORG_ADMIN: "admin", + } +) SCOPE_MAX_USER_ROLES = defaultdict(lambda: None) -SCOPE_MAX_USER_ROLES.update({ - scopes.READ_USER: 'read', - scopes.DIRECT_LOGIN: 'admin', - scopes.ADMIN_USER: 'admin', -}) +SCOPE_MAX_USER_ROLES.update( + {scopes.READ_USER: 
"read", scopes.DIRECT_LOGIN: "admin", scopes.ADMIN_USER: "admin"} +) + def repository_read_grant(namespace, repository): - return _RepositoryNeed(namespace, repository, 'read') + return _RepositoryNeed(namespace, repository, "read") def repository_write_grant(namespace, repository): - return _RepositoryNeed(namespace, repository, 'write') + return _RepositoryNeed(namespace, repository, "write") def repository_admin_grant(namespace, repository): - return _RepositoryNeed(namespace, repository, 'admin') + return _RepositoryNeed(namespace, repository, "admin") class QuayDeferredPermissionUser(Identity): - def __init__(self, uuid, auth_type, auth_scopes, user=None): - super(QuayDeferredPermissionUser, self).__init__(uuid, auth_type) + def __init__(self, uuid, auth_type, auth_scopes, user=None): + super(QuayDeferredPermissionUser, self).__init__(uuid, auth_type) - self._namespace_wide_loaded = set() - self._repositories_loaded = set() - self._personal_loaded = False + self._namespace_wide_loaded = set() + self._repositories_loaded = set() + self._personal_loaded = False - self._scope_set = auth_scopes - self._user_object = user + self._scope_set = auth_scopes + self._user_object = user - @staticmethod - def for_id(uuid, auth_scopes=None): - auth_scopes = auth_scopes if auth_scopes is not None else {scopes.DIRECT_LOGIN} - return QuayDeferredPermissionUser(uuid, 'user_uuid', auth_scopes) + @staticmethod + def for_id(uuid, auth_scopes=None): + auth_scopes = auth_scopes if auth_scopes is not None else {scopes.DIRECT_LOGIN} + return QuayDeferredPermissionUser(uuid, "user_uuid", auth_scopes) - @staticmethod - def for_user(user, auth_scopes=None): - auth_scopes = auth_scopes if auth_scopes is not None else {scopes.DIRECT_LOGIN} - return QuayDeferredPermissionUser(user.uuid, 'user_uuid', auth_scopes, user=user) + @staticmethod + def for_user(user, auth_scopes=None): + auth_scopes = auth_scopes if auth_scopes is not None else {scopes.DIRECT_LOGIN} + return QuayDeferredPermissionUser( + user.uuid, "user_uuid", auth_scopes, user=user + ) - def _translate_role_for_scopes(self, cardinality, max_roles, role): - if self._scope_set is None: - return role + def _translate_role_for_scopes(self, cardinality, max_roles, role): + if self._scope_set is None: + return role - max_for_scopes = max({cardinality.index(max_roles[scope]) for scope in self._scope_set}) + max_for_scopes = max( + {cardinality.index(max_roles[scope]) for scope in self._scope_set} + ) - if max_for_scopes < cardinality.index(role): - logger.debug('Translated permission %s -> %s', role, cardinality[max_for_scopes]) - return cardinality[max_for_scopes] - else: - return role + if max_for_scopes < cardinality.index(role): + logger.debug( + "Translated permission %s -> %s", role, cardinality[max_for_scopes] + ) + return cardinality[max_for_scopes] + else: + return role - def _team_role_for_scopes(self, role): - return self._translate_role_for_scopes(TEAM_ROLES, SCOPE_MAX_TEAM_ROLES, role) + def _team_role_for_scopes(self, role): + return self._translate_role_for_scopes(TEAM_ROLES, SCOPE_MAX_TEAM_ROLES, role) - def _repo_role_for_scopes(self, role): - return self._translate_role_for_scopes(REPO_ROLES, SCOPE_MAX_REPO_ROLES, role) + def _repo_role_for_scopes(self, role): + return self._translate_role_for_scopes(REPO_ROLES, SCOPE_MAX_REPO_ROLES, role) - def _user_role_for_scopes(self, role): - return self._translate_role_for_scopes(USER_ROLES, SCOPE_MAX_USER_ROLES, role) + def _user_role_for_scopes(self, role): + return 
self._translate_role_for_scopes(USER_ROLES, SCOPE_MAX_USER_ROLES, role) - def _populate_user_provides(self, user_object): - """ Populates the provides that naturally apply to a user, such as being the admin of + def _populate_user_provides(self, user_object): + """ Populates the provides that naturally apply to a user, such as being the admin of their own namespace. """ - # Add the user specific permissions, only for non-oauth permission - user_grant = _UserNeed(user_object.username, self._user_role_for_scopes('admin')) - logger.debug('User permission: {0}'.format(user_grant)) - self.provides.add(user_grant) + # Add the user specific permissions, only for non-oauth permission + user_grant = _UserNeed( + user_object.username, self._user_role_for_scopes("admin") + ) + logger.debug("User permission: {0}".format(user_grant)) + self.provides.add(user_grant) - # Every user is the admin of their own 'org' - user_namespace = _OrganizationNeed(user_object.username, self._team_role_for_scopes('admin')) - logger.debug('User namespace permission: {0}'.format(user_namespace)) - self.provides.add(user_namespace) + # Every user is the admin of their own 'org' + user_namespace = _OrganizationNeed( + user_object.username, self._team_role_for_scopes("admin") + ) + logger.debug("User namespace permission: {0}".format(user_namespace)) + self.provides.add(user_namespace) - # Org repo roles can differ for scopes - user_repos = _OrganizationRepoNeed(user_object.username, self._repo_role_for_scopes('admin')) - logger.debug('User namespace repo permission: {0}'.format(user_repos)) - self.provides.add(user_repos) + # Org repo roles can differ for scopes + user_repos = _OrganizationRepoNeed( + user_object.username, self._repo_role_for_scopes("admin") + ) + logger.debug("User namespace repo permission: {0}".format(user_repos)) + self.provides.add(user_repos) - if ((scopes.SUPERUSER in self._scope_set or scopes.DIRECT_LOGIN in self._scope_set) and - superusers.is_superuser(user_object.username)): - logger.debug('Adding superuser to user: %s', user_object.username) - self.provides.add(_SuperUserNeed()) + if ( + scopes.SUPERUSER in self._scope_set + or scopes.DIRECT_LOGIN in self._scope_set + ) and superusers.is_superuser(user_object.username): + logger.debug("Adding superuser to user: %s", user_object.username) + self.provides.add(_SuperUserNeed()) - def _populate_namespace_wide_provides(self, user_object, namespace_filter): - """ Populates the namespace-wide provides for a particular user under a particular namespace. + def _populate_namespace_wide_provides(self, user_object, namespace_filter): + """ Populates the namespace-wide provides for a particular user under a particular namespace. This method does *not* add any provides for specific repositories. 
""" - for team in model.permission.get_org_wide_permissions(user_object, org_filter=namespace_filter): - team_org_grant = _OrganizationNeed(team.organization.username, - self._team_role_for_scopes(team.role.name)) - logger.debug('Organization team added permission: {0}'.format(team_org_grant)) - self.provides.add(team_org_grant) + for team in model.permission.get_org_wide_permissions( + user_object, org_filter=namespace_filter + ): + team_org_grant = _OrganizationNeed( + team.organization.username, self._team_role_for_scopes(team.role.name) + ) + logger.debug( + "Organization team added permission: {0}".format(team_org_grant) + ) + self.provides.add(team_org_grant) - team_repo_role = TEAM_ORGWIDE_REPO_ROLES[team.role.name] - org_repo_grant = _OrganizationRepoNeed(team.organization.username, - self._repo_role_for_scopes(team_repo_role)) - logger.debug('Organization team added repo permission: {0}'.format(org_repo_grant)) - self.provides.add(org_repo_grant) + team_repo_role = TEAM_ORGWIDE_REPO_ROLES[team.role.name] + org_repo_grant = _OrganizationRepoNeed( + team.organization.username, self._repo_role_for_scopes(team_repo_role) + ) + logger.debug( + "Organization team added repo permission: {0}".format(org_repo_grant) + ) + self.provides.add(org_repo_grant) - team_grant = _TeamNeed(team.organization.username, team.name, - self._team_role_for_scopes(team.role.name)) - logger.debug('Team added permission: {0}'.format(team_grant)) - self.provides.add(team_grant) + team_grant = _TeamNeed( + team.organization.username, + team.name, + self._team_role_for_scopes(team.role.name), + ) + logger.debug("Team added permission: {0}".format(team_grant)) + self.provides.add(team_grant) - def _populate_repository_provides(self, user_object, namespace_filter, repository_name): - """ Populates the repository-specific provides for a particular user and repository. """ + def _populate_repository_provides( + self, user_object, namespace_filter, repository_name + ): + """ Populates the repository-specific provides for a particular user and repository. 
""" - if namespace_filter and repository_name: - permissions = model.permission.get_user_repository_permissions(user_object, namespace_filter, - repository_name) - else: - permissions = model.permission.get_all_user_repository_permissions(user_object) + if namespace_filter and repository_name: + permissions = model.permission.get_user_repository_permissions( + user_object, namespace_filter, repository_name + ) + else: + permissions = model.permission.get_all_user_repository_permissions( + user_object + ) - for perm in permissions: - repo_grant = _RepositoryNeed(perm.repository.namespace_user.username, perm.repository.name, - self._repo_role_for_scopes(perm.role.name)) - logger.debug('User added permission: {0}'.format(repo_grant)) - self.provides.add(repo_grant) + for perm in permissions: + repo_grant = _RepositoryNeed( + perm.repository.namespace_user.username, + perm.repository.name, + self._repo_role_for_scopes(perm.role.name), + ) + logger.debug("User added permission: {0}".format(repo_grant)) + self.provides.add(repo_grant) - def can(self, permission): - logger.debug('Loading user permissions after deferring for: %s', self.id) - user_object = self._user_object or model.user.get_user_by_uuid(self.id) - if user_object is None: - return super(QuayDeferredPermissionUser, self).can(permission) + def can(self, permission): + logger.debug("Loading user permissions after deferring for: %s", self.id) + user_object = self._user_object or model.user.get_user_by_uuid(self.id) + if user_object is None: + return super(QuayDeferredPermissionUser, self).can(permission) - # Add the user-specific provides. - if not self._personal_loaded: - self._populate_user_provides(user_object) - self._personal_loaded = True + # Add the user-specific provides. + if not self._personal_loaded: + self._populate_user_provides(user_object) + self._personal_loaded = True - # If we now have permission, no need to load any more permissions. - if super(QuayDeferredPermissionUser, self).can(permission): - return super(QuayDeferredPermissionUser, self).can(permission) + # If we now have permission, no need to load any more permissions. + if super(QuayDeferredPermissionUser, self).can(permission): + return super(QuayDeferredPermissionUser, self).can(permission) - # Check for namespace and/or repository permissions. - perm_namespace = permission.namespace - perm_repo_name = permission.repo_name - perm_repository = None + # Check for namespace and/or repository permissions. + perm_namespace = permission.namespace + perm_repo_name = permission.repo_name + perm_repository = None - if perm_namespace and perm_repo_name: - perm_repository = '%s/%s' % (perm_namespace, perm_repo_name) + if perm_namespace and perm_repo_name: + perm_repository = "%s/%s" % (perm_namespace, perm_repo_name) - if not perm_namespace and not perm_repo_name: - # Nothing more to load, so just check directly. - return super(QuayDeferredPermissionUser, self).can(permission) + if not perm_namespace and not perm_repo_name: + # Nothing more to load, so just check directly. + return super(QuayDeferredPermissionUser, self).can(permission) - # Lazy-load the repository-specific permissions. - if perm_repository and perm_repository not in self._repositories_loaded: - self._populate_repository_provides(user_object, perm_namespace, perm_repo_name) - self._repositories_loaded.add(perm_repository) + # Lazy-load the repository-specific permissions. 
+ if perm_repository and perm_repository not in self._repositories_loaded: + self._populate_repository_provides( + user_object, perm_namespace, perm_repo_name + ) + self._repositories_loaded.add(perm_repository) + + # If we now have permission, no need to load any more permissions. + if super(QuayDeferredPermissionUser, self).can(permission): + return super(QuayDeferredPermissionUser, self).can(permission) + + # Lazy-load the namespace-wide-only permissions. + if perm_namespace and perm_namespace not in self._namespace_wide_loaded: + self._populate_namespace_wide_provides(user_object, perm_namespace) + self._namespace_wide_loaded.add(perm_namespace) - # If we now have permission, no need to load any more permissions. - if super(QuayDeferredPermissionUser, self).can(permission): return super(QuayDeferredPermissionUser, self).can(permission) - # Lazy-load the namespace-wide-only permissions. - if perm_namespace and perm_namespace not in self._namespace_wide_loaded: - self._populate_namespace_wide_provides(user_object, perm_namespace) - self._namespace_wide_loaded.add(perm_namespace) - - return super(QuayDeferredPermissionUser, self).can(permission) - class QuayPermission(Permission): - """ Base for all permissions in Quay. """ - namespace = None - repo_name = None + """ Base for all permissions in Quay. """ + + namespace = None + repo_name = None class ModifyRepositoryPermission(QuayPermission): - def __init__(self, namespace, name): - admin_need = _RepositoryNeed(namespace, name, 'admin') - write_need = _RepositoryNeed(namespace, name, 'write') - org_admin_need = _OrganizationRepoNeed(namespace, 'admin') - org_write_need = _OrganizationRepoNeed(namespace, 'write') + def __init__(self, namespace, name): + admin_need = _RepositoryNeed(namespace, name, "admin") + write_need = _RepositoryNeed(namespace, name, "write") + org_admin_need = _OrganizationRepoNeed(namespace, "admin") + org_write_need = _OrganizationRepoNeed(namespace, "write") - self.namespace = namespace - self.repo_name = name + self.namespace = namespace + self.repo_name = name - super(ModifyRepositoryPermission, self).__init__(admin_need, write_need, org_admin_need, - org_write_need) + super(ModifyRepositoryPermission, self).__init__( + admin_need, write_need, org_admin_need, org_write_need + ) class ReadRepositoryPermission(QuayPermission): - def __init__(self, namespace, name): - admin_need = _RepositoryNeed(namespace, name, 'admin') - write_need = _RepositoryNeed(namespace, name, 'write') - read_need = _RepositoryNeed(namespace, name, 'read') - org_admin_need = _OrganizationRepoNeed(namespace, 'admin') - org_write_need = _OrganizationRepoNeed(namespace, 'write') - org_read_need = _OrganizationRepoNeed(namespace, 'read') + def __init__(self, namespace, name): + admin_need = _RepositoryNeed(namespace, name, "admin") + write_need = _RepositoryNeed(namespace, name, "write") + read_need = _RepositoryNeed(namespace, name, "read") + org_admin_need = _OrganizationRepoNeed(namespace, "admin") + org_write_need = _OrganizationRepoNeed(namespace, "write") + org_read_need = _OrganizationRepoNeed(namespace, "read") - self.namespace = namespace - self.repo_name = name + self.namespace = namespace + self.repo_name = name - super(ReadRepositoryPermission, self).__init__(admin_need, write_need, read_need, - org_admin_need, org_read_need, org_write_need) + super(ReadRepositoryPermission, self).__init__( + admin_need, + write_need, + read_need, + org_admin_need, + org_read_need, + org_write_need, + ) class 
AdministerRepositoryPermission(QuayPermission): - def __init__(self, namespace, name): - admin_need = _RepositoryNeed(namespace, name, 'admin') - org_admin_need = _OrganizationRepoNeed(namespace, 'admin') + def __init__(self, namespace, name): + admin_need = _RepositoryNeed(namespace, name, "admin") + org_admin_need = _OrganizationRepoNeed(namespace, "admin") - self.namespace = namespace - self.repo_name = name + self.namespace = namespace + self.repo_name = name - super(AdministerRepositoryPermission, self).__init__(admin_need, - org_admin_need) + super(AdministerRepositoryPermission, self).__init__(admin_need, org_admin_need) class CreateRepositoryPermission(QuayPermission): - def __init__(self, namespace): - admin_org = _OrganizationNeed(namespace, 'admin') - create_repo_org = _OrganizationNeed(namespace, 'creator') + def __init__(self, namespace): + admin_org = _OrganizationNeed(namespace, "admin") + create_repo_org = _OrganizationNeed(namespace, "creator") - self.namespace = namespace + self.namespace = namespace + + super(CreateRepositoryPermission, self).__init__(admin_org, create_repo_org) - super(CreateRepositoryPermission, self).__init__(admin_org, - create_repo_org) class SuperUserPermission(QuayPermission): - def __init__(self): - need = _SuperUserNeed() - super(SuperUserPermission, self).__init__(need) + def __init__(self): + need = _SuperUserNeed() + super(SuperUserPermission, self).__init__(need) class UserAdminPermission(QuayPermission): - def __init__(self, username): - user_admin = _UserNeed(username, 'admin') - super(UserAdminPermission, self).__init__(user_admin) + def __init__(self, username): + user_admin = _UserNeed(username, "admin") + super(UserAdminPermission, self).__init__(user_admin) class UserReadPermission(QuayPermission): - def __init__(self, username): - user_admin = _UserNeed(username, 'admin') - user_read = _UserNeed(username, 'read') - super(UserReadPermission, self).__init__(user_read, user_admin) + def __init__(self, username): + user_admin = _UserNeed(username, "admin") + user_read = _UserNeed(username, "read") + super(UserReadPermission, self).__init__(user_read, user_admin) class AdministerOrganizationPermission(QuayPermission): - def __init__(self, org_name): - admin_org = _OrganizationNeed(org_name, 'admin') + def __init__(self, org_name): + admin_org = _OrganizationNeed(org_name, "admin") - self.namespace = org_name + self.namespace = org_name - super(AdministerOrganizationPermission, self).__init__(admin_org) + super(AdministerOrganizationPermission, self).__init__(admin_org) class OrganizationMemberPermission(QuayPermission): - def __init__(self, org_name): - admin_org = _OrganizationNeed(org_name, 'admin') - repo_creator_org = _OrganizationNeed(org_name, 'creator') - org_member = _OrganizationNeed(org_name, 'member') + def __init__(self, org_name): + admin_org = _OrganizationNeed(org_name, "admin") + repo_creator_org = _OrganizationNeed(org_name, "creator") + org_member = _OrganizationNeed(org_name, "member") - self.namespace = org_name + self.namespace = org_name - super(OrganizationMemberPermission, self).__init__(admin_org, org_member, - repo_creator_org) + super(OrganizationMemberPermission, self).__init__( + admin_org, org_member, repo_creator_org + ) class ViewTeamPermission(QuayPermission): - def __init__(self, org_name, team_name): - team_admin = _TeamNeed(org_name, team_name, 'admin') - team_creator = _TeamNeed(org_name, team_name, 'creator') - team_member = _TeamNeed(org_name, team_name, 'member') - admin_org = 
_OrganizationNeed(org_name, 'admin') + def __init__(self, org_name, team_name): + team_admin = _TeamNeed(org_name, team_name, "admin") + team_creator = _TeamNeed(org_name, team_name, "creator") + team_member = _TeamNeed(org_name, team_name, "member") + admin_org = _OrganizationNeed(org_name, "admin") - self.namespace = org_name + self.namespace = org_name - super(ViewTeamPermission, self).__init__(team_admin, team_creator, - team_member, admin_org) + super(ViewTeamPermission, self).__init__( + team_admin, team_creator, team_member, admin_org + ) class AlwaysFailPermission(QuayPermission): - def can(self): - return False + def can(self): + return False @identity_loaded.connect_via(app) def on_identity_loaded(sender, identity): - logger.debug('Identity loaded: %s' % identity) - # We have verified an identity, load in all of the permissions + logger.debug("Identity loaded: %s" % identity) + # We have verified an identity, load in all of the permissions - if isinstance(identity, QuayDeferredPermissionUser): - logger.debug('Deferring permissions for user with uuid: %s', identity.id) + if isinstance(identity, QuayDeferredPermissionUser): + logger.debug("Deferring permissions for user with uuid: %s", identity.id) - elif identity.auth_type == 'user_uuid': - logger.debug('Switching username permission to deferred object with uuid: %s', identity.id) - switch_to_deferred = QuayDeferredPermissionUser.for_id(identity.id) - identity_changed.send(app, identity=switch_to_deferred) + elif identity.auth_type == "user_uuid": + logger.debug( + "Switching username permission to deferred object with uuid: %s", + identity.id, + ) + switch_to_deferred = QuayDeferredPermissionUser.for_id(identity.id) + identity_changed.send(app, identity=switch_to_deferred) - elif identity.auth_type == 'token': - logger.debug('Loading permissions for token: %s', identity.id) - token_data = model.token.load_token_data(identity.id) + elif identity.auth_type == "token": + logger.debug("Loading permissions for token: %s", identity.id) + token_data = model.token.load_token_data(identity.id) - repo_grant = _RepositoryNeed(token_data.repository.namespace_user.username, - token_data.repository.name, - token_data.role.name) - logger.debug('Delegate token added permission: %s', repo_grant) - identity.provides.add(repo_grant) + repo_grant = _RepositoryNeed( + token_data.repository.namespace_user.username, + token_data.repository.name, + token_data.role.name, + ) + logger.debug("Delegate token added permission: %s", repo_grant) + identity.provides.add(repo_grant) - elif identity.auth_type == 'signed_grant' or identity.auth_type == 'signed_jwt': - logger.debug('Loaded %s identity for: %s', identity.auth_type, identity.id) + elif identity.auth_type == "signed_grant" or identity.auth_type == "signed_jwt": + logger.debug("Loaded %s identity for: %s", identity.auth_type, identity.id) - else: - logger.error('Unknown identity auth type: %s', identity.auth_type) + else: + logger.error("Unknown identity auth type: %s", identity.auth_type) diff --git a/auth/registry_jwt_auth.py b/auth/registry_jwt_auth.py index 75be63d73..135e49a94 100644 --- a/auth/registry_jwt_auth.py +++ b/auth/registry_jwt_auth.py @@ -9,156 +9,166 @@ from flask_principal import identity_changed, Identity from app import app, get_app_url, instance_keys, metric_queue from auth.auth_context import set_authenticated_context from auth.auth_context_type import SignedAuthContext -from auth.permissions import repository_read_grant, repository_write_grant, repository_admin_grant +from 
auth.permissions import ( + repository_read_grant, + repository_write_grant, + repository_admin_grant, +) from util.http import abort from util.names import parse_namespace_repository -from util.security.registry_jwt import (ANONYMOUS_SUB, decode_bearer_header, - InvalidBearerTokenException) +from util.security.registry_jwt import ( + ANONYMOUS_SUB, + decode_bearer_header, + InvalidBearerTokenException, +) logger = logging.getLogger(__name__) ACCESS_SCHEMA = { - 'type': 'array', - 'description': 'List of access granted to the subject', - 'items': { - 'type': 'object', - 'required': [ - 'type', - 'name', - 'actions', - ], - 'properties': { - 'type': { - 'type': 'string', - 'description': 'We only allow repository permissions', - 'enum': [ - 'repository', - ], - }, - 'name': { - 'type': 'string', - 'description': 'The name of the repository for which we are receiving access' - }, - 'actions': { - 'type': 'array', - 'description': 'List of specific verbs which can be performed against repository', - 'items': { - 'type': 'string', - 'enum': [ - 'push', - 'pull', - '*', - ], + "type": "array", + "description": "List of access granted to the subject", + "items": { + "type": "object", + "required": ["type", "name", "actions"], + "properties": { + "type": { + "type": "string", + "description": "We only allow repository permissions", + "enum": ["repository"], + }, + "name": { + "type": "string", + "description": "The name of the repository for which we are receiving access", + }, + "actions": { + "type": "array", + "description": "List of specific verbs which can be performed against repository", + "items": {"type": "string", "enum": ["push", "pull", "*"]}, + }, }, - }, }, - }, } class InvalidJWTException(Exception): - pass + pass def get_auth_headers(repository=None, scopes=None): - """ Returns a dictionary of headers for auth responses. """ - headers = {} - realm_auth_path = url_for('v2.generate_registry_jwt') - authenticate = 'Bearer realm="{0}{1}",service="{2}"'.format(get_app_url(), - realm_auth_path, - app.config['SERVER_HOSTNAME']) - if repository: - scopes_string = "repository:{0}".format(repository) - if scopes: - scopes_string += ':' + ','.join(scopes) + """ Returns a dictionary of headers for auth responses. """ + headers = {} + realm_auth_path = url_for("v2.generate_registry_jwt") + authenticate = 'Bearer realm="{0}{1}",service="{2}"'.format( + get_app_url(), realm_auth_path, app.config["SERVER_HOSTNAME"] + ) + if repository: + scopes_string = "repository:{0}".format(repository) + if scopes: + scopes_string += ":" + ",".join(scopes) - authenticate += ',scope="{0}"'.format(scopes_string) + authenticate += ',scope="{0}"'.format(scopes_string) - headers['WWW-Authenticate'] = authenticate - headers['Docker-Distribution-API-Version'] = 'registry/2.0' - return headers + headers["WWW-Authenticate"] = authenticate + headers["Docker-Distribution-API-Version"] = "registry/2.0" + return headers def identity_from_bearer_token(bearer_header): - """ Process a bearer header and return the loaded identity, or raise InvalidJWTException if an + """ Process a bearer header and return the loaded identity, or raise InvalidJWTException if an identity could not be loaded. 
Expects tokens and grants in the format of the Docker registry v2 auth spec: https://docs.docker.com/registry/spec/auth/token/ """ - logger.debug('Validating auth header: %s', bearer_header) + logger.debug("Validating auth header: %s", bearer_header) - try: - payload = decode_bearer_header(bearer_header, instance_keys, app.config, - metric_queue=metric_queue) - except InvalidBearerTokenException as bte: - logger.exception('Invalid bearer token: %s', bte) - raise InvalidJWTException(bte) - - loaded_identity = Identity(payload['sub'], 'signed_jwt') - - # Process the grants from the payload - if 'access' in payload: try: - validate(payload['access'], ACCESS_SCHEMA) - except ValidationError: - logger.exception('We should not be minting invalid credentials') - raise InvalidJWTException('Token contained invalid or malformed access grants') + payload = decode_bearer_header( + bearer_header, instance_keys, app.config, metric_queue=metric_queue + ) + except InvalidBearerTokenException as bte: + logger.exception("Invalid bearer token: %s", bte) + raise InvalidJWTException(bte) - lib_namespace = app.config['LIBRARY_NAMESPACE'] - for grant in payload['access']: - namespace, repo_name = parse_namespace_repository(grant['name'], lib_namespace) + loaded_identity = Identity(payload["sub"], "signed_jwt") - if '*' in grant['actions']: - loaded_identity.provides.add(repository_admin_grant(namespace, repo_name)) - elif 'push' in grant['actions']: - loaded_identity.provides.add(repository_write_grant(namespace, repo_name)) - elif 'pull' in grant['actions']: - loaded_identity.provides.add(repository_read_grant(namespace, repo_name)) + # Process the grants from the payload + if "access" in payload: + try: + validate(payload["access"], ACCESS_SCHEMA) + except ValidationError: + logger.exception("We should not be minting invalid credentials") + raise InvalidJWTException( + "Token contained invalid or malformed access grants" + ) - default_context = { - 'kind': 'anonymous' - } + lib_namespace = app.config["LIBRARY_NAMESPACE"] + for grant in payload["access"]: + namespace, repo_name = parse_namespace_repository( + grant["name"], lib_namespace + ) - if payload['sub'] != ANONYMOUS_SUB: - default_context = { - 'kind': 'user', - 'user': payload['sub'], - } + if "*" in grant["actions"]: + loaded_identity.provides.add( + repository_admin_grant(namespace, repo_name) + ) + elif "push" in grant["actions"]: + loaded_identity.provides.add( + repository_write_grant(namespace, repo_name) + ) + elif "pull" in grant["actions"]: + loaded_identity.provides.add( + repository_read_grant(namespace, repo_name) + ) - return loaded_identity, payload.get('context', default_context) + default_context = {"kind": "anonymous"} + + if payload["sub"] != ANONYMOUS_SUB: + default_context = {"kind": "user", "user": payload["sub"]} + + return loaded_identity, payload.get("context", default_context) def process_registry_jwt_auth(scopes=None): - """ Processes the registry JWT auth token found in the authorization header. If none found, + """ Processes the registry JWT auth token found in the authorization header. If none found, no error is returned. If an invalid token is found, raises a 401. 
""" - def inner(func): - @wraps(func) - def wrapper(*args, **kwargs): - logger.debug('Called with params: %s, %s', args, kwargs) - auth = request.headers.get('authorization', '').strip() - if auth: - try: - extracted_identity, context_dict = identity_from_bearer_token(auth) - identity_changed.send(app, identity=extracted_identity) - logger.debug('Identity changed to %s', extracted_identity.id) - auth_context = SignedAuthContext.build_from_signed_dict(context_dict) - if auth_context is not None: - logger.debug('Auth context set to %s', auth_context.signed_data) - set_authenticated_context(auth_context) + def inner(func): + @wraps(func) + def wrapper(*args, **kwargs): + logger.debug("Called with params: %s, %s", args, kwargs) + auth = request.headers.get("authorization", "").strip() + if auth: + try: + extracted_identity, context_dict = identity_from_bearer_token(auth) + identity_changed.send(app, identity=extracted_identity) + logger.debug("Identity changed to %s", extracted_identity.id) - except InvalidJWTException as ije: - repository = None - if 'namespace_name' in kwargs and 'repo_name' in kwargs: - repository = kwargs['namespace_name'] + '/' + kwargs['repo_name'] + auth_context = SignedAuthContext.build_from_signed_dict( + context_dict + ) + if auth_context is not None: + logger.debug("Auth context set to %s", auth_context.signed_data) + set_authenticated_context(auth_context) - abort(401, message=ije.message, headers=get_auth_headers(repository=repository, - scopes=scopes)) - else: - logger.debug('No auth header.') + except InvalidJWTException as ije: + repository = None + if "namespace_name" in kwargs and "repo_name" in kwargs: + repository = ( + kwargs["namespace_name"] + "/" + kwargs["repo_name"] + ) - return func(*args, **kwargs) - return wrapper - return inner + abort( + 401, + message=ije.message, + headers=get_auth_headers(repository=repository, scopes=scopes), + ) + else: + logger.debug("No auth header.") + + return func(*args, **kwargs) + + return wrapper + + return inner diff --git a/auth/scopes.py b/auth/scopes.py index dbbb0ae1c..c16a3dbf3 100644 --- a/auth/scopes.py +++ b/auth/scopes.py @@ -2,145 +2,194 @@ from collections import namedtuple import features import re -Scope = namedtuple('scope', ['scope', 'icon', 'dangerous', 'title', 'description']) +Scope = namedtuple("scope", ["scope", "icon", "dangerous", "title", "description"]) -READ_REPO = Scope(scope='repo:read', - icon='fa-hdd-o', - dangerous=False, - title='View all visible repositories', - description=('This application will be able to view and pull all repositories ' - 'visible to the granting user or robot account')) +READ_REPO = Scope( + scope="repo:read", + icon="fa-hdd-o", + dangerous=False, + title="View all visible repositories", + description=( + "This application will be able to view and pull all repositories " + "visible to the granting user or robot account" + ), +) -WRITE_REPO = Scope(scope='repo:write', - icon='fa-hdd-o', - dangerous=False, - title='Read/Write to any accessible repositories', - description=('This application will be able to view, push and pull to all ' - 'repositories to which the granting user or robot account has ' - 'write access')) +WRITE_REPO = Scope( + scope="repo:write", + icon="fa-hdd-o", + dangerous=False, + title="Read/Write to any accessible repositories", + description=( + "This application will be able to view, push and pull to all " + "repositories to which the granting user or robot account has " + "write access" + ), +) -ADMIN_REPO = Scope(scope='repo:admin', - 
icon='fa-hdd-o', - dangerous=False, - title='Administer Repositories', - description=('This application will have administrator access to all ' - 'repositories to which the granting user or robot account has ' - 'access')) +ADMIN_REPO = Scope( + scope="repo:admin", + icon="fa-hdd-o", + dangerous=False, + title="Administer Repositories", + description=( + "This application will have administrator access to all " + "repositories to which the granting user or robot account has " + "access" + ), +) -CREATE_REPO = Scope(scope='repo:create', - icon='fa-plus', - dangerous=False, - title='Create Repositories', - description=('This application will be able to create repositories in to any ' - 'namespaces that the granting user or robot account is allowed ' - 'to create repositories')) +CREATE_REPO = Scope( + scope="repo:create", + icon="fa-plus", + dangerous=False, + title="Create Repositories", + description=( + "This application will be able to create repositories in to any " + "namespaces that the granting user or robot account is allowed " + "to create repositories" + ), +) -READ_USER = Scope(scope= 'user:read', - icon='fa-user', - dangerous=False, - title='Read User Information', - description=('This application will be able to read user information such as ' - 'username and email address.')) +READ_USER = Scope( + scope="user:read", + icon="fa-user", + dangerous=False, + title="Read User Information", + description=( + "This application will be able to read user information such as " + "username and email address." + ), +) -ADMIN_USER = Scope(scope= 'user:admin', - icon='fa-gear', - dangerous=True, - title='Administer User', - description=('This application will be able to administer your account ' - 'including creating robots and granting them permissions ' - 'to your repositories. You should have absolute trust in the ' - 'requesting application before granting this permission.')) +ADMIN_USER = Scope( + scope="user:admin", + icon="fa-gear", + dangerous=True, + title="Administer User", + description=( + "This application will be able to administer your account " + "including creating robots and granting them permissions " + "to your repositories. You should have absolute trust in the " + "requesting application before granting this permission." + ), +) -ORG_ADMIN = Scope(scope='org:admin', - icon='fa-gear', - dangerous=True, - title='Administer Organization', - description=('This application will be able to administer your organizations ' - 'including creating robots, creating teams, adjusting team ' - 'membership, and changing billing settings. You should have ' - 'absolute trust in the requesting application before granting this ' - 'permission.')) +ORG_ADMIN = Scope( + scope="org:admin", + icon="fa-gear", + dangerous=True, + title="Administer Organization", + description=( + "This application will be able to administer your organizations " + "including creating robots, creating teams, adjusting team " + "membership, and changing billing settings. You should have " + "absolute trust in the requesting application before granting this " + "permission." + ), +) -DIRECT_LOGIN = Scope(scope='direct_user_login', - icon='fa-exclamation-triangle', - dangerous=True, - title='Full Access', - description=('This scope should not be available to OAuth applications. 
' - 'Never approve a request for this scope!')) +DIRECT_LOGIN = Scope( + scope="direct_user_login", + icon="fa-exclamation-triangle", + dangerous=True, + title="Full Access", + description=( + "This scope should not be available to OAuth applications. " + "Never approve a request for this scope!" + ), +) -SUPERUSER = Scope(scope='super:user', - icon='fa-street-view', - dangerous=True, - title='Super User Access', - description=('This application will be able to administer your installation ' - 'including managing users, managing organizations and other ' - 'features found in the superuser panel. You should have ' - 'absolute trust in the requesting application before granting this ' - 'permission.')) +SUPERUSER = Scope( + scope="super:user", + icon="fa-street-view", + dangerous=True, + title="Super User Access", + description=( + "This application will be able to administer your installation " + "including managing users, managing organizations and other " + "features found in the superuser panel. You should have " + "absolute trust in the requesting application before granting this " + "permission." + ), +) -ALL_SCOPES = {scope.scope: scope for scope in (READ_REPO, WRITE_REPO, ADMIN_REPO, CREATE_REPO, - READ_USER, ORG_ADMIN, SUPERUSER, ADMIN_USER)} +ALL_SCOPES = { + scope.scope: scope + for scope in ( + READ_REPO, + WRITE_REPO, + ADMIN_REPO, + CREATE_REPO, + READ_USER, + ORG_ADMIN, + SUPERUSER, + ADMIN_USER, + ) +} IMPLIED_SCOPES = { - ADMIN_REPO: {ADMIN_REPO, WRITE_REPO, READ_REPO}, - WRITE_REPO: {WRITE_REPO, READ_REPO}, - READ_REPO: {READ_REPO}, - CREATE_REPO: {CREATE_REPO}, - READ_USER: {READ_USER}, - ORG_ADMIN: {ORG_ADMIN}, - SUPERUSER: {SUPERUSER}, - ADMIN_USER: {ADMIN_USER}, - None: set(), + ADMIN_REPO: {ADMIN_REPO, WRITE_REPO, READ_REPO}, + WRITE_REPO: {WRITE_REPO, READ_REPO}, + READ_REPO: {READ_REPO}, + CREATE_REPO: {CREATE_REPO}, + READ_USER: {READ_USER}, + ORG_ADMIN: {ORG_ADMIN}, + SUPERUSER: {SUPERUSER}, + ADMIN_USER: {ADMIN_USER}, + None: set(), } def app_scopes(app_config): - scopes_from_config = dict(ALL_SCOPES) - if not app_config.get('FEATURE_SUPER_USERS', False): - del scopes_from_config[SUPERUSER.scope] - return scopes_from_config + scopes_from_config = dict(ALL_SCOPES) + if not app_config.get("FEATURE_SUPER_USERS", False): + del scopes_from_config[SUPERUSER.scope] + return scopes_from_config def scopes_from_scope_string(scopes): - if not scopes: - scopes = '' + if not scopes: + scopes = "" - # Note: The scopes string should be space seperated according to the spec: - # https://tools.ietf.org/html/rfc6749#section-3.3 - # However, we also support commas for backwards compatibility with existing callers to our code. - scope_set = {ALL_SCOPES.get(scope, None) for scope in re.split(' |,', scopes)} - return scope_set if not None in scope_set else set() + # Note: The scopes string should be space seperated according to the spec: + # https://tools.ietf.org/html/rfc6749#section-3.3 + # However, we also support commas for backwards compatibility with existing callers to our code. 
+ scope_set = {ALL_SCOPES.get(scope, None) for scope in re.split(" |,", scopes)} + return scope_set if not None in scope_set else set() def validate_scope_string(scopes): - decoded = scopes_from_scope_string(scopes) - return len(decoded) > 0 + decoded = scopes_from_scope_string(scopes) + return len(decoded) > 0 def is_subset_string(full_string, expected_string): - """ Returns true if the scopes found in expected_string are also found + """ Returns true if the scopes found in expected_string are also found in full_string. """ - full_scopes = scopes_from_scope_string(full_string) - if not full_scopes: - return False + full_scopes = scopes_from_scope_string(full_string) + if not full_scopes: + return False - full_implied_scopes = set.union(*[IMPLIED_SCOPES[scope] for scope in full_scopes]) - expected_scopes = scopes_from_scope_string(expected_string) - return expected_scopes.issubset(full_implied_scopes) + full_implied_scopes = set.union(*[IMPLIED_SCOPES[scope] for scope in full_scopes]) + expected_scopes = scopes_from_scope_string(expected_string) + return expected_scopes.issubset(full_implied_scopes) def get_scope_information(scopes_string): - scopes = scopes_from_scope_string(scopes_string) - scope_info = [] - for scope in scopes: - scope_info.append({ - 'title': scope.title, - 'scope': scope.scope, - 'description': scope.description, - 'icon': scope.icon, - 'dangerous': scope.dangerous, - }) + scopes = scopes_from_scope_string(scopes_string) + scope_info = [] + for scope in scopes: + scope_info.append( + { + "title": scope.title, + "scope": scope.scope, + "description": scope.description, + "icon": scope.icon, + "dangerous": scope.dangerous, + } + ) - return scope_info + return scope_info diff --git a/auth/signedgrant.py b/auth/signedgrant.py index b8169114d..4063115a0 100644 --- a/auth/signedgrant.py +++ b/auth/signedgrant.py @@ -8,48 +8,49 @@ from auth.validateresult import AuthKind, ValidateResult logger = logging.getLogger(__name__) # The prefix for all signatures of signed granted. -SIGNATURE_PREFIX = 'sigv2=' +SIGNATURE_PREFIX = "sigv2=" + def generate_signed_token(grants, user_context): - """ Generates a signed session token with the given grants and user context. """ - ser = SecureCookieSessionInterface().get_signing_serializer(app) - data_to_sign = { - 'grants': grants, - 'user_context': user_context, - } + """ Generates a signed session token with the given grants and user context. """ + ser = SecureCookieSessionInterface().get_signing_serializer(app) + data_to_sign = {"grants": grants, "user_context": user_context} - encrypted = ser.dumps(data_to_sign) - return '{0}{1}'.format(SIGNATURE_PREFIX, encrypted) + encrypted = ser.dumps(data_to_sign) + return "{0}{1}".format(SIGNATURE_PREFIX, encrypted) def validate_signed_grant(auth_header): - """ Validates a signed grant as found inside an auth header and returns whether it points to + """ Validates a signed grant as found inside an auth header and returns whether it points to a valid grant. """ - if not auth_header: - return ValidateResult(AuthKind.signed_grant, missing=True) + if not auth_header: + return ValidateResult(AuthKind.signed_grant, missing=True) - # Try to parse the token from the header. - normalized = [part.strip() for part in auth_header.split(' ') if part] - if normalized[0].lower() != 'token' or len(normalized) != 2: - logger.debug('Not a token: %s', auth_header) - return ValidateResult(AuthKind.signed_grant, missing=True) + # Try to parse the token from the header. 
+ normalized = [part.strip() for part in auth_header.split(" ") if part] + if normalized[0].lower() != "token" or len(normalized) != 2: + logger.debug("Not a token: %s", auth_header) + return ValidateResult(AuthKind.signed_grant, missing=True) - # Check that it starts with the expected prefix. - if not normalized[1].startswith(SIGNATURE_PREFIX): - logger.debug('Not a signed grant token: %s', auth_header) - return ValidateResult(AuthKind.signed_grant, missing=True) + # Check that it starts with the expected prefix. + if not normalized[1].startswith(SIGNATURE_PREFIX): + logger.debug("Not a signed grant token: %s", auth_header) + return ValidateResult(AuthKind.signed_grant, missing=True) - # Decrypt the grant. - encrypted = normalized[1][len(SIGNATURE_PREFIX):] - ser = SecureCookieSessionInterface().get_signing_serializer(app) + # Decrypt the grant. + encrypted = normalized[1][len(SIGNATURE_PREFIX) :] + ser = SecureCookieSessionInterface().get_signing_serializer(app) - try: - token_data = ser.loads(encrypted, max_age=app.config['SIGNED_GRANT_EXPIRATION_SEC']) - except BadSignature: - logger.warning('Signed grant could not be validated: %s', encrypted) - return ValidateResult(AuthKind.signed_grant, - error_message='Signed grant could not be validated') + try: + token_data = ser.loads( + encrypted, max_age=app.config["SIGNED_GRANT_EXPIRATION_SEC"] + ) + except BadSignature: + logger.warning("Signed grant could not be validated: %s", encrypted) + return ValidateResult( + AuthKind.signed_grant, error_message="Signed grant could not be validated" + ) - logger.debug('Successfully validated signed grant with data: %s', token_data) - return ValidateResult(AuthKind.signed_grant, signed_data=token_data) + logger.debug("Successfully validated signed grant with data: %s", token_data) + return ValidateResult(AuthKind.signed_grant, signed_data=token_data) diff --git a/auth/test/test_auth_context_type.py b/auth/test/test_auth_context_type.py index 7778d7f90..0b4e8227a 100644 --- a/auth/test/test_auth_context_type.py +++ b/auth/test/test_auth_context_type.py @@ -1,51 +1,65 @@ import pytest -from auth.auth_context_type import SignedAuthContext, ValidatedAuthContext, ContextEntityKind +from auth.auth_context_type import ( + SignedAuthContext, + ValidatedAuthContext, + ContextEntityKind, +) from data import model, database from test.fixtures import * + def get_oauth_token(_): - return database.OAuthAccessToken.get() + return database.OAuthAccessToken.get() -@pytest.mark.parametrize('kind, entity_reference, loader', [ - (ContextEntityKind.anonymous, None, None), - (ContextEntityKind.appspecifictoken, '%s%s' % ('a' * 60, 'b' * 60), - model.appspecifictoken.access_valid_token), - (ContextEntityKind.oauthtoken, None, get_oauth_token), - (ContextEntityKind.robot, 'devtable+dtrobot', model.user.lookup_robot), - (ContextEntityKind.user, 'devtable', model.user.get_user), -]) -@pytest.mark.parametrize('v1_dict_format', [ - (True), - (False), -]) -def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, initialized_db): - if kind == ContextEntityKind.anonymous: - validated = ValidatedAuthContext() - assert validated.is_anonymous - else: - ref = loader(entity_reference) - validated = ValidatedAuthContext(**{kind.value: ref}) - assert not validated.is_anonymous +@pytest.mark.parametrize( + "kind, entity_reference, loader", + [ + (ContextEntityKind.anonymous, None, None), + ( + ContextEntityKind.appspecifictoken, + "%s%s" % ("a" * 60, "b" * 60), + model.appspecifictoken.access_valid_token, + ), + 
(ContextEntityKind.oauthtoken, None, get_oauth_token), + (ContextEntityKind.robot, "devtable+dtrobot", model.user.lookup_robot), + (ContextEntityKind.user, "devtable", model.user.get_user), + ], +) +@pytest.mark.parametrize("v1_dict_format", [(True), (False)]) +def test_signed_auth_context( + kind, entity_reference, loader, v1_dict_format, initialized_db +): + if kind == ContextEntityKind.anonymous: + validated = ValidatedAuthContext() + assert validated.is_anonymous + else: + ref = loader(entity_reference) + validated = ValidatedAuthContext(**{kind.value: ref}) + assert not validated.is_anonymous - assert validated.entity_kind == kind - assert validated.unique_key + assert validated.entity_kind == kind + assert validated.unique_key - signed = SignedAuthContext.build_from_signed_dict(validated.to_signed_dict(), - v1_dict_format=v1_dict_format) + signed = SignedAuthContext.build_from_signed_dict( + validated.to_signed_dict(), v1_dict_format=v1_dict_format + ) - if not v1_dict_format: - # Under legacy V1 format, we don't track the app specific token, merely its associated user. - assert signed.entity_kind == kind - assert signed.description == validated.description - assert signed.credential_username == validated.credential_username - assert signed.analytics_id_and_public_metadata() == validated.analytics_id_and_public_metadata() - assert signed.unique_key == validated.unique_key + if not v1_dict_format: + # Under legacy V1 format, we don't track the app specific token, merely its associated user. + assert signed.entity_kind == kind + assert signed.description == validated.description + assert signed.credential_username == validated.credential_username + assert ( + signed.analytics_id_and_public_metadata() + == validated.analytics_id_and_public_metadata() + ) + assert signed.unique_key == validated.unique_key - assert signed.is_anonymous == validated.is_anonymous - assert signed.authed_user == validated.authed_user - assert signed.has_nonrobot_user == validated.has_nonrobot_user + assert signed.is_anonymous == validated.is_anonymous + assert signed.authed_user == validated.authed_user + assert signed.has_nonrobot_user == validated.has_nonrobot_user - assert signed.to_signed_dict() == validated.to_signed_dict() + assert signed.to_signed_dict() == validated.to_signed_dict() diff --git a/auth/test/test_basic.py b/auth/test/test_basic.py index 24279b4b2..c7ecdc09c 100644 --- a/auth/test/test_basic.py +++ b/auth/test/test_basic.py @@ -5,8 +5,11 @@ import pytest from base64 import b64encode from auth.basic import validate_basic_auth -from auth.credentials import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, - APP_SPECIFIC_TOKEN_USERNAME) +from auth.credentials import ( + ACCESS_TOKEN_USERNAME, + OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME, +) from auth.validateresult import AuthKind, ValidateResult from data import model @@ -14,85 +17,120 @@ from test.fixtures import * def _token(username, password): - assert isinstance(username, basestring) - assert isinstance(password, basestring) - return 'basic ' + b64encode('%s:%s' % (username, password)) + assert isinstance(username, basestring) + assert isinstance(password, basestring) + return "basic " + b64encode("%s:%s" % (username, password)) -@pytest.mark.parametrize('token, expected_result', [ - ('', ValidateResult(AuthKind.basic, missing=True)), - ('someinvalidtoken', ValidateResult(AuthKind.basic, missing=True)), - ('somefoobartoken', ValidateResult(AuthKind.basic, missing=True)), - ('basic ', ValidateResult(AuthKind.basic, missing=True)), - 
('basic some token', ValidateResult(AuthKind.basic, missing=True)), - ('basic sometoken', ValidateResult(AuthKind.basic, missing=True)), - (_token(APP_SPECIFIC_TOKEN_USERNAME, 'invalid'), ValidateResult(AuthKind.basic, - error_message='Invalid token')), - (_token(ACCESS_TOKEN_USERNAME, 'invalid'), ValidateResult(AuthKind.basic, - error_message='Invalid access token')), - (_token(OAUTH_TOKEN_USERNAME, 'invalid'), - ValidateResult(AuthKind.basic, error_message='OAuth access token could not be validated')), - (_token('devtable', 'invalid'), ValidateResult(AuthKind.basic, - error_message='Invalid Username or Password')), - (_token('devtable+somebot', 'invalid'), ValidateResult( - AuthKind.basic, error_message='Could not find robot with username: devtable+somebot')), - (_token('disabled', 'password'), ValidateResult( - AuthKind.basic, - error_message='This user has been disabled. Please contact your administrator.')),]) +@pytest.mark.parametrize( + "token, expected_result", + [ + ("", ValidateResult(AuthKind.basic, missing=True)), + ("someinvalidtoken", ValidateResult(AuthKind.basic, missing=True)), + ("somefoobartoken", ValidateResult(AuthKind.basic, missing=True)), + ("basic ", ValidateResult(AuthKind.basic, missing=True)), + ("basic some token", ValidateResult(AuthKind.basic, missing=True)), + ("basic sometoken", ValidateResult(AuthKind.basic, missing=True)), + ( + _token(APP_SPECIFIC_TOKEN_USERNAME, "invalid"), + ValidateResult(AuthKind.basic, error_message="Invalid token"), + ), + ( + _token(ACCESS_TOKEN_USERNAME, "invalid"), + ValidateResult(AuthKind.basic, error_message="Invalid access token"), + ), + ( + _token(OAUTH_TOKEN_USERNAME, "invalid"), + ValidateResult( + AuthKind.basic, + error_message="OAuth access token could not be validated", + ), + ), + ( + _token("devtable", "invalid"), + ValidateResult( + AuthKind.basic, error_message="Invalid Username or Password" + ), + ), + ( + _token("devtable+somebot", "invalid"), + ValidateResult( + AuthKind.basic, + error_message="Could not find robot with username: devtable+somebot", + ), + ), + ( + _token("disabled", "password"), + ValidateResult( + AuthKind.basic, + error_message="This user has been disabled. 
Please contact your administrator.", + ), + ), + ], +) def test_validate_basic_auth_token(token, expected_result, app): - result = validate_basic_auth(token) - assert result == expected_result + result = validate_basic_auth(token) + assert result == expected_result def test_valid_user(app): - token = _token('devtable', 'password') - result = validate_basic_auth(token) - assert result == ValidateResult(AuthKind.basic, user=model.user.get_user('devtable')) + token = _token("devtable", "password") + result = validate_basic_auth(token) + assert result == ValidateResult( + AuthKind.basic, user=model.user.get_user("devtable") + ) def test_valid_robot(app): - robot, password = model.user.create_robot('somerobot', model.user.get_user('devtable')) - token = _token(robot.username, password) - result = validate_basic_auth(token) - assert result == ValidateResult(AuthKind.basic, robot=robot) + robot, password = model.user.create_robot( + "somerobot", model.user.get_user("devtable") + ) + token = _token(robot.username, password) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, robot=robot) def test_valid_token(app): - access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken') - token = _token(ACCESS_TOKEN_USERNAME, access_token.get_code()) - result = validate_basic_auth(token) - assert result == ValidateResult(AuthKind.basic, token=access_token) + access_token = model.token.create_delegate_token("devtable", "simple", "sometoken") + token = _token(ACCESS_TOKEN_USERNAME, access_token.get_code()) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, token=access_token) def test_valid_oauth(app): - user = model.user.get_user('devtable') - app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0] - oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read') - token = _token(OAUTH_TOKEN_USERNAME, code) - result = validate_basic_auth(token) - assert result == ValidateResult(AuthKind.basic, oauthtoken=oauth_token) + user = model.user.get_user("devtable") + app = model.oauth.list_applications_for_org( + model.user.get_user_or_org("buynlarge") + )[0] + oauth_token, code = model.oauth.create_access_token_for_testing( + user, app.client_id, "repo:read" + ) + token = _token(OAUTH_TOKEN_USERNAME, code) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, oauthtoken=oauth_token) def test_valid_app_specific_token(app): - user = model.user.get_user('devtable') - app_specific_token = model.appspecifictoken.create_token(user, 'some token') - full_token = model.appspecifictoken.get_full_token_string(app_specific_token) - token = _token(APP_SPECIFIC_TOKEN_USERNAME, full_token) - result = validate_basic_auth(token) - assert result == ValidateResult(AuthKind.basic, appspecifictoken=app_specific_token) + user = model.user.get_user("devtable") + app_specific_token = model.appspecifictoken.create_token(user, "some token") + full_token = model.appspecifictoken.get_full_token_string(app_specific_token) + token = _token(APP_SPECIFIC_TOKEN_USERNAME, full_token) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, appspecifictoken=app_specific_token) def test_invalid_unicode(app): - token = '\xebOH' - header = 'basic ' + b64encode(token) - result = validate_basic_auth(header) - assert result == ValidateResult(AuthKind.basic, missing=True) + token = "\xebOH" + header = "basic " + b64encode(token) + 
result = validate_basic_auth(header) + assert result == ValidateResult(AuthKind.basic, missing=True) def test_invalid_unicode_2(app): - token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”' - header = 'basic ' + b64encode('devtable+somerobot:%s' % token) - result = validate_basic_auth(header) - assert result == ValidateResult( - AuthKind.basic, - error_message='Could not find robot with username: devtable+somerobot and supplied password.') + token = "“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”" + header = "basic " + b64encode("devtable+somerobot:%s" % token) + result = validate_basic_auth(header) + assert result == ValidateResult( + AuthKind.basic, + error_message="Could not find robot with username: devtable+somerobot and supplied password.", + ) diff --git a/auth/test/test_cookie.py b/auth/test/test_cookie.py index 8c212d709..b9e69b571 100644 --- a/auth/test/test_cookie.py +++ b/auth/test/test_cookie.py @@ -9,58 +9,58 @@ from test.fixtures import * def test_anonymous_cookie(app): - assert validate_session_cookie().missing + assert validate_session_cookie().missing def test_invalidformatted_cookie(app): - # "Login" with a non-UUID reference. - someuser = model.user.get_user('devtable') - login_user(LoginWrappedDBUser('somenonuuid', someuser)) + # "Login" with a non-UUID reference. + someuser = model.user.get_user("devtable") + login_user(LoginWrappedDBUser("somenonuuid", someuser)) - # Ensure we get an invalid session cookie format error. - result = validate_session_cookie() - assert result.authed_user is None - assert result.context.identity is None - assert not result.has_nonrobot_user - assert result.error_message == 'Invalid session cookie format' + # Ensure we get an invalid session cookie format error. + result = validate_session_cookie() + assert result.authed_user is None + assert result.context.identity is None + assert not result.has_nonrobot_user + assert result.error_message == "Invalid session cookie format" def test_disabled_user(app): - # "Login" with a disabled user. - someuser = model.user.get_user('disabled') - login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + # "Login" with a disabled user. + someuser = model.user.get_user("disabled") + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) - # Ensure we get an invalid session cookie format error. - result = validate_session_cookie() - assert result.authed_user is None - assert result.context.identity is None - assert not result.has_nonrobot_user - assert result.error_message == 'User account is disabled' + # Ensure we get an invalid session cookie format error. + result = validate_session_cookie() + assert result.authed_user is None + assert result.context.identity is None + assert not result.has_nonrobot_user + assert result.error_message == "User account is disabled" def test_valid_user(app): - # Login with a valid user. - someuser = model.user.get_user('devtable') - login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + # Login with a valid user. 
+ someuser = model.user.get_user("devtable") + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) - result = validate_session_cookie() - assert result.authed_user == someuser - assert result.context.identity is not None - assert result.has_nonrobot_user - assert result.error_message is None + result = validate_session_cookie() + assert result.authed_user == someuser + assert result.context.identity is not None + assert result.has_nonrobot_user + assert result.error_message is None def test_valid_organization(app): - # "Login" with a valid organization. - someorg = model.user.get_namespace_user('buynlarge') - someorg.uuid = str(uuid.uuid4()) - someorg.verified = True - someorg.save() + # "Login" with a valid organization. + someorg = model.user.get_namespace_user("buynlarge") + someorg.uuid = str(uuid.uuid4()) + someorg.verified = True + someorg.save() - login_user(LoginWrappedDBUser(someorg.uuid, someorg)) + login_user(LoginWrappedDBUser(someorg.uuid, someorg)) - result = validate_session_cookie() - assert result.authed_user is None - assert result.context.identity is None - assert not result.has_nonrobot_user - assert result.error_message == 'Cannot login to organization' + result = validate_session_cookie() + assert result.authed_user is None + assert result.context.identity is None + assert not result.has_nonrobot_user + assert result.error_message == "Cannot login to organization" diff --git a/auth/test/test_credentials.py b/auth/test/test_credentials.py index 4e55c470c..08e5a39c1 100644 --- a/auth/test/test_credentials.py +++ b/auth/test/test_credentials.py @@ -1,147 +1,184 @@ # -*- coding: utf-8 -*- from auth.credentials import validate_credentials, CredentialKind -from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, - APP_SPECIFIC_TOKEN_USERNAME) +from auth.credential_consts import ( + ACCESS_TOKEN_USERNAME, + OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME, +) from auth.validateresult import AuthKind, ValidateResult from data import model from test.fixtures import * + def test_valid_user(app): - result, kind = validate_credentials('devtable', 'password') - assert kind == CredentialKind.user - assert result == ValidateResult(AuthKind.credentials, user=model.user.get_user('devtable')) + result, kind = validate_credentials("devtable", "password") + assert kind == CredentialKind.user + assert result == ValidateResult( + AuthKind.credentials, user=model.user.get_user("devtable") + ) + def test_valid_robot(app): - robot, password = model.user.create_robot('somerobot', model.user.get_user('devtable')) - result, kind = validate_credentials(robot.username, password) - assert kind == CredentialKind.robot - assert result == ValidateResult(AuthKind.credentials, robot=robot) + robot, password = model.user.create_robot( + "somerobot", model.user.get_user("devtable") + ) + result, kind = validate_credentials(robot.username, password) + assert kind == CredentialKind.robot + assert result == ValidateResult(AuthKind.credentials, robot=robot) + def test_valid_robot_for_disabled_user(app): - user = model.user.get_user('devtable') - user.enabled = False - user.save() + user = model.user.get_user("devtable") + user.enabled = False + user.save() - robot, password = model.user.create_robot('somerobot', user) - result, kind = validate_credentials(robot.username, password) - assert kind == CredentialKind.robot + robot, password = model.user.create_robot("somerobot", user) + result, kind = validate_credentials(robot.username, password) + assert kind == 
CredentialKind.robot + + err = "This user has been disabled. Please contact your administrator." + assert result == ValidateResult(AuthKind.credentials, error_message=err) - err = 'This user has been disabled. Please contact your administrator.' - assert result == ValidateResult(AuthKind.credentials, error_message=err) def test_valid_token(app): - access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken') - result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.get_code()) - assert kind == CredentialKind.token - assert result == ValidateResult(AuthKind.credentials, token=access_token) + access_token = model.token.create_delegate_token("devtable", "simple", "sometoken") + result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.get_code()) + assert kind == CredentialKind.token + assert result == ValidateResult(AuthKind.credentials, token=access_token) + def test_valid_oauth(app): - user = model.user.get_user('devtable') - app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0] - oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read') - result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, code) - assert kind == CredentialKind.oauth_token - assert result == ValidateResult(AuthKind.oauth, oauthtoken=oauth_token) + user = model.user.get_user("devtable") + app = model.oauth.list_applications_for_org( + model.user.get_user_or_org("buynlarge") + )[0] + oauth_token, code = model.oauth.create_access_token_for_testing( + user, app.client_id, "repo:read" + ) + result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, code) + assert kind == CredentialKind.oauth_token + assert result == ValidateResult(AuthKind.oauth, oauthtoken=oauth_token) + def test_invalid_user(app): - result, kind = validate_credentials('devtable', 'somepassword') - assert kind == CredentialKind.user - assert result == ValidateResult(AuthKind.credentials, - error_message='Invalid Username or Password') + result, kind = validate_credentials("devtable", "somepassword") + assert kind == CredentialKind.user + assert result == ValidateResult( + AuthKind.credentials, error_message="Invalid Username or Password" + ) + def test_valid_app_specific_token(app): - user = model.user.get_user('devtable') - app_specific_token = model.appspecifictoken.create_token(user, 'some token') - full_token = model.appspecifictoken.get_full_token_string(app_specific_token) - result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) - assert kind == CredentialKind.app_specific_token - assert result == ValidateResult(AuthKind.credentials, appspecifictoken=app_specific_token) + user = model.user.get_user("devtable") + app_specific_token = model.appspecifictoken.create_token(user, "some token") + full_token = model.appspecifictoken.get_full_token_string(app_specific_token) + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) + assert kind == CredentialKind.app_specific_token + assert result == ValidateResult( + AuthKind.credentials, appspecifictoken=app_specific_token + ) + def test_valid_app_specific_token_for_disabled_user(app): - user = model.user.get_user('devtable') - user.enabled = False - user.save() + user = model.user.get_user("devtable") + user.enabled = False + user.save() - app_specific_token = model.appspecifictoken.create_token(user, 'some token') - full_token = model.appspecifictoken.get_full_token_string(app_specific_token) - result, kind = 
validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) - assert kind == CredentialKind.app_specific_token + app_specific_token = model.appspecifictoken.create_token(user, "some token") + full_token = model.appspecifictoken.get_full_token_string(app_specific_token) + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) + assert kind == CredentialKind.app_specific_token - err = 'This user has been disabled. Please contact your administrator.' - assert result == ValidateResult(AuthKind.credentials, error_message=err) + err = "This user has been disabled. Please contact your administrator." + assert result == ValidateResult(AuthKind.credentials, error_message=err) + + +def test_invalid_app_specific_token(app): + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, "somecode") + assert kind == CredentialKind.app_specific_token + assert result == ValidateResult(AuthKind.credentials, error_message="Invalid token") -def test_invalid_app_specific_token(app): - result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, 'somecode') - assert kind == CredentialKind.app_specific_token - assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token') def test_invalid_app_specific_token_code(app): - user = model.user.get_user('devtable') - app_specific_token = model.appspecifictoken.create_token(user, 'some token') - full_token = app_specific_token.token_name + 'something' - result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) - assert kind == CredentialKind.app_specific_token - assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token') + user = model.user.get_user("devtable") + app_specific_token = model.appspecifictoken.create_token(user, "some token") + full_token = app_specific_token.token_name + "something" + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) + assert kind == CredentialKind.app_specific_token + assert result == ValidateResult(AuthKind.credentials, error_message="Invalid token") -def test_unicode(app): - result, kind = validate_credentials('someusername', 'some₪code') - assert kind == CredentialKind.user - assert not result.auth_valid - assert result == ValidateResult(AuthKind.credentials, - error_message='Invalid Username or Password') -def test_unicode_robot(app): - robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable')) - result, kind = validate_credentials(robot.username, 'some₪code') +def test_unicode(app): + result, kind = validate_credentials("someusername", "some₪code") + assert kind == CredentialKind.user + assert not result.auth_valid + assert result == ValidateResult( + AuthKind.credentials, error_message="Invalid Username or Password" + ) - assert kind == CredentialKind.robot - assert not result.auth_valid - msg = 'Could not find robot with username: devtable+somerobot and supplied password.' - assert result == ValidateResult(AuthKind.credentials, error_message=msg) +def test_unicode_robot(app): + robot, _ = model.user.create_robot("somerobot", model.user.get_user("devtable")) + result, kind = validate_credentials(robot.username, "some₪code") + + assert kind == CredentialKind.robot + assert not result.auth_valid + + msg = ( + "Could not find robot with username: devtable+somerobot and supplied password." 
+ ) + assert result == ValidateResult(AuthKind.credentials, error_message=msg) + def test_invalid_user(app): - result, kind = validate_credentials('someinvaliduser', 'password') - assert kind == CredentialKind.user - assert not result.authed_user - assert not result.auth_valid + result, kind = validate_credentials("someinvaliduser", "password") + assert kind == CredentialKind.user + assert not result.authed_user + assert not result.auth_valid + def test_invalid_user_password(app): - result, kind = validate_credentials('devtable', 'somepassword') - assert kind == CredentialKind.user - assert not result.authed_user - assert not result.auth_valid + result, kind = validate_credentials("devtable", "somepassword") + assert kind == CredentialKind.user + assert not result.authed_user + assert not result.auth_valid + def test_invalid_robot(app): - result, kind = validate_credentials('devtable+doesnotexist', 'password') - assert kind == CredentialKind.robot - assert not result.authed_user - assert not result.auth_valid + result, kind = validate_credentials("devtable+doesnotexist", "password") + assert kind == CredentialKind.robot + assert not result.authed_user + assert not result.auth_valid + def test_invalid_robot_token(app): - robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable')) - result, kind = validate_credentials(robot.username, 'invalidpassword') - assert kind == CredentialKind.robot - assert not result.authed_user - assert not result.auth_valid + robot, _ = model.user.create_robot("somerobot", model.user.get_user("devtable")) + result, kind = validate_credentials(robot.username, "invalidpassword") + assert kind == CredentialKind.robot + assert not result.authed_user + assert not result.auth_valid + def test_invalid_unicode_robot(app): - token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”' - result, kind = validate_credentials('devtable+somerobot', token) - assert kind == CredentialKind.robot - assert not result.auth_valid - msg = 'Could not find robot with username: devtable+somerobot' - assert result == ValidateResult(AuthKind.credentials, error_message=msg) + token = "“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”" + result, kind = validate_credentials("devtable+somerobot", token) + assert kind == CredentialKind.robot + assert not result.auth_valid + msg = "Could not find robot with username: devtable+somerobot" + assert result == ValidateResult(AuthKind.credentials, error_message=msg) + def test_invalid_unicode_robot_2(app): - user = model.user.get_user('devtable') - robot, password = model.user.create_robot('somerobot', user) + user = model.user.get_user("devtable") + robot, password = model.user.create_robot("somerobot", user) - token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”' - result, kind = validate_credentials('devtable+somerobot', token) - assert kind == CredentialKind.robot - assert not result.auth_valid - msg = 'Could not find robot with username: devtable+somerobot and supplied password.' - assert result == ValidateResult(AuthKind.credentials, error_message=msg) + token = "“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”" + result, kind = validate_credentials("devtable+somerobot", token) + assert kind == CredentialKind.robot + assert not result.auth_valid + msg = ( + "Could not find robot with username: devtable+somerobot and supplied password." 
+ ) + assert result == ValidateResult(AuthKind.credentials, error_message=msg) diff --git a/auth/test/test_decorators.py b/auth/test/test_decorators.py index b0477f7bd..87b4d2ae9 100644 --- a/auth/test/test_decorators.py +++ b/auth/test/test_decorators.py @@ -7,99 +7,102 @@ from werkzeug.exceptions import HTTPException from app import LoginWrappedDBUser from auth.auth_context import get_authenticated_user from auth.decorators import ( - extract_namespace_repo_from_session, require_session_login, process_auth_or_cookie) + extract_namespace_repo_from_session, + require_session_login, + process_auth_or_cookie, +) from data import model from test.fixtures import * def test_extract_namespace_repo_from_session_missing(app): - def emptyfunc(): - pass + def emptyfunc(): + pass - session.clear() - with pytest.raises(HTTPException): - extract_namespace_repo_from_session(emptyfunc)() + session.clear() + with pytest.raises(HTTPException): + extract_namespace_repo_from_session(emptyfunc)() def test_extract_namespace_repo_from_session_present(app): - encountered = [] + encountered = [] - def somefunc(namespace, repository): - encountered.append(namespace) - encountered.append(repository) + def somefunc(namespace, repository): + encountered.append(namespace) + encountered.append(repository) - # Add the namespace and repository to the session. - session.clear() - session['namespace'] = 'foo' - session['repository'] = 'bar' + # Add the namespace and repository to the session. + session.clear() + session["namespace"] = "foo" + session["repository"] = "bar" - # Call the decorated method. - extract_namespace_repo_from_session(somefunc)() + # Call the decorated method. + extract_namespace_repo_from_session(somefunc)() - assert encountered[0] == 'foo' - assert encountered[1] == 'bar' + assert encountered[0] == "foo" + assert encountered[1] == "bar" def test_require_session_login_missing(app): - def emptyfunc(): - pass + def emptyfunc(): + pass - with pytest.raises(HTTPException): - require_session_login(emptyfunc)() + with pytest.raises(HTTPException): + require_session_login(emptyfunc)() def test_require_session_login_valid_user(app): - def emptyfunc(): - pass + def emptyfunc(): + pass - # Login as a valid user. - someuser = model.user.get_user('devtable') - login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + # Login as a valid user. + someuser = model.user.get_user("devtable") + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) - # Call the function. - require_session_login(emptyfunc)() + # Call the function. + require_session_login(emptyfunc)() - # Ensure the authenticated user was updated. - assert get_authenticated_user() == someuser + # Ensure the authenticated user was updated. + assert get_authenticated_user() == someuser def test_require_session_login_invalid_user(app): - def emptyfunc(): - pass + def emptyfunc(): + pass - # "Login" as a disabled user. - someuser = model.user.get_user('disabled') - login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + # "Login" as a disabled user. + someuser = model.user.get_user("disabled") + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) - # Call the function. - with pytest.raises(HTTPException): - require_session_login(emptyfunc)() + # Call the function. + with pytest.raises(HTTPException): + require_session_login(emptyfunc)() - # Ensure the authenticated user was not updated. - assert get_authenticated_user() is None + # Ensure the authenticated user was not updated. 
+ assert get_authenticated_user() is None def test_process_auth_or_cookie_invalid_user(app): - def emptyfunc(): - pass + def emptyfunc(): + pass - # Call the function. - process_auth_or_cookie(emptyfunc)() + # Call the function. + process_auth_or_cookie(emptyfunc)() - # Ensure the authenticated user was not updated. - assert get_authenticated_user() is None + # Ensure the authenticated user was not updated. + assert get_authenticated_user() is None def test_process_auth_or_cookie_valid_user(app): - def emptyfunc(): - pass + def emptyfunc(): + pass - # Login as a valid user. - someuser = model.user.get_user('devtable') - login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + # Login as a valid user. + someuser = model.user.get_user("devtable") + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) - # Call the function. - process_auth_or_cookie(emptyfunc)() + # Call the function. + process_auth_or_cookie(emptyfunc)() - # Ensure the authenticated user was updated. - assert get_authenticated_user() == someuser + # Ensure the authenticated user was updated. + assert get_authenticated_user() == someuser diff --git a/auth/test/test_oauth.py b/auth/test/test_oauth.py index f678f2604..1453e878a 100644 --- a/auth/test/test_oauth.py +++ b/auth/test/test_oauth.py @@ -6,50 +6,63 @@ from data import model from test.fixtures import * -@pytest.mark.parametrize('header, expected_result', [ - ('', ValidateResult(AuthKind.oauth, missing=True)), - ('somerandomtoken', ValidateResult(AuthKind.oauth, missing=True)), - ('bearer some random token', ValidateResult(AuthKind.oauth, missing=True)), - ('bearer invalidtoken', - ValidateResult(AuthKind.oauth, error_message='OAuth access token could not be validated')),]) +@pytest.mark.parametrize( + "header, expected_result", + [ + ("", ValidateResult(AuthKind.oauth, missing=True)), + ("somerandomtoken", ValidateResult(AuthKind.oauth, missing=True)), + ("bearer some random token", ValidateResult(AuthKind.oauth, missing=True)), + ( + "bearer invalidtoken", + ValidateResult( + AuthKind.oauth, + error_message="OAuth access token could not be validated", + ), + ), + ], +) def test_bearer(header, expected_result, app): - assert validate_bearer_auth(header) == expected_result + assert validate_bearer_auth(header) == expected_result def test_valid_oauth(app): - user = model.user.get_user('devtable') - app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0] - token_string = '%s%s' % ('a' * 20, 'b' * 20) - oauth_token, _ = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read', - access_token=token_string) - result = validate_bearer_auth('bearer ' + token_string) - assert result.context.oauthtoken == oauth_token - assert result.authed_user == user - assert result.auth_valid + user = model.user.get_user("devtable") + app = model.oauth.list_applications_for_org( + model.user.get_user_or_org("buynlarge") + )[0] + token_string = "%s%s" % ("a" * 20, "b" * 20) + oauth_token, _ = model.oauth.create_access_token_for_testing( + user, app.client_id, "repo:read", access_token=token_string + ) + result = validate_bearer_auth("bearer " + token_string) + assert result.context.oauthtoken == oauth_token + assert result.authed_user == user + assert result.auth_valid def test_disabled_user_oauth(app): - user = model.user.get_user('disabled') - token_string = '%s%s' % ('a' * 20, 'b' * 20) - oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin', - access_token=token_string) + user = 
model.user.get_user("disabled") + token_string = "%s%s" % ("a" * 20, "b" * 20) + oauth_token, _ = model.oauth.create_access_token_for_testing( + user, "deadbeef", "repo:admin", access_token=token_string + ) - result = validate_bearer_auth('bearer ' + token_string) - assert result.context.oauthtoken is None - assert result.authed_user is None - assert not result.auth_valid - assert result.error_message == 'Granter of the oauth access token is disabled' + result = validate_bearer_auth("bearer " + token_string) + assert result.context.oauthtoken is None + assert result.authed_user is None + assert not result.auth_valid + assert result.error_message == "Granter of the oauth access token is disabled" def test_expired_token(app): - user = model.user.get_user('devtable') - token_string = '%s%s' % ('a' * 20, 'b' * 20) - oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin', - access_token=token_string, - expires_in=-1000) + user = model.user.get_user("devtable") + token_string = "%s%s" % ("a" * 20, "b" * 20) + oauth_token, _ = model.oauth.create_access_token_for_testing( + user, "deadbeef", "repo:admin", access_token=token_string, expires_in=-1000 + ) - result = validate_bearer_auth('bearer ' + token_string) - assert result.context.oauthtoken is None - assert result.authed_user is None - assert not result.auth_valid - assert result.error_message == 'OAuth access token has expired' + result = validate_bearer_auth("bearer " + token_string) + assert result.context.oauthtoken is None + assert result.authed_user is None + assert not result.auth_valid + assert result.error_message == "OAuth access token has expired" diff --git a/auth/test/test_permissions.py b/auth/test/test_permissions.py index f2849934d..e97a11f1d 100644 --- a/auth/test/test_permissions.py +++ b/auth/test/test_permissions.py @@ -6,32 +6,33 @@ from data import model from test.fixtures import * -SUPER_USERNAME = 'devtable' -UNSUPER_USERNAME = 'freshuser' +SUPER_USERNAME = "devtable" +UNSUPER_USERNAME = "freshuser" + @pytest.fixture() def superuser(initialized_db): - return model.user.get_user(SUPER_USERNAME) + return model.user.get_user(SUPER_USERNAME) @pytest.fixture() def normie(initialized_db): - return model.user.get_user(UNSUPER_USERNAME) + return model.user.get_user(UNSUPER_USERNAME) def test_superuser_matrix(superuser, normie): - test_cases = [ - (superuser, {scopes.SUPERUSER}, True), - (superuser, {scopes.DIRECT_LOGIN}, True), - (superuser, {scopes.READ_USER, scopes.SUPERUSER}, True), - (superuser, {scopes.READ_USER}, False), - (normie, {scopes.SUPERUSER}, False), - (normie, {scopes.DIRECT_LOGIN}, False), - (normie, {scopes.READ_USER, scopes.SUPERUSER}, False), - (normie, {scopes.READ_USER}, False), - ] + test_cases = [ + (superuser, {scopes.SUPERUSER}, True), + (superuser, {scopes.DIRECT_LOGIN}, True), + (superuser, {scopes.READ_USER, scopes.SUPERUSER}, True), + (superuser, {scopes.READ_USER}, False), + (normie, {scopes.SUPERUSER}, False), + (normie, {scopes.DIRECT_LOGIN}, False), + (normie, {scopes.READ_USER, scopes.SUPERUSER}, False), + (normie, {scopes.READ_USER}, False), + ] - for user_obj, scope_set, expected in test_cases: - perm_user = QuayDeferredPermissionUser.for_user(user_obj, scope_set) - has_su = perm_user.can(SuperUserPermission()) - assert has_su == expected + for user_obj, scope_set, expected in test_cases: + perm_user = QuayDeferredPermissionUser.for_user(user_obj, scope_set) + has_su = perm_user.can(SuperUserPermission()) + assert has_su == expected diff --git 
a/auth/test/test_registry_jwt.py b/auth/test/test_registry_jwt.py index fc6548d74..ffcb9fca7 100644 --- a/auth/test/test_registry_jwt.py +++ b/auth/test/test_registry_jwt.py @@ -14,190 +14,226 @@ from initdb import setup_database_for_testing, finished_database_for_testing from util.morecollections import AttrDict from util.security.registry_jwt import ANONYMOUS_SUB, build_context_and_subject -TEST_AUDIENCE = app.config['SERVER_HOSTNAME'] -TEST_USER = AttrDict({'username': 'joeuser', 'uuid': 'foobar', 'enabled': True}) +TEST_AUDIENCE = app.config["SERVER_HOSTNAME"] +TEST_USER = AttrDict({"username": "joeuser", "uuid": "foobar", "enabled": True}) MAX_SIGNED_S = 3660 TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour -ANONYMOUS_SUB = '(anonymous)' -SERVICE_NAME = 'quay' +ANONYMOUS_SUB = "(anonymous)" +SERVICE_NAME = "quay" # This import has to come below any references to "app". from test.fixtures import * -def _access(typ='repository', name='somens/somerepo', actions=None): - actions = [] if actions is None else actions - return [{ - 'type': typ, - 'name': name, - 'actions': actions, - }] +def _access(typ="repository", name="somens/somerepo", actions=None): + actions = [] if actions is None else actions + return [{"type": typ, "name": name, "actions": actions}] def _delete_field(token_data, field_name): - token_data.pop(field_name) - return token_data + token_data.pop(field_name) + return token_data -def _token_data(access=[], context=None, audience=TEST_AUDIENCE, user=TEST_USER, iat=None, - exp=None, nbf=None, iss=None, subject=None): - if subject is None: - _, subject = build_context_and_subject(ValidatedAuthContext(user=user)) - return { - 'iss': iss or instance_keys.service_name, - 'aud': audience, - 'nbf': nbf if nbf is not None else int(time.time()), - 'iat': iat if iat is not None else int(time.time()), - 'exp': exp if exp is not None else int(time.time() + TOKEN_VALIDITY_LIFETIME_S), - 'sub': subject, - 'access': access, - 'context': context, - } +def _token_data( + access=[], + context=None, + audience=TEST_AUDIENCE, + user=TEST_USER, + iat=None, + exp=None, + nbf=None, + iss=None, + subject=None, +): + if subject is None: + _, subject = build_context_and_subject(ValidatedAuthContext(user=user)) + return { + "iss": iss or instance_keys.service_name, + "aud": audience, + "nbf": nbf if nbf is not None else int(time.time()), + "iat": iat if iat is not None else int(time.time()), + "exp": exp if exp is not None else int(time.time() + TOKEN_VALIDITY_LIFETIME_S), + "sub": subject, + "access": access, + "context": context, + } def _token(token_data, key_id=None, private_key=None, skip_header=False, alg=None): - key_id = key_id or instance_keys.local_key_id - private_key = private_key or instance_keys.local_private_key + key_id = key_id or instance_keys.local_key_id + private_key = private_key or instance_keys.local_private_key - if alg == "none": - private_key = None + if alg == "none": + private_key = None - token_headers = {'kid': key_id} + token_headers = {"kid": key_id} - if skip_header: - token_headers = {} + if skip_header: + token_headers = {} - token_data = jwt.encode(token_data, private_key, alg or 'RS256', headers=token_headers) - return 'Bearer {0}'.format(token_data) + token_data = jwt.encode( + token_data, private_key, alg or "RS256", headers=token_headers + ) + return "Bearer {0}".format(token_data) def _parse_token(token): - return identity_from_bearer_token(token)[0] + return identity_from_bearer_token(token)[0] def test_accepted_token(initialized_db): - token = 
_token(_token_data()) - identity = _parse_token(token) - assert identity.id == TEST_USER.username, 'should be %s, but was %s' % (TEST_USER.username, - identity.id) - assert len(identity.provides) == 0 + token = _token(_token_data()) + identity = _parse_token(token) + assert identity.id == TEST_USER.username, "should be %s, but was %s" % ( + TEST_USER.username, + identity.id, + ) + assert len(identity.provides) == 0 - anon_token = _token(_token_data(user=None)) - anon_identity = _parse_token(anon_token) - assert anon_identity.id == ANONYMOUS_SUB, 'should be %s, but was %s' % (ANONYMOUS_SUB, - anon_identity.id) - assert len(identity.provides) == 0 + anon_token = _token(_token_data(user=None)) + anon_identity = _parse_token(anon_token) + assert anon_identity.id == ANONYMOUS_SUB, "should be %s, but was %s" % ( + ANONYMOUS_SUB, + anon_identity.id, + ) + assert len(identity.provides) == 0 -@pytest.mark.parametrize('access', [ - (_access(actions=['pull', 'push'])), - (_access(actions=['pull', '*'])), - (_access(actions=['*', 'push'])), - (_access(actions=['*'])), - (_access(actions=['pull', '*', 'push'])),]) +@pytest.mark.parametrize( + "access", + [ + (_access(actions=["pull", "push"])), + (_access(actions=["pull", "*"])), + (_access(actions=["*", "push"])), + (_access(actions=["*"])), + (_access(actions=["pull", "*", "push"])), + ], +) def test_token_with_access(access, initialized_db): - token = _token(_token_data(access=access)) - identity = _parse_token(token) - assert identity.id == TEST_USER.username, 'should be %s, but was %s' % (TEST_USER.username, - identity.id) - assert len(identity.provides) == 1 + token = _token(_token_data(access=access)) + identity = _parse_token(token) + assert identity.id == TEST_USER.username, "should be %s, but was %s" % ( + TEST_USER.username, + identity.id, + ) + assert len(identity.provides) == 1 - role = list(identity.provides)[0][3] - if "*" in access[0]['actions']: - assert role == 'admin' - elif "push" in access[0]['actions']: - assert role == 'write' - elif "pull" in access[0]['actions']: - assert role == 'read' + role = list(identity.provides)[0][3] + if "*" in access[0]["actions"]: + assert role == "admin" + elif "push" in access[0]["actions"]: + assert role == "write" + elif "pull" in access[0]["actions"]: + assert role == "read" -@pytest.mark.parametrize('token', [ - pytest.param(_token( - _token_data(access=[{ - 'toipe': 'repository', - 'namesies': 'somens/somerepo', - 'akshuns': ['pull', 'push', '*']}])), id='bad access'), - pytest.param(_token(_token_data(audience='someotherapp')), id='bad aud'), - pytest.param(_token(_delete_field(_token_data(), 'aud')), id='no aud'), - pytest.param(_token(_token_data(nbf=int(time.time()) + 600)), id='future nbf'), - pytest.param(_token(_delete_field(_token_data(), 'nbf')), id='no nbf'), - pytest.param(_token(_token_data(iat=int(time.time()) + 600)), id='future iat'), - pytest.param(_token(_delete_field(_token_data(), 'iat')), id='no iat'), - pytest.param(_token(_token_data(exp=int(time.time()) + MAX_SIGNED_S * 2)), id='exp too long'), - pytest.param(_token(_token_data(exp=int(time.time()) - 60)), id='expired'), - pytest.param(_token(_delete_field(_token_data(), 'exp')), id='no exp'), - pytest.param(_token(_delete_field(_token_data(), 'sub')), id='no sub'), - pytest.param(_token(_token_data(iss='badissuer')), id='bad iss'), - pytest.param(_token(_delete_field(_token_data(), 'iss')), id='no iss'), - pytest.param(_token(_token_data(), skip_header=True), id='no header'), - pytest.param(_token(_token_data(), 
key_id='someunknownkey'), id='bad key'), - pytest.param(_token(_token_data(), key_id='kid7'), id='bad key :: kid7'), - pytest.param(_token(_token_data(), alg='none', private_key=None), id='none alg'), - pytest.param('some random token', id='random token'), - pytest.param('Bearer: sometokenhere', id='extra bearer'), - pytest.param('\nBearer: dGVzdA', id='leading newline'), -]) +@pytest.mark.parametrize( + "token", + [ + pytest.param( + _token( + _token_data( + access=[ + { + "toipe": "repository", + "namesies": "somens/somerepo", + "akshuns": ["pull", "push", "*"], + } + ] + ) + ), + id="bad access", + ), + pytest.param(_token(_token_data(audience="someotherapp")), id="bad aud"), + pytest.param(_token(_delete_field(_token_data(), "aud")), id="no aud"), + pytest.param(_token(_token_data(nbf=int(time.time()) + 600)), id="future nbf"), + pytest.param(_token(_delete_field(_token_data(), "nbf")), id="no nbf"), + pytest.param(_token(_token_data(iat=int(time.time()) + 600)), id="future iat"), + pytest.param(_token(_delete_field(_token_data(), "iat")), id="no iat"), + pytest.param( + _token(_token_data(exp=int(time.time()) + MAX_SIGNED_S * 2)), + id="exp too long", + ), + pytest.param(_token(_token_data(exp=int(time.time()) - 60)), id="expired"), + pytest.param(_token(_delete_field(_token_data(), "exp")), id="no exp"), + pytest.param(_token(_delete_field(_token_data(), "sub")), id="no sub"), + pytest.param(_token(_token_data(iss="badissuer")), id="bad iss"), + pytest.param(_token(_delete_field(_token_data(), "iss")), id="no iss"), + pytest.param(_token(_token_data(), skip_header=True), id="no header"), + pytest.param(_token(_token_data(), key_id="someunknownkey"), id="bad key"), + pytest.param(_token(_token_data(), key_id="kid7"), id="bad key :: kid7"), + pytest.param( + _token(_token_data(), alg="none", private_key=None), id="none alg" + ), + pytest.param("some random token", id="random token"), + pytest.param("Bearer: sometokenhere", id="extra bearer"), + pytest.param("\nBearer: dGVzdA", id="leading newline"), + ], +) def test_invalid_jwt(token, initialized_db): - with pytest.raises(InvalidJWTException): - _parse_token(token) + with pytest.raises(InvalidJWTException): + _parse_token(token) def test_mixing_keys_e2e(initialized_db): - token_data = _token_data() + token_data = _token_data() - # Create a new key for testing. - p, key = model.service_keys.generate_service_key(instance_keys.service_name, None, kid='newkey', - name='newkey', metadata={}) - private_key = p.exportKey('PEM') + # Create a new key for testing. + p, key = model.service_keys.generate_service_key( + instance_keys.service_name, None, kid="newkey", name="newkey", metadata={} + ) + private_key = p.exportKey("PEM") - # Test first with the new valid, but unapproved key. - unapproved_key_token = _token(token_data, key_id='newkey', private_key=private_key) - with pytest.raises(InvalidJWTException): - _parse_token(unapproved_key_token) + # Test first with the new valid, but unapproved key. + unapproved_key_token = _token(token_data, key_id="newkey", private_key=private_key) + with pytest.raises(InvalidJWTException): + _parse_token(unapproved_key_token) - # Approve the key and try again. - admin_user = model.user.get_user('devtable') - model.service_keys.approve_service_key(key.kid, ServiceKeyApprovalType.SUPERUSER, approver=admin_user) + # Approve the key and try again. 
+ admin_user = model.user.get_user("devtable") + model.service_keys.approve_service_key( + key.kid, ServiceKeyApprovalType.SUPERUSER, approver=admin_user + ) - valid_token = _token(token_data, key_id='newkey', private_key=private_key) + valid_token = _token(token_data, key_id="newkey", private_key=private_key) - identity = _parse_token(valid_token) - assert identity.id == TEST_USER.username - assert len(identity.provides) == 0 + identity = _parse_token(valid_token) + assert identity.id == TEST_USER.username + assert len(identity.provides) == 0 - # Try using a different private key with the existing key ID. - bad_private_token = _token(token_data, key_id='newkey', - private_key=instance_keys.local_private_key) - with pytest.raises(InvalidJWTException): - _parse_token(bad_private_token) + # Try using a different private key with the existing key ID. + bad_private_token = _token( + token_data, key_id="newkey", private_key=instance_keys.local_private_key + ) + with pytest.raises(InvalidJWTException): + _parse_token(bad_private_token) - # Try using a different key ID with the existing private key. - kid_mismatch_token = _token(token_data, key_id=instance_keys.local_key_id, - private_key=private_key) - with pytest.raises(InvalidJWTException): - _parse_token(kid_mismatch_token) + # Try using a different key ID with the existing private key. + kid_mismatch_token = _token( + token_data, key_id=instance_keys.local_key_id, private_key=private_key + ) + with pytest.raises(InvalidJWTException): + _parse_token(kid_mismatch_token) - # Delete the new key. - key.delete_instance(recursive=True) + # Delete the new key. + key.delete_instance(recursive=True) - # Ensure it still works (via the cache.) - deleted_key_token = _token(token_data, key_id='newkey', private_key=private_key) - identity = _parse_token(deleted_key_token) - assert identity.id == TEST_USER.username - assert len(identity.provides) == 0 + # Ensure it still works (via the cache.) + deleted_key_token = _token(token_data, key_id="newkey", private_key=private_key) + identity = _parse_token(deleted_key_token) + assert identity.id == TEST_USER.username + assert len(identity.provides) == 0 - # Break the cache. - instance_keys.clear_cache() + # Break the cache. + instance_keys.clear_cache() - # Ensure the key no longer works. - with pytest.raises(InvalidJWTException): - _parse_token(deleted_key_token) + # Ensure the key no longer works. + with pytest.raises(InvalidJWTException): + _parse_token(deleted_key_token) -@pytest.mark.parametrize('token', [ - u'someunicodetoken✡', - u'\xc9\xad\xbd', -]) +@pytest.mark.parametrize("token", [u"someunicodetoken✡", u"\xc9\xad\xbd"]) def test_unicode_token(token): - with pytest.raises(InvalidJWTException): - _parse_token(token) + with pytest.raises(InvalidJWTException): + _parse_token(token) diff --git a/auth/test/test_scopes.py b/auth/test/test_scopes.py index b71140136..a5aa883ea 100644 --- a/auth/test/test_scopes.py +++ b/auth/test/test_scopes.py @@ -1,50 +1,55 @@ import pytest from auth.scopes import ( - scopes_from_scope_string, validate_scope_string, ALL_SCOPES, is_subset_string) + scopes_from_scope_string, + validate_scope_string, + ALL_SCOPES, + is_subset_string, +) @pytest.mark.parametrize( - 'scopes_string, expected', - [ - # Valid single scopes. - ('repo:read', ['repo:read']), - ('repo:admin', ['repo:admin']), - - # Invalid scopes. - ('not:valid', []), - ('repo:admins', []), - - # Valid scope strings. 
- ('repo:read repo:admin', ['repo:read', 'repo:admin']), - ('repo:read,repo:admin', ['repo:read', 'repo:admin']), - ('repo:read,repo:admin repo:write', ['repo:read', 'repo:admin', 'repo:write']), - - # Partially invalid scopes. - ('repo:read,not:valid', []), - ('repo:read repo:admins', []), - - # Invalid scope strings. - ('repo:read|repo:admin', []), - - # Mixture of delimiters. - ('repo:read, repo:admin', []),]) + "scopes_string, expected", + [ + # Valid single scopes. + ("repo:read", ["repo:read"]), + ("repo:admin", ["repo:admin"]), + # Invalid scopes. + ("not:valid", []), + ("repo:admins", []), + # Valid scope strings. + ("repo:read repo:admin", ["repo:read", "repo:admin"]), + ("repo:read,repo:admin", ["repo:read", "repo:admin"]), + ("repo:read,repo:admin repo:write", ["repo:read", "repo:admin", "repo:write"]), + # Partially invalid scopes. + ("repo:read,not:valid", []), + ("repo:read repo:admins", []), + # Invalid scope strings. + ("repo:read|repo:admin", []), + # Mixture of delimiters. + ("repo:read, repo:admin", []), + ], +) def test_parsing(scopes_string, expected): - expected_scope_set = {ALL_SCOPES[scope_name] for scope_name in expected} - parsed_scope_set = scopes_from_scope_string(scopes_string) - assert parsed_scope_set == expected_scope_set - assert validate_scope_string(scopes_string) == bool(expected) + expected_scope_set = {ALL_SCOPES[scope_name] for scope_name in expected} + parsed_scope_set = scopes_from_scope_string(scopes_string) + assert parsed_scope_set == expected_scope_set + assert validate_scope_string(scopes_string) == bool(expected) -@pytest.mark.parametrize('superset, subset, result', [ - ('repo:read', 'repo:read', True), - ('repo:read repo:admin', 'repo:read', True), - ('repo:read,repo:admin', 'repo:read', True), - ('repo:read,repo:admin', 'repo:admin', True), - ('repo:read,repo:admin', 'repo:admin repo:read', True), - ('', 'repo:read', False), - ('unknown:tag', 'repo:read', False), - ('repo:read unknown:tag', 'repo:read', False), - ('repo:read,unknown:tag', 'repo:read', False),]) +@pytest.mark.parametrize( + "superset, subset, result", + [ + ("repo:read", "repo:read", True), + ("repo:read repo:admin", "repo:read", True), + ("repo:read,repo:admin", "repo:read", True), + ("repo:read,repo:admin", "repo:admin", True), + ("repo:read,repo:admin", "repo:admin repo:read", True), + ("", "repo:read", False), + ("unknown:tag", "repo:read", False), + ("repo:read unknown:tag", "repo:read", False), + ("repo:read,unknown:tag", "repo:read", False), + ], +) def test_subset_string(superset, subset, result): - assert is_subset_string(superset, subset) == result + assert is_subset_string(superset, subset) == result diff --git a/auth/test/test_signedgrant.py b/auth/test/test_signedgrant.py index e200f0bf1..5575a032d 100644 --- a/auth/test/test_signedgrant.py +++ b/auth/test/test_signedgrant.py @@ -1,32 +1,47 @@ import pytest -from auth.signedgrant import validate_signed_grant, generate_signed_token, SIGNATURE_PREFIX +from auth.signedgrant import ( + validate_signed_grant, + generate_signed_token, + SIGNATURE_PREFIX, +) from auth.validateresult import AuthKind, ValidateResult -@pytest.mark.parametrize('header, expected_result', [ - pytest.param('', ValidateResult(AuthKind.signed_grant, missing=True), id='Missing'), - pytest.param('somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True), - id='Invalid header'), - pytest.param('token somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True), - id='Random Token'), - pytest.param('token ' + SIGNATURE_PREFIX + 
'foo', - ValidateResult(AuthKind.signed_grant, - error_message='Signed grant could not be validated'), - id='Invalid token'), -]) +@pytest.mark.parametrize( + "header, expected_result", + [ + pytest.param( + "", ValidateResult(AuthKind.signed_grant, missing=True), id="Missing" + ), + pytest.param( + "somerandomtoken", + ValidateResult(AuthKind.signed_grant, missing=True), + id="Invalid header", + ), + pytest.param( + "token somerandomtoken", + ValidateResult(AuthKind.signed_grant, missing=True), + id="Random Token", + ), + pytest.param( + "token " + SIGNATURE_PREFIX + "foo", + ValidateResult( + AuthKind.signed_grant, + error_message="Signed grant could not be validated", + ), + id="Invalid token", + ), + ], +) def test_token(header, expected_result): - assert validate_signed_grant(header) == expected_result + assert validate_signed_grant(header) == expected_result def test_valid_grant(): - header = 'token ' + generate_signed_token({'a': 'b'}, {'c': 'd'}) - expected = ValidateResult(AuthKind.signed_grant, signed_data={ - 'grants': { - 'a': 'b', - }, - 'user_context': { - 'c': 'd' - }, - }) - assert validate_signed_grant(header) == expected + header = "token " + generate_signed_token({"a": "b"}, {"c": "d"}) + expected = ValidateResult( + AuthKind.signed_grant, + signed_data={"grants": {"a": "b"}, "user_context": {"c": "d"}}, + ) + assert validate_signed_grant(header) == expected diff --git a/auth/test/test_validateresult.py b/auth/test/test_validateresult.py index 90875da76..bc514e843 100644 --- a/auth/test/test_validateresult.py +++ b/auth/test/test_validateresult.py @@ -6,58 +6,68 @@ from data import model from data.database import AppSpecificAuthToken from test.fixtures import * + def get_user(): - return model.user.get_user('devtable') + return model.user.get_user("devtable") + def get_app_specific_token(): - return AppSpecificAuthToken.get() + return AppSpecificAuthToken.get() + def get_robot(): - robot, _ = model.user.create_robot('somebot', get_user()) - return robot + robot, _ = model.user.create_robot("somebot", get_user()) + return robot + def get_token(): - return model.token.create_delegate_token('devtable', 'simple', 'sometoken') + return model.token.create_delegate_token("devtable", "simple", "sometoken") + def get_oauthtoken(): - user = model.user.get_user('devtable') - return list(model.oauth.list_access_tokens_for_user(user))[0] + user = model.user.get_user("devtable") + return list(model.oauth.list_access_tokens_for_user(user))[0] + def get_signeddata(): - return {'grants': {'a': 'b'}, 'user_context': {'c': 'd'}} + return {"grants": {"a": "b"}, "user_context": {"c": "d"}} -@pytest.mark.parametrize('get_entity,entity_kind', [ - (get_user, 'user'), - (get_robot, 'robot'), - (get_token, 'token'), - (get_oauthtoken, 'oauthtoken'), - (get_signeddata, 'signed_data'), - (get_app_specific_token, 'appspecifictoken'), -]) + +@pytest.mark.parametrize( + "get_entity,entity_kind", + [ + (get_user, "user"), + (get_robot, "robot"), + (get_token, "token"), + (get_oauthtoken, "oauthtoken"), + (get_signeddata, "signed_data"), + (get_app_specific_token, "appspecifictoken"), + ], +) def test_apply_context(get_entity, entity_kind, app): - assert get_authenticated_context() is None + assert get_authenticated_context() is None - entity = get_entity() - args = {} - args[entity_kind] = entity + entity = get_entity() + args = {} + args[entity_kind] = entity - result = ValidateResult(AuthKind.basic, **args) - result.apply_to_context() + result = ValidateResult(AuthKind.basic, **args) + 
result.apply_to_context() - expected_user = entity if entity_kind == 'user' or entity_kind == 'robot' else None - if entity_kind == 'oauthtoken': - expected_user = entity.authorized_user + expected_user = entity if entity_kind == "user" or entity_kind == "robot" else None + if entity_kind == "oauthtoken": + expected_user = entity.authorized_user - if entity_kind == 'appspecifictoken': - expected_user = entity.user + if entity_kind == "appspecifictoken": + expected_user = entity.user - expected_token = entity if entity_kind == 'token' else None - expected_oauth = entity if entity_kind == 'oauthtoken' else None - expected_appspecifictoken = entity if entity_kind == 'appspecifictoken' else None - expected_grant = entity if entity_kind == 'signed_data' else None + expected_token = entity if entity_kind == "token" else None + expected_oauth = entity if entity_kind == "oauthtoken" else None + expected_appspecifictoken = entity if entity_kind == "appspecifictoken" else None + expected_grant = entity if entity_kind == "signed_data" else None - assert get_authenticated_context().authed_user == expected_user - assert get_authenticated_context().token == expected_token - assert get_authenticated_context().oauthtoken == expected_oauth - assert get_authenticated_context().appspecifictoken == expected_appspecifictoken - assert get_authenticated_context().signed_data == expected_grant + assert get_authenticated_context().authed_user == expected_user + assert get_authenticated_context().token == expected_token + assert get_authenticated_context().oauthtoken == expected_oauth + assert get_authenticated_context().appspecifictoken == expected_appspecifictoken + assert get_authenticated_context().signed_data == expected_grant diff --git a/auth/validateresult.py b/auth/validateresult.py index 3235104e0..09cc09b11 100644 --- a/auth/validateresult.py +++ b/auth/validateresult.py @@ -3,54 +3,76 @@ from auth.auth_context_type import ValidatedAuthContext, ContextEntityKind class AuthKind(Enum): - cookie = 'cookie' - basic = 'basic' - oauth = 'oauth' - signed_grant = 'signed_grant' - credentials = 'credentials' + cookie = "cookie" + basic = "basic" + oauth = "oauth" + signed_grant = "signed_grant" + credentials = "credentials" class ValidateResult(object): - """ A result of validating auth in one form or another. """ - def __init__(self, kind, missing=False, user=None, token=None, oauthtoken=None, - robot=None, appspecifictoken=None, signed_data=None, error_message=None): - self.kind = kind - self.missing = missing - self.error_message = error_message - self.context = ValidatedAuthContext(user=user, token=token, oauthtoken=oauthtoken, robot=robot, - appspecifictoken=appspecifictoken, signed_data=signed_data) + """ A result of validating auth in one form or another. 
""" - def tuple(self): - return (self.kind, self.missing, self.error_message, self.context.tuple()) + def __init__( + self, + kind, + missing=False, + user=None, + token=None, + oauthtoken=None, + robot=None, + appspecifictoken=None, + signed_data=None, + error_message=None, + ): + self.kind = kind + self.missing = missing + self.error_message = error_message + self.context = ValidatedAuthContext( + user=user, + token=token, + oauthtoken=oauthtoken, + robot=robot, + appspecifictoken=appspecifictoken, + signed_data=signed_data, + ) - def __eq__(self, other): - return self.tuple() == other.tuple() + def tuple(self): + return (self.kind, self.missing, self.error_message, self.context.tuple()) - def apply_to_context(self): - """ Applies this auth result to the auth context and Flask-Principal. """ - self.context.apply_to_request_context() + def __eq__(self, other): + return self.tuple() == other.tuple() - def with_kind(self, kind): - """ Returns a copy of this result, but with the kind replaced. """ - result = ValidateResult(kind, missing=self.missing, error_message=self.error_message) - result.context = self.context - return result + def apply_to_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + self.context.apply_to_request_context() - def __repr__(self): - return 'ValidateResult: %s (missing: %s, error: %s)' % (self.kind, self.missing, - self.error_message) + def with_kind(self, kind): + """ Returns a copy of this result, but with the kind replaced. """ + result = ValidateResult( + kind, missing=self.missing, error_message=self.error_message + ) + result.context = self.context + return result - @property - def authed_user(self): - """ Returns the authenticated user, whether directly, or via an OAuth token. """ - return self.context.authed_user + def __repr__(self): + return "ValidateResult: %s (missing: %s, error: %s)" % ( + self.kind, + self.missing, + self.error_message, + ) - @property - def has_nonrobot_user(self): - """ Returns whether a user (not a robot) was authenticated successfully. """ - return self.context.has_nonrobot_user + @property + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth token. """ + return self.context.authed_user - @property - def auth_valid(self): - """ Returns whether authentication successfully occurred. """ - return self.context.entity_kind != ContextEntityKind.anonymous + @property + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + return self.context.has_nonrobot_user + + @property + def auth_valid(self): + """ Returns whether authentication successfully occurred. 
""" + return self.context.entity_kind != ContextEntityKind.anonymous diff --git a/avatars/avatars.py b/avatars/avatars.py index 737b51191..67969eee7 100644 --- a/avatars/avatars.py +++ b/avatars/avatars.py @@ -6,110 +6,133 @@ from requests.exceptions import RequestException logger = logging.getLogger(__name__) + class Avatar(object): - def __init__(self, app=None): - self.app = app - self.state = self._init_app(app) + def __init__(self, app=None): + self.app = app + self.state = self._init_app(app) - def _init_app(self, app): - return AVATAR_CLASSES[app.config.get('AVATAR_KIND', 'Gravatar')]( - app.config['PREFERRED_URL_SCHEME'], app.config['AVATAR_COLORS'], app.config['HTTPCLIENT']) + def _init_app(self, app): + return AVATAR_CLASSES[app.config.get("AVATAR_KIND", "Gravatar")]( + app.config["PREFERRED_URL_SCHEME"], + app.config["AVATAR_COLORS"], + app.config["HTTPCLIENT"], + ) - def __getattr__(self, name): - return getattr(self.state, name, None) + def __getattr__(self, name): + return getattr(self.state, name, None) class BaseAvatar(object): - """ Base class for all avatar implementations. """ - def __init__(self, preferred_url_scheme, colors, http_client): - self.preferred_url_scheme = preferred_url_scheme - self.colors = colors - self.http_client = http_client + """ Base class for all avatar implementations. """ - def get_mail_html(self, name, email_or_id, size=16, kind='user'): - """ Returns the full HTML and CSS for viewing the avatar of the given name and email address, + def __init__(self, preferred_url_scheme, colors, http_client): + self.preferred_url_scheme = preferred_url_scheme + self.colors = colors + self.http_client = http_client + + def get_mail_html(self, name, email_or_id, size=16, kind="user"): + """ Returns the full HTML and CSS for viewing the avatar of the given name and email address, with an optional size. """ - data = self.get_data(name, email_or_id, kind) - url = self._get_url(data['hash'], size) if kind != 'team' else None - font_size = size - 6 + data = self.get_data(name, email_or_id, kind) + url = self._get_url(data["hash"], size) if kind != "team" else None + font_size = size - 6 - if url is not None: - # Try to load the gravatar. If we get a non-404 response, then we use it in place of - # the CSS avatar. - try: - response = self.http_client.get(url, timeout=5) - if response.status_code == 200: - return """%s""" % (url, size, size, kind) - except RequestException: - logger.exception('Could not retrieve avatar for user %s', name) + if url is not None: + # Try to load the gravatar. If we get a non-404 response, then we use it in place of + # the CSS avatar. 
+ try: + response = self.http_client.get(url, timeout=5) + if response.status_code == 200: + return """%s""" % ( + url, + size, + size, + kind, + ) + except RequestException: + logger.exception("Could not retrieve avatar for user %s", name) - radius = '50%' if kind == 'team' else '0%' - letter = 'Ω' if kind == 'team' and data['name'] == 'owners' else data['name'].upper()[0] + radius = "50%" if kind == "team" else "0%" + letter = ( + "Ω" + if kind == "team" and data["name"] == "owners" + else data["name"].upper()[0] + ) - return """ + return """ %s -""" % (size, size, data['color'], font_size, size, radius, letter) +""" % ( + size, + size, + data["color"], + font_size, + size, + radius, + letter, + ) - def get_data_for_user(self, user): - return self.get_data(user.username, user.email, 'robot' if user.robot else 'user') + def get_data_for_user(self, user): + return self.get_data( + user.username, user.email, "robot" if user.robot else "user" + ) - def get_data_for_team(self, team): - return self.get_data(team.name, team.name, 'team') + def get_data_for_team(self, team): + return self.get_data(team.name, team.name, "team") - def get_data_for_org(self, org): - return self.get_data(org.username, org.email, 'org') + def get_data_for_org(self, org): + return self.get_data(org.username, org.email, "org") - def get_data_for_external_user(self, external_user): - return self.get_data(external_user.username, external_user.email, 'user') + def get_data_for_external_user(self, external_user): + return self.get_data(external_user.username, external_user.email, "user") - def get_data(self, name, email_or_id, kind='user'): - """ Computes and returns the full data block for the avatar: + def get_data(self, name, email_or_id, kind="user"): + """ Computes and returns the full data block for the avatar: { 'name': name, 'hash': The gravatar hash, if any. 'color': The color for the avatar } """ - colors = self.colors + colors = self.colors - # Note: email_or_id may be None if gotten from external auth when email is disabled, - # so use the username in that case. - username_email_or_id = email_or_id or name - hash_value = hashlib.md5(username_email_or_id.strip().lower()).hexdigest() + # Note: email_or_id may be None if gotten from external auth when email is disabled, + # so use the username in that case. + username_email_or_id = email_or_id or name + hash_value = hashlib.md5(username_email_or_id.strip().lower()).hexdigest() - byte_count = int(math.ceil(math.log(len(colors), 16))) - byte_data = hash_value[0:byte_count] - hash_color = colors[int(byte_data, 16) % len(colors)] + byte_count = int(math.ceil(math.log(len(colors), 16))) + byte_data = hash_value[0:byte_count] + hash_color = colors[int(byte_data, 16) % len(colors)] - return { - 'name': name, - 'hash': hash_value, - 'color': hash_color, - 'kind': kind - } + return {"name": name, "hash": hash_value, "color": hash_color, "kind": kind} - def _get_url(self, hash_value, size): - """ Returns the URL for displaying the overlay avatar. """ - return None + def _get_url(self, hash_value, size): + """ Returns the URL for displaying the overlay avatar. """ + return None class GravatarAvatar(BaseAvatar): - """ Avatar system that uses gravatar for generating avatars. """ - def _get_url(self, hash_value, size=16): - return '%s://www.gravatar.com/avatar/%s?d=404&size=%s' % (self.preferred_url_scheme, - hash_value, size) + """ Avatar system that uses gravatar for generating avatars. 
""" + + def _get_url(self, hash_value, size=16): + return "%s://www.gravatar.com/avatar/%s?d=404&size=%s" % ( + self.preferred_url_scheme, + hash_value, + size, + ) + class LocalAvatar(BaseAvatar): - """ Avatar system that uses the local system for generating avatars. """ - pass + """ Avatar system that uses the local system for generating avatars. """ -AVATAR_CLASSES = { - 'gravatar': GravatarAvatar, - 'local': LocalAvatar -} + pass + + +AVATAR_CLASSES = {"gravatar": GravatarAvatar, "local": LocalAvatar} diff --git a/boot.py b/boot.py index 228fb2987..d9c906ab5 100755 --- a/boot.py +++ b/boot.py @@ -16,7 +16,7 @@ from data.model.release import set_region_release from data.model.service_keys import get_service_key from util.config.database import sync_database_with_config from util.generatepresharedkey import generate_key -from _init import CONF_DIR +from _init import CONF_DIR logger = logging.getLogger(__name__) @@ -24,108 +24,117 @@ logger = logging.getLogger(__name__) @lru_cache(maxsize=1) def get_audience(): - audience = app.config.get('JWTPROXY_AUDIENCE') + audience = app.config.get("JWTPROXY_AUDIENCE") - if audience: - return audience + if audience: + return audience - scheme = app.config.get('PREFERRED_URL_SCHEME') - hostname = app.config.get('SERVER_HOSTNAME') + scheme = app.config.get("PREFERRED_URL_SCHEME") + hostname = app.config.get("SERVER_HOSTNAME") - # hostname includes port, use that - if ':' in hostname: - return urlunparse((scheme, hostname, '', '', '', '')) + # hostname includes port, use that + if ":" in hostname: + return urlunparse((scheme, hostname, "", "", "", "")) - # no port, guess based on scheme - if scheme == 'https': - port = '443' - else: - port = '80' + # no port, guess based on scheme + if scheme == "https": + port = "443" + else: + port = "80" - return urlunparse((scheme, hostname + ':' + port, '', '', '', '')) + return urlunparse((scheme, hostname + ":" + port, "", "", "", "")) def _verify_service_key(): - try: - with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f: - quay_key_id = f.read() - try: - get_service_key(quay_key_id, approved_only=False) - assert os.path.exists(app.config['INSTANCE_SERVICE_KEY_LOCATION']) - return quay_key_id - except ServiceKeyDoesNotExist: - logger.exception('Could not find non-expired existing service key %s; creating a new one', - quay_key_id) - return None + with open(app.config["INSTANCE_SERVICE_KEY_KID_LOCATION"]) as f: + quay_key_id = f.read() - # Found a valid service key, so exiting. - except IOError: - logger.exception('Could not load existing service key; creating a new one') - return None + try: + get_service_key(quay_key_id, approved_only=False) + assert os.path.exists(app.config["INSTANCE_SERVICE_KEY_LOCATION"]) + return quay_key_id + except ServiceKeyDoesNotExist: + logger.exception( + "Could not find non-expired existing service key %s; creating a new one", + quay_key_id, + ) + return None + + # Found a valid service key, so exiting. + except IOError: + logger.exception("Could not load existing service key; creating a new one") + return None def setup_jwt_proxy(): - """ + """ Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration. """ - if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')): - # Proxy is already setup. Make sure the service key is still valid. - quay_key_id = _verify_service_key() - if quay_key_id is not None: - return + if os.path.exists(os.path.join(CONF_DIR, "jwtproxy_conf.yaml")): + # Proxy is already setup. 
Make sure the service key is still valid. + quay_key_id = _verify_service_key() + if quay_key_id is not None: + return - # Ensure we have an existing key if in read-only mode. - if app.config.get('REGISTRY_STATE', 'normal') == 'readonly': - quay_key_id = _verify_service_key() - if quay_key_id is None: - raise Exception('No valid service key found for read-only registry.') - else: - # Generate the key for this Quay instance to use. - minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120) - expiration = datetime.now() + timedelta(minutes=minutes_until_expiration) - quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'], - get_audience(), expiration_date=expiration) + # Ensure we have an existing key if in read-only mode. + if app.config.get("REGISTRY_STATE", "normal") == "readonly": + quay_key_id = _verify_service_key() + if quay_key_id is None: + raise Exception("No valid service key found for read-only registry.") + else: + # Generate the key for this Quay instance to use. + minutes_until_expiration = app.config.get( + "INSTANCE_SERVICE_KEY_EXPIRATION", 120 + ) + expiration = datetime.now() + timedelta(minutes=minutes_until_expiration) + quay_key, quay_key_id = generate_key( + app.config["INSTANCE_SERVICE_KEY_SERVICE"], + get_audience(), + expiration_date=expiration, + ) - with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f: - f.truncate(0) - f.write(quay_key_id) + with open(app.config["INSTANCE_SERVICE_KEY_KID_LOCATION"], mode="w") as f: + f.truncate(0) + f.write(quay_key_id) - with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f: - f.truncate(0) - f.write(quay_key.exportKey()) + with open(app.config["INSTANCE_SERVICE_KEY_LOCATION"], mode="w") as f: + f.truncate(0) + f.write(quay_key.exportKey()) - # Generate the JWT proxy configuration. - audience = get_audience() - registry = audience + '/keys' - security_issuer = app.config.get('SECURITY_SCANNER_ISSUER_NAME', 'security_scanner') + # Generate the JWT proxy configuration. 
+ audience = get_audience() + registry = audience + "/keys" + security_issuer = app.config.get("SECURITY_SCANNER_ISSUER_NAME", "security_scanner") - with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml.jnj')) as f: - template = Template(f.read()) - rendered = template.render( - conf_dir=CONF_DIR, - audience=audience, - registry=registry, - key_id=quay_key_id, - security_issuer=security_issuer, - service_key_location=app.config['INSTANCE_SERVICE_KEY_LOCATION'], - ) + with open(os.path.join(CONF_DIR, "jwtproxy_conf.yaml.jnj")) as f: + template = Template(f.read()) + rendered = template.render( + conf_dir=CONF_DIR, + audience=audience, + registry=registry, + key_id=quay_key_id, + security_issuer=security_issuer, + service_key_location=app.config["INSTANCE_SERVICE_KEY_LOCATION"], + ) - with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f: - f.write(rendered) + with open(os.path.join(CONF_DIR, "jwtproxy_conf.yaml"), "w") as f: + f.write(rendered) def main(): - if not app.config.get('SETUP_COMPLETE', False): - raise Exception('Your configuration bundle is either not mounted or setup has not been completed') + if not app.config.get("SETUP_COMPLETE", False): + raise Exception( + "Your configuration bundle is either not mounted or setup has not been completed" + ) - sync_database_with_config(app.config) - setup_jwt_proxy() + sync_database_with_config(app.config) + setup_jwt_proxy() - # Record deploy - if release.REGION and release.GIT_HEAD: - set_region_release(release.SERVICE, release.REGION, release.GIT_HEAD) + # Record deploy + if release.REGION and release.GIT_HEAD: + set_region_release(release.SERVICE, release.REGION, release.GIT_HEAD) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/buildman/asyncutil.py b/buildman/asyncutil.py index accb13542..f913072c4 100644 --- a/buildman/asyncutil.py +++ b/buildman/asyncutil.py @@ -5,38 +5,39 @@ from trollius import get_event_loop, coroutine def wrap_with_threadpool(obj, worker_threads=1): - """ + """ Wraps a class in an async executor so that it can be safely used in an event loop like trollius. """ - async_executor = ThreadPoolExecutor(worker_threads) - return AsyncWrapper(obj, executor=async_executor), async_executor + async_executor = ThreadPoolExecutor(worker_threads) + return AsyncWrapper(obj, executor=async_executor), async_executor class AsyncWrapper(object): - """ Wrapper class which will transform a syncronous library to one that can be used with + """ Wrapper class which will transform a syncronous library to one that can be used with trollius coroutines. """ - def __init__(self, delegate, loop=None, executor=None): - self._loop = loop if loop is not None else get_event_loop() - self._delegate = delegate - self._executor = executor - def __getattr__(self, attrib): - delegate_attr = getattr(self._delegate, attrib) + def __init__(self, delegate, loop=None, executor=None): + self._loop = loop if loop is not None else get_event_loop() + self._delegate = delegate + self._executor = executor - if not callable(delegate_attr): - return delegate_attr + def __getattr__(self, attrib): + delegate_attr = getattr(self._delegate, attrib) - def wrapper(*args, **kwargs): - """ Wraps the delegate_attr with primitives that will transform sync calls to ones shelled + if not callable(delegate_attr): + return delegate_attr + + def wrapper(*args, **kwargs): + """ Wraps the delegate_attr with primitives that will transform sync calls to ones shelled out to a thread pool. 
""" - callable_delegate_attr = partial(delegate_attr, *args, **kwargs) - return self._loop.run_in_executor(self._executor, callable_delegate_attr) + callable_delegate_attr = partial(delegate_attr, *args, **kwargs) + return self._loop.run_in_executor(self._executor, callable_delegate_attr) - return wrapper + return wrapper - @coroutine - def __call__(self, *args, **kwargs): - callable_delegate_attr = partial(self._delegate, *args, **kwargs) - return self._loop.run_in_executor(self._executor, callable_delegate_attr) + @coroutine + def __call__(self, *args, **kwargs): + callable_delegate_attr = partial(self._delegate, *args, **kwargs) + return self._loop.run_in_executor(self._executor, callable_delegate_attr) diff --git a/buildman/builder.py b/buildman/builder.py index 0261c262d..8c31da891 100644 --- a/buildman/builder.py +++ b/buildman/builder.py @@ -18,80 +18,104 @@ from raven.conf import setup_logging logger = logging.getLogger(__name__) -BUILD_MANAGERS = { - 'enterprise': EnterpriseManager, - 'ephemeral': EphemeralBuilderManager, -} +BUILD_MANAGERS = {"enterprise": EnterpriseManager, "ephemeral": EphemeralBuilderManager} -EXTERNALLY_MANAGED = 'external' +EXTERNALLY_MANAGED = "external" DEFAULT_WEBSOCKET_PORT = 8787 DEFAULT_CONTROLLER_PORT = 8686 LOG_FORMAT = "%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s" + def run_build_manager(): - if not features.BUILD_SUPPORT: - logger.debug('Building is disabled. Please enable the feature flag') - while True: - time.sleep(1000) - return + if not features.BUILD_SUPPORT: + logger.debug("Building is disabled. Please enable the feature flag") + while True: + time.sleep(1000) + return - if app.config.get('REGISTRY_STATE', 'normal') == 'readonly': - logger.debug('Building is disabled while in read-only mode.') - while True: - time.sleep(1000) - return + if app.config.get("REGISTRY_STATE", "normal") == "readonly": + logger.debug("Building is disabled while in read-only mode.") + while True: + time.sleep(1000) + return - build_manager_config = app.config.get('BUILD_MANAGER') - if build_manager_config is None: - return + build_manager_config = app.config.get("BUILD_MANAGER") + if build_manager_config is None: + return - # If the build system is externally managed, then we just sleep this process. - if build_manager_config[0] == EXTERNALLY_MANAGED: - logger.debug('Builds are externally managed.') - while True: - time.sleep(1000) - return + # If the build system is externally managed, then we just sleep this process. 
+ if build_manager_config[0] == EXTERNALLY_MANAGED: + logger.debug("Builds are externally managed.") + while True: + time.sleep(1000) + return - logger.debug('Asking to start build manager with lifecycle "%s"', build_manager_config[0]) - manager_klass = BUILD_MANAGERS.get(build_manager_config[0]) - if manager_klass is None: - return + logger.debug( + 'Asking to start build manager with lifecycle "%s"', build_manager_config[0] + ) + manager_klass = BUILD_MANAGERS.get(build_manager_config[0]) + if manager_klass is None: + return - manager_hostname = os.environ.get('BUILDMAN_HOSTNAME', - app.config.get('BUILDMAN_HOSTNAME', - app.config['SERVER_HOSTNAME'])) - websocket_port = int(os.environ.get('BUILDMAN_WEBSOCKET_PORT', - app.config.get('BUILDMAN_WEBSOCKET_PORT', - DEFAULT_WEBSOCKET_PORT))) - controller_port = int(os.environ.get('BUILDMAN_CONTROLLER_PORT', - app.config.get('BUILDMAN_CONTROLLER_PORT', - DEFAULT_CONTROLLER_PORT))) + manager_hostname = os.environ.get( + "BUILDMAN_HOSTNAME", + app.config.get("BUILDMAN_HOSTNAME", app.config["SERVER_HOSTNAME"]), + ) + websocket_port = int( + os.environ.get( + "BUILDMAN_WEBSOCKET_PORT", + app.config.get("BUILDMAN_WEBSOCKET_PORT", DEFAULT_WEBSOCKET_PORT), + ) + ) + controller_port = int( + os.environ.get( + "BUILDMAN_CONTROLLER_PORT", + app.config.get("BUILDMAN_CONTROLLER_PORT", DEFAULT_CONTROLLER_PORT), + ) + ) - logger.debug('Will pass buildman hostname %s to builders for websocket connection', - manager_hostname) + logger.debug( + "Will pass buildman hostname %s to builders for websocket connection", + manager_hostname, + ) - logger.debug('Starting build manager with lifecycle "%s"', build_manager_config[0]) - ssl_context = None - if os.environ.get('SSL_CONFIG'): - logger.debug('Loading SSL cert and key') - ssl_context = SSLContext() - ssl_context.load_cert_chain(os.path.join(os.environ.get('SSL_CONFIG'), 'ssl.cert'), - os.path.join(os.environ.get('SSL_CONFIG'), 'ssl.key')) + logger.debug('Starting build manager with lifecycle "%s"', build_manager_config[0]) + ssl_context = None + if os.environ.get("SSL_CONFIG"): + logger.debug("Loading SSL cert and key") + ssl_context = SSLContext() + ssl_context.load_cert_chain( + os.path.join(os.environ.get("SSL_CONFIG"), "ssl.cert"), + os.path.join(os.environ.get("SSL_CONFIG"), "ssl.key"), + ) - server = BuilderServer(app.config['SERVER_HOSTNAME'], dockerfile_build_queue, build_logs, - user_files, manager_klass, build_manager_config[1], manager_hostname) - server.run('0.0.0.0', websocket_port, controller_port, ssl=ssl_context) + server = BuilderServer( + app.config["SERVER_HOSTNAME"], + dockerfile_build_queue, + build_logs, + user_files, + manager_klass, + build_manager_config[1], + manager_hostname, + ) + server.run("0.0.0.0", websocket_port, controller_port, ssl=ssl_context) -if __name__ == '__main__': - logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) - logging.getLogger('peewee').setLevel(logging.WARN) - logging.getLogger('boto').setLevel(logging.WARN) - if app.config.get('EXCEPTION_LOG_TYPE', 'FakeSentry') == 'Sentry': - buildman_name = '%s:buildman' % socket.gethostname() - setup_logging(SentryHandler(app.config.get('SENTRY_DSN', ''), name=buildman_name, - level=logging.ERROR)) +if __name__ == "__main__": + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) + logging.getLogger("peewee").setLevel(logging.WARN) + logging.getLogger("boto").setLevel(logging.WARN) - run_build_manager() + if app.config.get("EXCEPTION_LOG_TYPE", "FakeSentry") 
== "Sentry": + buildman_name = "%s:buildman" % socket.gethostname() + setup_logging( + SentryHandler( + app.config.get("SENTRY_DSN", ""), + name=buildman_name, + level=logging.ERROR, + ) + ) + + run_build_manager() diff --git a/buildman/component/basecomponent.py b/buildman/component/basecomponent.py index bd4032776..8806b5629 100644 --- a/buildman/component/basecomponent.py +++ b/buildman/component/basecomponent.py @@ -1,13 +1,15 @@ from autobahn.asyncio.wamp import ApplicationSession -class BaseComponent(ApplicationSession): - """ Base class for all registered component sessions in the server. """ - def __init__(self, config, **kwargs): - ApplicationSession.__init__(self, config) - self.server = None - self.parent_manager = None - self.build_logs = None - self.user_files = None - def kind(self): - raise NotImplementedError \ No newline at end of file +class BaseComponent(ApplicationSession): + """ Base class for all registered component sessions in the server. """ + + def __init__(self, config, **kwargs): + ApplicationSession.__init__(self, config) + self.server = None + self.parent_manager = None + self.build_logs = None + self.user_files = None + + def kind(self): + raise NotImplementedError diff --git a/buildman/component/buildcomponent.py b/buildman/component/buildcomponent.py index 62c64e6b8..9e7b4946a 100644 --- a/buildman/component/buildcomponent.py +++ b/buildman/component/buildcomponent.py @@ -27,513 +27,632 @@ BUILD_HEARTBEAT_DELAY = datetime.timedelta(seconds=30) HEARTBEAT_TIMEOUT = 10 INITIAL_TIMEOUT = 25 -SUPPORTED_WORKER_VERSIONS = ['0.3'] +SUPPORTED_WORKER_VERSIONS = ["0.3"] # Label which marks a manifest with its source build ID. -INTERNAL_LABEL_BUILD_UUID = 'quay.build.uuid' +INTERNAL_LABEL_BUILD_UUID = "quay.build.uuid" logger = logging.getLogger(__name__) + class ComponentStatus(object): - """ ComponentStatus represents the possible states of a component. """ - JOINING = 'joining' - WAITING = 'waiting' - RUNNING = 'running' - BUILDING = 'building' - TIMED_OUT = 'timeout' + """ ComponentStatus represents the possible states of a component. """ + + JOINING = "joining" + WAITING = "waiting" + RUNNING = "running" + BUILDING = "building" + TIMED_OUT = "timeout" + class BuildComponent(BaseComponent): - """ An application session component which conducts one (or more) builds. """ - def __init__(self, config, realm=None, token=None, **kwargs): - self.expected_token = token - self.builder_realm = realm + """ An application session component which conducts one (or more) builds. 
""" - self.parent_manager = None - self.registry_hostname = None + def __init__(self, config, realm=None, token=None, **kwargs): + self.expected_token = token + self.builder_realm = realm - self._component_status = ComponentStatus.JOINING - self._last_heartbeat = None - self._current_job = None - self._build_status = None - self._image_info = None - self._worker_version = None + self.parent_manager = None + self.registry_hostname = None - BaseComponent.__init__(self, config, **kwargs) + self._component_status = ComponentStatus.JOINING + self._last_heartbeat = None + self._current_job = None + self._build_status = None + self._image_info = None + self._worker_version = None - def kind(self): - return 'builder' + BaseComponent.__init__(self, config, **kwargs) - def onConnect(self): - self.join(self.builder_realm) + def kind(self): + return "builder" - @trollius.coroutine - def onJoin(self, details): - logger.debug('Registering methods and listeners for component %s', self.builder_realm) - yield From(self.register(self._on_ready, u'io.quay.buildworker.ready')) - yield From(self.register(self._determine_cache_tag, u'io.quay.buildworker.determinecachetag')) - yield From(self.register(self._ping, u'io.quay.buildworker.ping')) - yield From(self.register(self._on_log_message, u'io.quay.builder.logmessagesynchronously')) + def onConnect(self): + self.join(self.builder_realm) - yield From(self.subscribe(self._on_heartbeat, u'io.quay.builder.heartbeat')) + @trollius.coroutine + def onJoin(self, details): + logger.debug( + "Registering methods and listeners for component %s", self.builder_realm + ) + yield From(self.register(self._on_ready, u"io.quay.buildworker.ready")) + yield From( + self.register( + self._determine_cache_tag, u"io.quay.buildworker.determinecachetag" + ) + ) + yield From(self.register(self._ping, u"io.quay.buildworker.ping")) + yield From( + self.register( + self._on_log_message, u"io.quay.builder.logmessagesynchronously" + ) + ) - yield From(self._set_status(ComponentStatus.WAITING)) + yield From(self.subscribe(self._on_heartbeat, u"io.quay.builder.heartbeat")) - @trollius.coroutine - def start_build(self, build_job): - """ Starts a build. """ - if self._component_status not in (ComponentStatus.WAITING, ComponentStatus.RUNNING): - logger.debug('Could not start build for component %s (build %s, worker version: %s): %s', - self.builder_realm, build_job.repo_build.uuid, self._worker_version, - self._component_status) - raise Return() + yield From(self._set_status(ComponentStatus.WAITING)) - logger.debug('Starting build for component %s (build %s, worker version: %s)', - self.builder_realm, build_job.repo_build.uuid, self._worker_version) + @trollius.coroutine + def start_build(self, build_job): + """ Starts a build. 
""" + if self._component_status not in ( + ComponentStatus.WAITING, + ComponentStatus.RUNNING, + ): + logger.debug( + "Could not start build for component %s (build %s, worker version: %s): %s", + self.builder_realm, + build_job.repo_build.uuid, + self._worker_version, + self._component_status, + ) + raise Return() - self._current_job = build_job - self._build_status = StatusHandler(self.build_logs, build_job.repo_build.uuid) - self._image_info = {} + logger.debug( + "Starting build for component %s (build %s, worker version: %s)", + self.builder_realm, + build_job.repo_build.uuid, + self._worker_version, + ) - yield From(self._set_status(ComponentStatus.BUILDING)) + self._current_job = build_job + self._build_status = StatusHandler(self.build_logs, build_job.repo_build.uuid) + self._image_info = {} - # Send the notification that the build has started. - build_job.send_notification('build_start') + yield From(self._set_status(ComponentStatus.BUILDING)) - # Parse the build configuration. - try: - build_config = build_job.build_config - except BuildJobLoadException as irbe: - yield From(self._build_failure('Could not load build job information', irbe)) - raise Return() + # Send the notification that the build has started. + build_job.send_notification("build_start") - base_image_information = {} + # Parse the build configuration. + try: + build_config = build_job.build_config + except BuildJobLoadException as irbe: + yield From( + self._build_failure("Could not load build job information", irbe) + ) + raise Return() - # Add the pull robot information, if any. - if build_job.pull_credentials: - base_image_information['username'] = build_job.pull_credentials.get('username', '') - base_image_information['password'] = build_job.pull_credentials.get('password', '') + base_image_information = {} - # Retrieve the repository's fully qualified name. - repo = build_job.repo_build.repository - repository_name = repo.namespace_user.username + '/' + repo.name + # Add the pull robot information, if any. + if build_job.pull_credentials: + base_image_information["username"] = build_job.pull_credentials.get( + "username", "" + ) + base_image_information["password"] = build_job.pull_credentials.get( + "password", "" + ) - # Parse the build queue item into build arguments. - # build_package: URL to the build package to download and untar/unzip. - # defaults to empty string to avoid requiring a pointer on the builder. - # sub_directory: The location within the build package of the Dockerfile and the build context. - # repository: The repository for which this build is occurring. - # registry: The registry for which this build is occuring (e.g. 'quay.io'). - # pull_token: The token to use when pulling the cache for building. - # push_token: The token to use to push the built image. - # tag_names: The name(s) of the tag(s) for the newly built image. - # base_image: The image name and credentials to use to conduct the base image pull. - # username: The username for pulling the base image (if any). - # password: The password for pulling the base image (if any). 
- context, dockerfile_path = self.extract_dockerfile_args(build_config) - build_arguments = { - 'build_package': build_job.get_build_package_url(self.user_files), - 'context': context, - 'dockerfile_path': dockerfile_path, - 'repository': repository_name, - 'registry': self.registry_hostname, - 'pull_token': build_job.repo_build.access_token.get_code(), - 'push_token': build_job.repo_build.access_token.get_code(), - 'tag_names': build_config.get('docker_tags', ['latest']), - 'base_image': base_image_information, - } + # Retrieve the repository's fully qualified name. + repo = build_job.repo_build.repository + repository_name = repo.namespace_user.username + "/" + repo.name - # If the trigger has a private key, it's using git, thus we should add - # git data to the build args. - # url: url used to clone the git repository - # sha: the sha1 identifier of the commit to check out - # private_key: the key used to get read access to the git repository + # Parse the build queue item into build arguments. + # build_package: URL to the build package to download and untar/unzip. + # defaults to empty string to avoid requiring a pointer on the builder. + # sub_directory: The location within the build package of the Dockerfile and the build context. + # repository: The repository for which this build is occurring. + # registry: The registry for which this build is occuring (e.g. 'quay.io'). + # pull_token: The token to use when pulling the cache for building. + # push_token: The token to use to push the built image. + # tag_names: The name(s) of the tag(s) for the newly built image. + # base_image: The image name and credentials to use to conduct the base image pull. + # username: The username for pulling the base image (if any). + # password: The password for pulling the base image (if any). + context, dockerfile_path = self.extract_dockerfile_args(build_config) + build_arguments = { + "build_package": build_job.get_build_package_url(self.user_files), + "context": context, + "dockerfile_path": dockerfile_path, + "repository": repository_name, + "registry": self.registry_hostname, + "pull_token": build_job.repo_build.access_token.get_code(), + "push_token": build_job.repo_build.access_token.get_code(), + "tag_names": build_config.get("docker_tags", ["latest"]), + "base_image": base_image_information, + } - # TODO(remove-unenc): Remove legacy field. - private_key = None - if build_job.repo_build.trigger is not None and \ - build_job.repo_build.trigger.secure_private_key is not None: - private_key = build_job.repo_build.trigger.secure_private_key.decrypt() + # If the trigger has a private key, it's using git, thus we should add + # git data to the build args. + # url: url used to clone the git repository + # sha: the sha1 identifier of the commit to check out + # private_key: the key used to get read access to the git repository - if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and \ - private_key is None and \ - build_job.repo_build.trigger is not None: - private_key = build_job.repo_build.trigger.private_key + # TODO(remove-unenc): Remove legacy field. 
+ private_key = None + if ( + build_job.repo_build.trigger is not None + and build_job.repo_build.trigger.secure_private_key is not None + ): + private_key = build_job.repo_build.trigger.secure_private_key.decrypt() - if private_key is not None: - build_arguments['git'] = { - 'url': build_config['trigger_metadata'].get('git_url', ''), - 'sha': BuildComponent._commit_sha(build_config), - 'private_key': private_key or '', - } + if ( + ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) + and private_key is None + and build_job.repo_build.trigger is not None + ): + private_key = build_job.repo_build.trigger.private_key - # If the build args have no buildpack, mark it as a failure before sending - # it to a builder instance. - if not build_arguments['build_package'] and not build_arguments['git']: - logger.error('%s: insufficient build args: %s', - self._current_job.repo_build.uuid, build_arguments) - yield From(self._build_failure('Insufficient build arguments. No buildpack available.')) - raise Return() + if private_key is not None: + build_arguments["git"] = { + "url": build_config["trigger_metadata"].get("git_url", ""), + "sha": BuildComponent._commit_sha(build_config), + "private_key": private_key or "", + } - # Invoke the build. - logger.debug('Invoking build: %s', self.builder_realm) - logger.debug('With Arguments: %s', build_arguments) + # If the build args have no buildpack, mark it as a failure before sending + # it to a builder instance. + if not build_arguments["build_package"] and not build_arguments["git"]: + logger.error( + "%s: insufficient build args: %s", + self._current_job.repo_build.uuid, + build_arguments, + ) + yield From( + self._build_failure( + "Insufficient build arguments. No buildpack available." + ) + ) + raise Return() - def build_complete_callback(result): - """ This function is used to execute a coroutine as the callback. """ - trollius.ensure_future(self._build_complete(result)) + # Invoke the build. + logger.debug("Invoking build: %s", self.builder_realm) + logger.debug("With Arguments: %s", build_arguments) - self.call("io.quay.builder.build", **build_arguments).add_done_callback(build_complete_callback) + def build_complete_callback(result): + """ This function is used to execute a coroutine as the callback. """ + trollius.ensure_future(self._build_complete(result)) - # Set the heartbeat for the future. If the builder never receives the build call, - # then this will cause a timeout after 30 seconds. We know the builder has registered - # by this point, so it makes sense to have a timeout. - self._last_heartbeat = datetime.datetime.utcnow() + BUILD_HEARTBEAT_DELAY + self.call("io.quay.builder.build", **build_arguments).add_done_callback( + build_complete_callback + ) - @staticmethod - def extract_dockerfile_args(build_config): - dockerfile_path = build_config.get('build_subdir', '') - context = build_config.get('context', '') - if not (dockerfile_path == '' or context == ''): - # This should not happen and can be removed when we centralize validating build_config - dockerfile_abspath = slash_join('', dockerfile_path) - if ".." in os.path.relpath(dockerfile_abspath, context): - return os.path.split(dockerfile_path) - dockerfile_path = os.path.relpath(dockerfile_abspath, context) + # Set the heartbeat for the future. If the builder never receives the build call, + # then this will cause a timeout after 30 seconds. We know the builder has registered + # by this point, so it makes sense to have a timeout. 
+ self._last_heartbeat = datetime.datetime.utcnow() + BUILD_HEARTBEAT_DELAY - return context, dockerfile_path + @staticmethod + def extract_dockerfile_args(build_config): + dockerfile_path = build_config.get("build_subdir", "") + context = build_config.get("context", "") + if not (dockerfile_path == "" or context == ""): + # This should not happen and can be removed when we centralize validating build_config + dockerfile_abspath = slash_join("", dockerfile_path) + if ".." in os.path.relpath(dockerfile_abspath, context): + return os.path.split(dockerfile_path) + dockerfile_path = os.path.relpath(dockerfile_abspath, context) - @staticmethod - def _commit_sha(build_config): - """ Determines whether the metadata is using an old schema or not and returns the commit. """ - commit_sha = build_config['trigger_metadata'].get('commit', '') - old_commit_sha = build_config['trigger_metadata'].get('commit_sha', '') - return commit_sha or old_commit_sha + return context, dockerfile_path - @staticmethod - def name_and_path(subdir): - """ Returns the dockerfile path and name """ - if subdir.endswith("/"): - subdir += "Dockerfile" - elif not subdir.endswith("Dockerfile"): - subdir += "/Dockerfile" - return os.path.split(subdir) + @staticmethod + def _commit_sha(build_config): + """ Determines whether the metadata is using an old schema or not and returns the commit. """ + commit_sha = build_config["trigger_metadata"].get("commit", "") + old_commit_sha = build_config["trigger_metadata"].get("commit_sha", "") + return commit_sha or old_commit_sha - @staticmethod - def _total_completion(statuses, total_images): - """ Returns the current amount completion relative to the total completion of a build. """ - percentage_with_sizes = float(len(statuses.values())) / total_images - sent_bytes = sum([status['current'] for status in statuses.values()]) - total_bytes = sum([status['total'] for status in statuses.values()]) - return float(sent_bytes) / total_bytes * percentage_with_sizes + @staticmethod + def name_and_path(subdir): + """ Returns the dockerfile path and name """ + if subdir.endswith("/"): + subdir += "Dockerfile" + elif not subdir.endswith("Dockerfile"): + subdir += "/Dockerfile" + return os.path.split(subdir) - @staticmethod - def _process_pushpull_status(status_dict, current_phase, docker_data, images): - """ Processes the status of a push or pull by updating the provided status_dict and images. """ - if not docker_data: - return + @staticmethod + def _total_completion(statuses, total_images): + """ Returns the current amount completion relative to the total completion of a build. """ + percentage_with_sizes = float(len(statuses.values())) / total_images + sent_bytes = sum([status["current"] for status in statuses.values()]) + total_bytes = sum([status["total"] for status in statuses.values()]) + return float(sent_bytes) / total_bytes * percentage_with_sizes - num_images = 0 - status_completion_key = '' + @staticmethod + def _process_pushpull_status(status_dict, current_phase, docker_data, images): + """ Processes the status of a push or pull by updating the provided status_dict and images. 
""" + if not docker_data: + return - if current_phase == 'pushing': - status_completion_key = 'push_completion' - num_images = status_dict['total_commands'] - elif current_phase == 'pulling': - status_completion_key = 'pull_completion' - elif current_phase == 'priming-cache': - status_completion_key = 'cache_completion' - else: - return + num_images = 0 + status_completion_key = "" - if 'progressDetail' in docker_data and 'id' in docker_data: - image_id = docker_data['id'] - detail = docker_data['progressDetail'] + if current_phase == "pushing": + status_completion_key = "push_completion" + num_images = status_dict["total_commands"] + elif current_phase == "pulling": + status_completion_key = "pull_completion" + elif current_phase == "priming-cache": + status_completion_key = "cache_completion" + else: + return - if 'current' in detail and 'total' in detail: - images[image_id] = detail - status_dict[status_completion_key] = \ - BuildComponent._total_completion(images, max(len(images), num_images)) + if "progressDetail" in docker_data and "id" in docker_data: + image_id = docker_data["id"] + detail = docker_data["progressDetail"] + if "current" in detail and "total" in detail: + images[image_id] = detail + status_dict[status_completion_key] = BuildComponent._total_completion( + images, max(len(images), num_images) + ) - @trollius.coroutine - def _on_log_message(self, phase, json_data): - """ Tails log messages and updates the build status. """ - # Update the heartbeat. - self._last_heartbeat = datetime.datetime.utcnow() + @trollius.coroutine + def _on_log_message(self, phase, json_data): + """ Tails log messages and updates the build status. """ + # Update the heartbeat. + self._last_heartbeat = datetime.datetime.utcnow() - # Parse any of the JSON data logged. - log_data = {} - if json_data: - try: - log_data = json.loads(json_data) - except ValueError: - pass + # Parse any of the JSON data logged. + log_data = {} + if json_data: + try: + log_data = json.loads(json_data) + except ValueError: + pass - # Extract the current status message (if any). - fully_unwrapped = '' - keys_to_extract = ['error', 'status', 'stream'] - for key in keys_to_extract: - if key in log_data: - fully_unwrapped = log_data[key] - break + # Extract the current status message (if any). + fully_unwrapped = "" + keys_to_extract = ["error", "status", "stream"] + for key in keys_to_extract: + if key in log_data: + fully_unwrapped = log_data[key] + break - # Determine if this is a step string. - current_step = None - current_status_string = str(fully_unwrapped.encode('utf-8')) + # Determine if this is a step string. + current_step = None + current_status_string = str(fully_unwrapped.encode("utf-8")) - if current_status_string and phase == BUILD_PHASE.BUILDING: - current_step = extract_current_step(current_status_string) + if current_status_string and phase == BUILD_PHASE.BUILDING: + current_step = extract_current_step(current_status_string) + + # Parse and update the phase and the status_dict. The status dictionary contains + # the pull/push progress, as well as the current step index. 
+ with self._build_status as status_dict: + try: + changed_phase = yield From( + self._build_status.set_phase(phase, log_data.get("status_data")) + ) + if changed_phase: + logger.debug( + "Build %s has entered a new phase: %s", + self.builder_realm, + phase, + ) + elif self._current_job.repo_build.phase == BUILD_PHASE.CANCELLED: + build_id = self._current_job.repo_build.uuid + logger.debug( + "Trying to move cancelled build into phase: %s with id: %s", + phase, + build_id, + ) + raise Return(False) + except InvalidRepositoryBuildException: + build_id = self._current_job.repo_build.uuid + logger.warning( + "Build %s was not found; repo was probably deleted", build_id + ) + raise Return(False) + + BuildComponent._process_pushpull_status( + status_dict, phase, log_data, self._image_info + ) + + # If the current message represents the beginning of a new step, then update the + # current command index. + if current_step is not None: + status_dict["current_command"] = current_step + + # If the json data contains an error, then something went wrong with a push or pull. + if "error" in log_data: + yield From(self._build_status.set_error(log_data["error"])) + + if current_step is not None: + yield From(self._build_status.set_command(current_status_string)) + elif phase == BUILD_PHASE.BUILDING: + yield From(self._build_status.append_log(current_status_string)) + raise Return(True) + + @trollius.coroutine + def _determine_cache_tag( + self, command_comments, base_image_name, base_image_tag, base_image_id + ): + with self._build_status as status_dict: + status_dict["total_commands"] = len(command_comments) + 1 + + logger.debug( + "Checking cache on realm %s. Base image: %s:%s (%s)", + self.builder_realm, + base_image_name, + base_image_tag, + base_image_id, + ) + + tag_found = self._current_job.determine_cached_tag( + base_image_id, command_comments + ) + raise Return(tag_found or "") + + @trollius.coroutine + def _build_failure(self, error_message, exception=None): + """ Handles and logs a failed build. """ + yield From( + self._build_status.set_error( + error_message, {"internal_error": str(exception) if exception else None} + ) + ) - # Parse and update the phase and the status_dict. The status dictionary contains - # the pull/push progress, as well as the current step index. - with self._build_status as status_dict: - try: - changed_phase = yield From(self._build_status.set_phase(phase, log_data.get('status_data'))) - if changed_phase: - logger.debug('Build %s has entered a new phase: %s', self.builder_realm, phase) - elif self._current_job.repo_build.phase == BUILD_PHASE.CANCELLED: - build_id = self._current_job.repo_build.uuid - logger.debug('Trying to move cancelled build into phase: %s with id: %s', phase, build_id) - raise Return(False) - except InvalidRepositoryBuildException: build_id = self._current_job.repo_build.uuid - logger.warning('Build %s was not found; repo was probably deleted', build_id) - raise Return(False) + logger.warning("Build %s failed with message: %s", build_id, error_message) - BuildComponent._process_pushpull_status(status_dict, phase, log_data, self._image_info) - - # If the current message represents the beginning of a new step, then update the - # current command index. - if current_step is not None: - status_dict['current_command'] = current_step - - # If the json data contains an error, then something went wrong with a push or pull. 
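The current_step updated above comes from extract_current_step (reformatted later in this patch, in buildman/component/buildparse.py), which recognizes both the old and the new Docker step headers. A brief usage sketch, assuming the patched module is importable:

from buildman.component.buildparse import extract_current_step

print(extract_current_step("Step 4/13 : ARG somearg=foo"))      # 4   (newer "Step N/M :" format)
print(extract_current_step("Step 12 : RUN make"))                # 12  (older "Step N :" format)
print(extract_current_step("Removing intermediate container"))   # None (not a step header)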
- if 'error' in log_data: - yield From(self._build_status.set_error(log_data['error'])) - - if current_step is not None: - yield From(self._build_status.set_command(current_status_string)) - elif phase == BUILD_PHASE.BUILDING: - yield From(self._build_status.append_log(current_status_string)) - raise Return(True) - - @trollius.coroutine - def _determine_cache_tag(self, command_comments, base_image_name, base_image_tag, base_image_id): - with self._build_status as status_dict: - status_dict['total_commands'] = len(command_comments) + 1 - - logger.debug('Checking cache on realm %s. Base image: %s:%s (%s)', self.builder_realm, - base_image_name, base_image_tag, base_image_id) - - tag_found = self._current_job.determine_cached_tag(base_image_id, command_comments) - raise Return(tag_found or '') - - @trollius.coroutine - def _build_failure(self, error_message, exception=None): - """ Handles and logs a failed build. """ - yield From(self._build_status.set_error(error_message, { - 'internal_error': str(exception) if exception else None - })) - - build_id = self._current_job.repo_build.uuid - logger.warning('Build %s failed with message: %s', build_id, error_message) - - # Mark that the build has finished (in an error state) - yield From(self._build_finished(BuildJobResult.ERROR)) - - @trollius.coroutine - def _build_complete(self, result): - """ Wraps up a completed build. Handles any errors and calls self._build_finished. """ - build_id = self._current_job.repo_build.uuid - - try: - # Retrieve the result. This will raise an ApplicationError on any error that occurred. - result_value = result.result() - kwargs = {} - - # Note: If we are hitting an older builder that didn't return ANY map data, then the result - # value will be a bool instead of a proper CallResult object. - # Therefore: we have a try-except guard here to ensure we don't hit this pitfall. - try: - kwargs = result_value.kwresults - except: - pass - - try: - yield From(self._build_status.set_phase(BUILD_PHASE.COMPLETE)) - except InvalidRepositoryBuildException: - logger.warning('Build %s was not found; repo was probably deleted', build_id) - raise Return() - - yield From(self._build_finished(BuildJobResult.COMPLETE)) - - # Label the pushed manifests with the build metadata. - manifest_digests = kwargs.get('digests') or [] - repository = registry_model.lookup_repository(self._current_job.namespace, - self._current_job.repo_name) - if repository is not None: - for digest in manifest_digests: - with UseThenDisconnect(app.config): - manifest = registry_model.lookup_manifest_by_digest(repository, digest, - require_available=True) - if manifest is None: - continue - - registry_model.create_manifest_label(manifest, INTERNAL_LABEL_BUILD_UUID, - build_id, 'internal', 'text/plain') - - # Send the notification that the build has completed successfully. - self._current_job.send_notification('build_success', - image_id=kwargs.get('image_id'), - manifest_digests=manifest_digests) - except ApplicationError as aex: - worker_error = WorkerError(aex.error, aex.kwargs.get('base_error')) - - # Write the error to the log. - yield From(self._build_status.set_error(worker_error.public_message(), - worker_error.extra_data(), - internal_error=worker_error.is_internal_error(), - requeued=self._current_job.has_retries_remaining())) - - # Send the notification that the build has failed. - self._current_job.send_notification('build_failure', - error_message=worker_error.public_message()) - - # Mark the build as completed. 
- if worker_error.is_internal_error(): - logger.exception('[BUILD INTERNAL ERROR: Remote] Build ID: %s: %s', build_id, - worker_error.public_message()) - yield From(self._build_finished(BuildJobResult.INCOMPLETE)) - else: - logger.debug('Got remote failure exception for build %s: %s', build_id, aex) + # Mark that the build has finished (in an error state) yield From(self._build_finished(BuildJobResult.ERROR)) - # Remove the current job. - self._current_job = None + @trollius.coroutine + def _build_complete(self, result): + """ Wraps up a completed build. Handles any errors and calls self._build_finished. """ + build_id = self._current_job.repo_build.uuid + try: + # Retrieve the result. This will raise an ApplicationError on any error that occurred. + result_value = result.result() + kwargs = {} - @trollius.coroutine - def _build_finished(self, job_status): - """ Alerts the parent that a build has completed and sets the status back to running. """ - yield From(self.parent_manager.job_completed(self._current_job, job_status, self)) + # Note: If we are hitting an older builder that didn't return ANY map data, then the result + # value will be a bool instead of a proper CallResult object. + # Therefore: we have a try-except guard here to ensure we don't hit this pitfall. + try: + kwargs = result_value.kwresults + except: + pass - # Set the component back to a running state. - yield From(self._set_status(ComponentStatus.RUNNING)) + try: + yield From(self._build_status.set_phase(BUILD_PHASE.COMPLETE)) + except InvalidRepositoryBuildException: + logger.warning( + "Build %s was not found; repo was probably deleted", build_id + ) + raise Return() - @staticmethod - def _ping(): - """ Ping pong. """ - return 'pong' + yield From(self._build_finished(BuildJobResult.COMPLETE)) - @trollius.coroutine - def _on_ready(self, token, version): - logger.debug('On ready called (token "%s")', token) - self._worker_version = version + # Label the pushed manifests with the build metadata. + manifest_digests = kwargs.get("digests") or [] + repository = registry_model.lookup_repository( + self._current_job.namespace, self._current_job.repo_name + ) + if repository is not None: + for digest in manifest_digests: + with UseThenDisconnect(app.config): + manifest = registry_model.lookup_manifest_by_digest( + repository, digest, require_available=True + ) + if manifest is None: + continue - if not version in SUPPORTED_WORKER_VERSIONS: - logger.warning('Build component (token "%s") is running an out-of-date version: %s', token, - version) - raise Return(False) + registry_model.create_manifest_label( + manifest, + INTERNAL_LABEL_BUILD_UUID, + build_id, + "internal", + "text/plain", + ) - if self._component_status != ComponentStatus.WAITING: - logger.warning('Build component (token "%s") is already connected', self.expected_token) - raise Return(False) + # Send the notification that the build has completed successfully. + self._current_job.send_notification( + "build_success", + image_id=kwargs.get("image_id"), + manifest_digests=manifest_digests, + ) + except ApplicationError as aex: + worker_error = WorkerError(aex.error, aex.kwargs.get("base_error")) - if token != self.expected_token: - logger.warning('Builder token mismatch. Expected: "%s". Found: "%s"', self.expected_token, - token) - raise Return(False) + # Write the error to the log. 
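The worker_error consumed in this handler is a WorkerError (reformatted later in this patch, in buildman/jobutil/workererror.py); it maps the builder's error code to a public message and decides whether the failure counts as internal. A brief usage sketch, assuming the patched module is importable:

from buildman.jobutil.workererror import WorkerError

err = WorkerError("io.quay.builder.gitfailure", "remote: repository not found")
print(err.is_internal_error())   # False
print(err.public_message())      # Could not clone git repository: remote: repository not found
print(err.extra_data())          # {'base_error': 'remote: repository not found', 'error_code': 'io.quay.builder.gitfailure'}

unknown = WorkerError("io.quay.builder.doesnotexist")
print(unknown.is_internal_error())   # True  (unknown codes are treated as internal)
print(unknown.public_message())      # An unknown error occurred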
+ yield From( + self._build_status.set_error( + worker_error.public_message(), + worker_error.extra_data(), + internal_error=worker_error.is_internal_error(), + requeued=self._current_job.has_retries_remaining(), + ) + ) - yield From(self._set_status(ComponentStatus.RUNNING)) + # Send the notification that the build has failed. + self._current_job.send_notification( + "build_failure", error_message=worker_error.public_message() + ) - # Start the heartbeat check and updating loop. - loop = trollius.get_event_loop() - loop.create_task(self._heartbeat()) - logger.debug('Build worker %s is connected and ready', self.builder_realm) - raise Return(True) + # Mark the build as completed. + if worker_error.is_internal_error(): + logger.exception( + "[BUILD INTERNAL ERROR: Remote] Build ID: %s: %s", + build_id, + worker_error.public_message(), + ) + yield From(self._build_finished(BuildJobResult.INCOMPLETE)) + else: + logger.debug( + "Got remote failure exception for build %s: %s", build_id, aex + ) + yield From(self._build_finished(BuildJobResult.ERROR)) - @trollius.coroutine - def _set_status(self, phase): - if phase == ComponentStatus.RUNNING: - yield From(self.parent_manager.build_component_ready(self)) + # Remove the current job. + self._current_job = None - self._component_status = phase + @trollius.coroutine + def _build_finished(self, job_status): + """ Alerts the parent that a build has completed and sets the status back to running. """ + yield From( + self.parent_manager.job_completed(self._current_job, job_status, self) + ) - def _on_heartbeat(self): - """ Updates the last known heartbeat. """ - if self._component_status == ComponentStatus.TIMED_OUT: - return + # Set the component back to a running state. + yield From(self._set_status(ComponentStatus.RUNNING)) - logger.debug('Got heartbeat on realm %s', self.builder_realm) - self._last_heartbeat = datetime.datetime.utcnow() + @staticmethod + def _ping(): + """ Ping pong. """ + return "pong" - @trollius.coroutine - def _heartbeat(self): - """ Coroutine that runs every HEARTBEAT_TIMEOUT seconds, both checking the worker's heartbeat + @trollius.coroutine + def _on_ready(self, token, version): + logger.debug('On ready called (token "%s")', token) + self._worker_version = version + + if not version in SUPPORTED_WORKER_VERSIONS: + logger.warning( + 'Build component (token "%s") is running an out-of-date version: %s', + token, + version, + ) + raise Return(False) + + if self._component_status != ComponentStatus.WAITING: + logger.warning( + 'Build component (token "%s") is already connected', self.expected_token + ) + raise Return(False) + + if token != self.expected_token: + logger.warning( + 'Builder token mismatch. Expected: "%s". Found: "%s"', + self.expected_token, + token, + ) + raise Return(False) + + yield From(self._set_status(ComponentStatus.RUNNING)) + + # Start the heartbeat check and updating loop. + loop = trollius.get_event_loop() + loop.create_task(self._heartbeat()) + logger.debug("Build worker %s is connected and ready", self.builder_realm) + raise Return(True) + + @trollius.coroutine + def _set_status(self, phase): + if phase == ComponentStatus.RUNNING: + yield From(self.parent_manager.build_component_ready(self)) + + self._component_status = phase + + def _on_heartbeat(self): + """ Updates the last known heartbeat. 
""" + if self._component_status == ComponentStatus.TIMED_OUT: + return + + logger.debug("Got heartbeat on realm %s", self.builder_realm) + self._last_heartbeat = datetime.datetime.utcnow() + + @trollius.coroutine + def _heartbeat(self): + """ Coroutine that runs every HEARTBEAT_TIMEOUT seconds, both checking the worker's heartbeat and updating the heartbeat in the build status dictionary (if applicable). This allows the build system to catch crashes from either end. """ - yield From(trollius.sleep(INITIAL_TIMEOUT)) + yield From(trollius.sleep(INITIAL_TIMEOUT)) - while True: - # If the component is no longer running or actively building, nothing more to do. - if (self._component_status != ComponentStatus.RUNNING and - self._component_status != ComponentStatus.BUILDING): - raise Return() + while True: + # If the component is no longer running or actively building, nothing more to do. + if ( + self._component_status != ComponentStatus.RUNNING + and self._component_status != ComponentStatus.BUILDING + ): + raise Return() - # If there is an active build, write the heartbeat to its status. - if self._build_status is not None: - with self._build_status as status_dict: - status_dict['heartbeat'] = int(time.time()) + # If there is an active build, write the heartbeat to its status. + if self._build_status is not None: + with self._build_status as status_dict: + status_dict["heartbeat"] = int(time.time()) - # Mark the build item. - current_job = self._current_job - if current_job is not None: - yield From(self.parent_manager.job_heartbeat(current_job)) + # Mark the build item. + current_job = self._current_job + if current_job is not None: + yield From(self.parent_manager.job_heartbeat(current_job)) - # Check the heartbeat from the worker. - logger.debug('Checking heartbeat on realm %s', self.builder_realm) - if (self._last_heartbeat and - self._last_heartbeat < datetime.datetime.utcnow() - HEARTBEAT_DELTA): - logger.debug('Heartbeat on realm %s has expired: %s', self.builder_realm, - self._last_heartbeat) + # Check the heartbeat from the worker. + logger.debug("Checking heartbeat on realm %s", self.builder_realm) + if ( + self._last_heartbeat + and self._last_heartbeat < datetime.datetime.utcnow() - HEARTBEAT_DELTA + ): + logger.debug( + "Heartbeat on realm %s has expired: %s", + self.builder_realm, + self._last_heartbeat, + ) - yield From(self._timeout()) - raise Return() + yield From(self._timeout()) + raise Return() - logger.debug('Heartbeat on realm %s is valid: %s (%s).', self.builder_realm, - self._last_heartbeat, self._component_status) + logger.debug( + "Heartbeat on realm %s is valid: %s (%s).", + self.builder_realm, + self._last_heartbeat, + self._component_status, + ) - yield From(trollius.sleep(HEARTBEAT_TIMEOUT)) + yield From(trollius.sleep(HEARTBEAT_TIMEOUT)) - @trollius.coroutine - def _timeout(self): - if self._component_status == ComponentStatus.TIMED_OUT: - raise Return() + @trollius.coroutine + def _timeout(self): + if self._component_status == ComponentStatus.TIMED_OUT: + raise Return() - yield From(self._set_status(ComponentStatus.TIMED_OUT)) - logger.warning('Build component with realm %s has timed out', self.builder_realm) + yield From(self._set_status(ComponentStatus.TIMED_OUT)) + logger.warning( + "Build component with realm %s has timed out", self.builder_realm + ) - # If we still have a running job, then it has not completed and we need to tell the parent - # manager. 
- if self._current_job is not None: - yield From(self._build_status.set_error('Build worker timed out', internal_error=True, - requeued=self._current_job.has_retries_remaining())) + # If we still have a running job, then it has not completed and we need to tell the parent + # manager. + if self._current_job is not None: + yield From( + self._build_status.set_error( + "Build worker timed out", + internal_error=True, + requeued=self._current_job.has_retries_remaining(), + ) + ) - build_id = self._current_job.build_uuid - logger.error('[BUILD INTERNAL ERROR: Timeout] Build ID: %s', build_id) - yield From(self.parent_manager.job_completed(self._current_job, - BuildJobResult.INCOMPLETE, - self)) + build_id = self._current_job.build_uuid + logger.error("[BUILD INTERNAL ERROR: Timeout] Build ID: %s", build_id) + yield From( + self.parent_manager.job_completed( + self._current_job, BuildJobResult.INCOMPLETE, self + ) + ) - # Unregister the current component so that it cannot be invoked again. - self.parent_manager.build_component_disposed(self, True) + # Unregister the current component so that it cannot be invoked again. + self.parent_manager.build_component_disposed(self, True) - # Remove the job reference. - self._current_job = None + # Remove the job reference. + self._current_job = None - @trollius.coroutine - def cancel_build(self): - self.parent_manager.build_component_disposed(self, True) - self._current_job = None - yield From(self._set_status(ComponentStatus.RUNNING)) + @trollius.coroutine + def cancel_build(self): + self.parent_manager.build_component_disposed(self, True) + self._current_job = None + yield From(self._set_status(ComponentStatus.RUNNING)) diff --git a/buildman/component/buildparse.py b/buildman/component/buildparse.py index 3560c0861..18d678cae 100644 --- a/buildman/component/buildparse.py +++ b/buildman/component/buildparse.py @@ -1,15 +1,16 @@ import re + def extract_current_step(current_status_string): - """ Attempts to extract the current step numeric identifier from the given status string. Returns the step + """ Attempts to extract the current step numeric identifier from the given status string. Returns the step number or None if none. 
""" - # Older format: `Step 12 :` - # Newer format: `Step 4/13 :` - step_increment = re.search(r'Step ([0-9]+)/([0-9]+) :', current_status_string) - if step_increment: - return int(step_increment.group(1)) + # Older format: `Step 12 :` + # Newer format: `Step 4/13 :` + step_increment = re.search(r"Step ([0-9]+)/([0-9]+) :", current_status_string) + if step_increment: + return int(step_increment.group(1)) - step_increment = re.search(r'Step ([0-9]+) :', current_status_string) - if step_increment: - return int(step_increment.group(1)) + step_increment = re.search(r"Step ([0-9]+) :", current_status_string) + if step_increment: + return int(step_increment.group(1)) diff --git a/buildman/component/test/test_buildcomponent.py b/buildman/component/test/test_buildcomponent.py index c4e026916..98d70dab0 100644 --- a/buildman/component/test/test_buildcomponent.py +++ b/buildman/component/test/test_buildcomponent.py @@ -3,34 +3,62 @@ import pytest from buildman.component.buildcomponent import BuildComponent -@pytest.mark.parametrize('input,expected_path,expected_file', [ - ("", "/", "Dockerfile"), - ("/", "/", "Dockerfile"), - ("/Dockerfile", "/", "Dockerfile"), - ("/server.Dockerfile", "/", "server.Dockerfile"), - ("/somepath", "/somepath", "Dockerfile"), - ("/somepath/", "/somepath", "Dockerfile"), - ("/somepath/Dockerfile", "/somepath", "Dockerfile"), - ("/somepath/server.Dockerfile", "/somepath", "server.Dockerfile"), - ("/somepath/some_other_path", "/somepath/some_other_path", "Dockerfile"), - ("/somepath/some_other_path/", "/somepath/some_other_path", "Dockerfile"), - ("/somepath/some_other_path/Dockerfile", "/somepath/some_other_path", "Dockerfile"), - ("/somepath/some_other_path/server.Dockerfile", "/somepath/some_other_path", "server.Dockerfile"), -]) +@pytest.mark.parametrize( + "input,expected_path,expected_file", + [ + ("", "/", "Dockerfile"), + ("/", "/", "Dockerfile"), + ("/Dockerfile", "/", "Dockerfile"), + ("/server.Dockerfile", "/", "server.Dockerfile"), + ("/somepath", "/somepath", "Dockerfile"), + ("/somepath/", "/somepath", "Dockerfile"), + ("/somepath/Dockerfile", "/somepath", "Dockerfile"), + ("/somepath/server.Dockerfile", "/somepath", "server.Dockerfile"), + ("/somepath/some_other_path", "/somepath/some_other_path", "Dockerfile"), + ("/somepath/some_other_path/", "/somepath/some_other_path", "Dockerfile"), + ( + "/somepath/some_other_path/Dockerfile", + "/somepath/some_other_path", + "Dockerfile", + ), + ( + "/somepath/some_other_path/server.Dockerfile", + "/somepath/some_other_path", + "server.Dockerfile", + ), + ], +) def test_path_is_dockerfile(input, expected_path, expected_file): - actual_path, actual_file = BuildComponent.name_and_path(input) - assert actual_path == expected_path - assert actual_file == expected_file + actual_path, actual_file = BuildComponent.name_and_path(input) + assert actual_path == expected_path + assert actual_file == expected_file -@pytest.mark.parametrize('build_config,context,dockerfile_path', [ - ({}, '', ''), - ({'build_subdir': '/builddir/Dockerfile'}, '', '/builddir/Dockerfile'), - ({'context': '/builddir'}, '/builddir', ''), - ({'context': '/builddir', 'build_subdir': '/builddir/Dockerfile'}, '/builddir', 'Dockerfile'), - ({'context': '/some_other_dir/Dockerfile', 'build_subdir': '/builddir/Dockerfile'}, '/builddir', 'Dockerfile'), - ({'context': '/', 'build_subdir':'Dockerfile'}, '/', 'Dockerfile') -]) + +@pytest.mark.parametrize( + "build_config,context,dockerfile_path", + [ + ({}, "", ""), + ({"build_subdir": "/builddir/Dockerfile"}, 
"", "/builddir/Dockerfile"), + ({"context": "/builddir"}, "/builddir", ""), + ( + {"context": "/builddir", "build_subdir": "/builddir/Dockerfile"}, + "/builddir", + "Dockerfile", + ), + ( + { + "context": "/some_other_dir/Dockerfile", + "build_subdir": "/builddir/Dockerfile", + }, + "/builddir", + "Dockerfile", + ), + ({"context": "/", "build_subdir": "Dockerfile"}, "/", "Dockerfile"), + ], +) def test_extract_dockerfile_args(build_config, context, dockerfile_path): - actual_context, actual_dockerfile_path = BuildComponent.extract_dockerfile_args(build_config) - assert context == actual_context - assert dockerfile_path == actual_dockerfile_path + actual_context, actual_dockerfile_path = BuildComponent.extract_dockerfile_args( + build_config + ) + assert context == actual_context + assert dockerfile_path == actual_dockerfile_path diff --git a/buildman/component/test/test_buildparse.py b/buildman/component/test/test_buildparse.py index 3bdb7295e..e40b20189 100644 --- a/buildman/component/test/test_buildparse.py +++ b/buildman/component/test/test_buildparse.py @@ -3,14 +3,17 @@ import pytest from buildman.component.buildparse import extract_current_step -@pytest.mark.parametrize('input,expected_step', [ - ("", None), - ("Step a :", None), - ("Step 1 :", 1), - ("Step 1 : ", 1), - ("Step 1/2 : ", 1), - ("Step 2/17 : ", 2), - ("Step 4/13 : ARG somearg=foo", 4), -]) +@pytest.mark.parametrize( + "input,expected_step", + [ + ("", None), + ("Step a :", None), + ("Step 1 :", 1), + ("Step 1 : ", 1), + ("Step 1/2 : ", 1), + ("Step 2/17 : ", 2), + ("Step 4/13 : ARG somearg=foo", 4), + ], +) def test_extract_current_step(input, expected_step): - assert extract_current_step(input) == expected_step + assert extract_current_step(input) == expected_step diff --git a/buildman/enums.py b/buildman/enums.py index f88d2b690..a7fe7bb99 100644 --- a/buildman/enums.py +++ b/buildman/enums.py @@ -1,21 +1,25 @@ from data.database import BUILD_PHASE + class BuildJobResult(object): - """ Build job result enum """ - INCOMPLETE = 'incomplete' - COMPLETE = 'complete' - ERROR = 'error' + """ Build job result enum """ + + INCOMPLETE = "incomplete" + COMPLETE = "complete" + ERROR = "error" class BuildServerStatus(object): - """ Build server status enum """ - STARTING = 'starting' - RUNNING = 'running' - SHUTDOWN = 'shutting_down' - EXCEPTION = 'exception' + """ Build server status enum """ + + STARTING = "starting" + RUNNING = "running" + SHUTDOWN = "shutting_down" + EXCEPTION = "exception" + RESULT_PHASES = { - BuildJobResult.INCOMPLETE: BUILD_PHASE.INTERNAL_ERROR, - BuildJobResult.COMPLETE: BUILD_PHASE.COMPLETE, - BuildJobResult.ERROR: BUILD_PHASE.ERROR, + BuildJobResult.INCOMPLETE: BUILD_PHASE.INTERNAL_ERROR, + BuildJobResult.COMPLETE: BUILD_PHASE.COMPLETE, + BuildJobResult.ERROR: BUILD_PHASE.ERROR, } diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index f245ce2bf..8ffbe5cd8 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -14,170 +14,196 @@ logger = logging.getLogger(__name__) class BuildJobLoadException(Exception): - """ Exception raised if a build job could not be instantiated for some reason. """ - pass + """ Exception raised if a build job could not be instantiated for some reason. """ + + pass class BuildJob(object): - """ Represents a single in-progress build job. """ - def __init__(self, job_item): - self.job_item = job_item + """ Represents a single in-progress build job. 
""" - try: - self.job_details = json.loads(job_item.body) - self.build_notifier = BuildJobNotifier(self.build_uuid) - except ValueError: - raise BuildJobLoadException( - 'Could not parse build queue item config with ID %s' % self.job_details['build_uuid'] - ) + def __init__(self, job_item): + self.job_item = job_item - @property - def retries_remaining(self): - return self.job_item.retries_remaining + try: + self.job_details = json.loads(job_item.body) + self.build_notifier = BuildJobNotifier(self.build_uuid) + except ValueError: + raise BuildJobLoadException( + "Could not parse build queue item config with ID %s" + % self.job_details["build_uuid"] + ) - def has_retries_remaining(self): - return self.job_item.retries_remaining > 0 + @property + def retries_remaining(self): + return self.job_item.retries_remaining - def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None): - self.build_notifier.send_notification(kind, error_message, image_id, manifest_digests) + def has_retries_remaining(self): + return self.job_item.retries_remaining > 0 - @lru_cache(maxsize=1) - def _load_repo_build(self): - with UseThenDisconnect(app.config): - try: - return model.build.get_repository_build(self.build_uuid) - except model.InvalidRepositoryBuildException: - raise BuildJobLoadException( - 'Could not load repository build with ID %s' % self.build_uuid) + def send_notification( + self, kind, error_message=None, image_id=None, manifest_digests=None + ): + self.build_notifier.send_notification( + kind, error_message, image_id, manifest_digests + ) - @property - def build_uuid(self): - """ Returns the unique UUID for this build job. """ - return self.job_details['build_uuid'] + @lru_cache(maxsize=1) + def _load_repo_build(self): + with UseThenDisconnect(app.config): + try: + return model.build.get_repository_build(self.build_uuid) + except model.InvalidRepositoryBuildException: + raise BuildJobLoadException( + "Could not load repository build with ID %s" % self.build_uuid + ) - @property - def namespace(self): - """ Returns the namespace under which this build is running. """ - return self.repo_build.repository.namespace_user.username + @property + def build_uuid(self): + """ Returns the unique UUID for this build job. """ + return self.job_details["build_uuid"] - @property - def repo_name(self): - """ Returns the name of the repository under which this build is running. """ - return self.repo_build.repository.name + @property + def namespace(self): + """ Returns the namespace under which this build is running. """ + return self.repo_build.repository.namespace_user.username - @property - def repo_build(self): - return self._load_repo_build() + @property + def repo_name(self): + """ Returns the name of the repository under which this build is running. """ + return self.repo_build.repository.name - def get_build_package_url(self, user_files): - """ Returns the URL of the build package for this build, if any or empty string if none. """ - archive_url = self.build_config.get('archive_url', None) - if archive_url: - return archive_url + @property + def repo_build(self): + return self._load_repo_build() - if not self.repo_build.resource_key: - return '' + def get_build_package_url(self, user_files): + """ Returns the URL of the build package for this build, if any or empty string if none. 
""" + archive_url = self.build_config.get("archive_url", None) + if archive_url: + return archive_url - return user_files.get_file_url(self.repo_build.resource_key, '127.0.0.1', requires_cors=False) + if not self.repo_build.resource_key: + return "" - @property - def pull_credentials(self): - """ Returns the pull credentials for this job, or None if none. """ - return self.job_details.get('pull_credentials') + return user_files.get_file_url( + self.repo_build.resource_key, "127.0.0.1", requires_cors=False + ) - @property - def build_config(self): - try: - return json.loads(self.repo_build.job_config) - except ValueError: - raise BuildJobLoadException( - 'Could not parse repository build job config with ID %s' % self.job_details['build_uuid'] - ) + @property + def pull_credentials(self): + """ Returns the pull credentials for this job, or None if none. """ + return self.job_details.get("pull_credentials") - def determine_cached_tag(self, base_image_id=None, cache_comments=None): - """ Returns the tag to pull to prime the cache or None if none. """ - cached_tag = self._determine_cached_tag_by_tag() - logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments) - return cached_tag + @property + def build_config(self): + try: + return json.loads(self.repo_build.job_config) + except ValueError: + raise BuildJobLoadException( + "Could not parse repository build job config with ID %s" + % self.job_details["build_uuid"] + ) - def _determine_cached_tag_by_tag(self): - """ Determines the cached tag by looking for one of the tags being built, and seeing if it + def determine_cached_tag(self, base_image_id=None, cache_comments=None): + """ Returns the tag to pull to prime the cache or None if none. """ + cached_tag = self._determine_cached_tag_by_tag() + logger.debug( + "Determined cached tag %s for %s: %s", + cached_tag, + base_image_id, + cache_comments, + ) + return cached_tag + + def _determine_cached_tag_by_tag(self): + """ Determines the cached tag by looking for one of the tags being built, and seeing if it exists in the repository. This is a fallback for when no comment information is available. 
""" - with UseThenDisconnect(app.config): - tags = self.build_config.get('docker_tags', ['latest']) - repository = RepositoryReference.for_repo_obj(self.repo_build.repository) - matching_tag = registry_model.find_matching_tag(repository, tags) - if matching_tag is not None: - return matching_tag.name + with UseThenDisconnect(app.config): + tags = self.build_config.get("docker_tags", ["latest"]) + repository = RepositoryReference.for_repo_obj(self.repo_build.repository) + matching_tag = registry_model.find_matching_tag(repository, tags) + if matching_tag is not None: + return matching_tag.name - most_recent_tag = registry_model.get_most_recent_tag(repository) - if most_recent_tag is not None: - return most_recent_tag.name + most_recent_tag = registry_model.get_most_recent_tag(repository) + if most_recent_tag is not None: + return most_recent_tag.name - return None + return None class BuildJobNotifier(object): - """ A class for sending notifications to a job that only relies on the build_uuid """ + """ A class for sending notifications to a job that only relies on the build_uuid """ - def __init__(self, build_uuid): - self.build_uuid = build_uuid + def __init__(self, build_uuid): + self.build_uuid = build_uuid - @property - def repo_build(self): - return self._load_repo_build() + @property + def repo_build(self): + return self._load_repo_build() - @lru_cache(maxsize=1) - def _load_repo_build(self): - try: - return model.build.get_repository_build(self.build_uuid) - except model.InvalidRepositoryBuildException: - raise BuildJobLoadException( - 'Could not load repository build with ID %s' % self.build_uuid) + @lru_cache(maxsize=1) + def _load_repo_build(self): + try: + return model.build.get_repository_build(self.build_uuid) + except model.InvalidRepositoryBuildException: + raise BuildJobLoadException( + "Could not load repository build with ID %s" % self.build_uuid + ) - @property - def build_config(self): - try: - return json.loads(self.repo_build.job_config) - except ValueError: - raise BuildJobLoadException( - 'Could not parse repository build job config with ID %s' % self.repo_build.uuid - ) + @property + def build_config(self): + try: + return json.loads(self.repo_build.job_config) + except ValueError: + raise BuildJobLoadException( + "Could not parse repository build job config with ID %s" + % self.repo_build.uuid + ) - def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None): - with UseThenDisconnect(app.config): - tags = self.build_config.get('docker_tags', ['latest']) - trigger = self.repo_build.trigger - if trigger is not None and trigger.id is not None: - trigger_kind = trigger.service.name - else: - trigger_kind = None + def send_notification( + self, kind, error_message=None, image_id=None, manifest_digests=None + ): + with UseThenDisconnect(app.config): + tags = self.build_config.get("docker_tags", ["latest"]) + trigger = self.repo_build.trigger + if trigger is not None and trigger.id is not None: + trigger_kind = trigger.service.name + else: + trigger_kind = None - event_data = { - 'build_id': self.repo_build.uuid, - 'build_name': self.repo_build.display_name, - 'docker_tags': tags, - 'trigger_id': trigger.uuid if trigger is not None else None, - 'trigger_kind': trigger_kind, - 'trigger_metadata': self.build_config.get('trigger_metadata', {}) - } + event_data = { + "build_id": self.repo_build.uuid, + "build_name": self.repo_build.display_name, + "docker_tags": tags, + "trigger_id": trigger.uuid if trigger is not None else None, + 
"trigger_kind": trigger_kind, + "trigger_metadata": self.build_config.get("trigger_metadata", {}), + } - if image_id is not None: - event_data['image_id'] = image_id + if image_id is not None: + event_data["image_id"] = image_id - if manifest_digests: - event_data['manifest_digests'] = manifest_digests + if manifest_digests: + event_data["manifest_digests"] = manifest_digests - if error_message is not None: - event_data['error_message'] = error_message + if error_message is not None: + event_data["error_message"] = error_message - # TODO: remove when more endpoints have been converted to using - # interfaces - repo = AttrDict({ - 'namespace_name': self.repo_build.repository.namespace_user.username, - 'name': self.repo_build.repository.name, - }) - spawn_notification(repo, kind, event_data, - subpage='build/%s' % self.repo_build.uuid, - pathargs=['build', self.repo_build.uuid]) + # TODO: remove when more endpoints have been converted to using + # interfaces + repo = AttrDict( + { + "namespace_name": self.repo_build.repository.namespace_user.username, + "name": self.repo_build.repository.name, + } + ) + spawn_notification( + repo, + kind, + event_data, + subpage="build/%s" % self.repo_build.uuid, + pathargs=["build", self.repo_build.uuid], + ) diff --git a/buildman/jobutil/buildstatus.py b/buildman/jobutil/buildstatus.py index 662dbaa10..f7bf4a767 100644 --- a/buildman/jobutil/buildstatus.py +++ b/buildman/jobutil/buildstatus.py @@ -13,76 +13,94 @@ logger = logging.getLogger(__name__) class StatusHandler(object): - """ Context wrapper for writing status to build logs. """ + """ Context wrapper for writing status to build logs. """ - def __init__(self, build_logs, repository_build_uuid): - self._current_phase = None - self._current_command = None - self._uuid = repository_build_uuid - self._build_logs = AsyncWrapper(build_logs) - self._sync_build_logs = build_logs - self._build_model = AsyncWrapper(model.build) + def __init__(self, build_logs, repository_build_uuid): + self._current_phase = None + self._current_command = None + self._uuid = repository_build_uuid + self._build_logs = AsyncWrapper(build_logs) + self._sync_build_logs = build_logs + self._build_model = AsyncWrapper(model.build) - self._status = { - 'total_commands': 0, - 'current_command': None, - 'push_completion': 0.0, - 'pull_completion': 0.0, - } + self._status = { + "total_commands": 0, + "current_command": None, + "push_completion": 0.0, + "pull_completion": 0.0, + } - # Write the initial status. - self.__exit__(None, None, None) + # Write the initial status. 
+ self.__exit__(None, None, None) - @coroutine - def _append_log_message(self, log_message, log_type=None, log_data=None): - log_data = log_data or {} - log_data['datetime'] = str(datetime.datetime.now()) + @coroutine + def _append_log_message(self, log_message, log_type=None, log_data=None): + log_data = log_data or {} + log_data["datetime"] = str(datetime.datetime.now()) - try: - yield From(self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data)) - except RedisError: - logger.exception('Could not save build log for build %s: %s', self._uuid, log_message) + try: + yield From( + self._build_logs.append_log_message( + self._uuid, log_message, log_type, log_data + ) + ) + except RedisError: + logger.exception( + "Could not save build log for build %s: %s", self._uuid, log_message + ) - @coroutine - def append_log(self, log_message, extra_data=None): - if log_message is None: - return + @coroutine + def append_log(self, log_message, extra_data=None): + if log_message is None: + return - yield From(self._append_log_message(log_message, log_data=extra_data)) + yield From(self._append_log_message(log_message, log_data=extra_data)) - @coroutine - def set_command(self, command, extra_data=None): - if self._current_command == command: - raise Return() + @coroutine + def set_command(self, command, extra_data=None): + if self._current_command == command: + raise Return() - self._current_command = command - yield From(self._append_log_message(command, self._build_logs.COMMAND, extra_data)) + self._current_command = command + yield From( + self._append_log_message(command, self._build_logs.COMMAND, extra_data) + ) - @coroutine - def set_error(self, error_message, extra_data=None, internal_error=False, requeued=False): - error_phase = BUILD_PHASE.INTERNAL_ERROR if internal_error and requeued else BUILD_PHASE.ERROR - yield From(self.set_phase(error_phase)) + @coroutine + def set_error( + self, error_message, extra_data=None, internal_error=False, requeued=False + ): + error_phase = ( + BUILD_PHASE.INTERNAL_ERROR + if internal_error and requeued + else BUILD_PHASE.ERROR + ) + yield From(self.set_phase(error_phase)) - extra_data = extra_data or {} - extra_data['internal_error'] = internal_error - yield From(self._append_log_message(error_message, self._build_logs.ERROR, extra_data)) + extra_data = extra_data or {} + extra_data["internal_error"] = internal_error + yield From( + self._append_log_message(error_message, self._build_logs.ERROR, extra_data) + ) - @coroutine - def set_phase(self, phase, extra_data=None): - if phase == self._current_phase: - raise Return(False) + @coroutine + def set_phase(self, phase, extra_data=None): + if phase == self._current_phase: + raise Return(False) - self._current_phase = phase - yield From(self._append_log_message(phase, self._build_logs.PHASE, extra_data)) + self._current_phase = phase + yield From(self._append_log_message(phase, self._build_logs.PHASE, extra_data)) - # Update the repository build with the new phase - raise Return(self._build_model.update_phase_then_close(self._uuid, phase)) + # Update the repository build with the new phase + raise Return(self._build_model.update_phase_then_close(self._uuid, phase)) - def __enter__(self): - return self._status + def __enter__(self): + return self._status - def __exit__(self, exc_type, value, traceback): - try: - self._sync_build_logs.set_status(self._uuid, self._status) - except RedisError: - logger.exception('Could not set status of build %s to %s', self._uuid, self._status) + def 
__exit__(self, exc_type, value, traceback): + try: + self._sync_build_logs.set_status(self._uuid, self._status) + except RedisError: + logger.exception( + "Could not set status of build %s to %s", self._uuid, self._status + ) diff --git a/buildman/jobutil/workererror.py b/buildman/jobutil/workererror.py index 9245f312e..111ffad2d 100644 --- a/buildman/jobutil/workererror.py +++ b/buildman/jobutil/workererror.py @@ -1,119 +1,99 @@ class WorkerError(object): - """ Helper class which represents errors raised by a build worker. """ - def __init__(self, error_code, base_message=None): - self._error_code = error_code - self._base_message = base_message + """ Helper class which represents errors raised by a build worker. """ - self._error_handlers = { - 'io.quay.builder.buildpackissue': { - 'message': 'Could not load build package', - 'is_internal': True, - }, + def __init__(self, error_code, base_message=None): + self._error_code = error_code + self._base_message = base_message - 'io.quay.builder.gitfailure': { - 'message': 'Could not clone git repository', - 'show_base_error': True, - }, + self._error_handlers = { + "io.quay.builder.buildpackissue": { + "message": "Could not load build package", + "is_internal": True, + }, + "io.quay.builder.gitfailure": { + "message": "Could not clone git repository", + "show_base_error": True, + }, + "io.quay.builder.gitcheckout": { + "message": "Could not checkout git ref. If you force pushed recently, " + + "the commit may be missing.", + "show_base_error": True, + }, + "io.quay.builder.cannotextractbuildpack": { + "message": "Could not extract the contents of the build package" + }, + "io.quay.builder.cannotpullforcache": { + "message": "Could not pull cached image", + "is_internal": True, + }, + "io.quay.builder.dockerfileissue": { + "message": "Could not find or parse Dockerfile", + "show_base_error": True, + }, + "io.quay.builder.cannotpullbaseimage": { + "message": "Could not pull base image", + "show_base_error": True, + }, + "io.quay.builder.internalerror": { + "message": "An internal error occurred while building. Please submit a ticket.", + "is_internal": True, + }, + "io.quay.builder.buildrunerror": { + "message": "Could not start the build process", + "is_internal": True, + }, + "io.quay.builder.builderror": { + "message": "A build step failed", + "show_base_error": True, + }, + "io.quay.builder.tagissue": { + "message": "Could not tag built image", + "is_internal": True, + }, + "io.quay.builder.pushissue": { + "message": "Could not push built image", + "show_base_error": True, + "is_internal": True, + }, + "io.quay.builder.dockerconnecterror": { + "message": "Could not connect to Docker daemon", + "is_internal": True, + }, + "io.quay.builder.missingorinvalidargument": { + "message": "Missing required arguments for builder", + "is_internal": True, + }, + "io.quay.builder.cachelookupissue": { + "message": "Error checking for a cached tag", + "is_internal": True, + }, + "io.quay.builder.errorduringphasetransition": { + "message": "Error during phase transition. If this problem persists " + + "please contact customer support.", + "is_internal": True, + }, + "io.quay.builder.clientrejectedtransition": { + "message": "Build can not be finished due to user cancellation." + }, + } - 'io.quay.builder.gitcheckout': { - 'message': 'Could not checkout git ref. 
If you force pushed recently, ' + - 'the commit may be missing.', - 'show_base_error': True, - }, + def is_internal_error(self): + handler = self._error_handlers.get(self._error_code) + return handler.get("is_internal", False) if handler else True - 'io.quay.builder.cannotextractbuildpack': { - 'message': 'Could not extract the contents of the build package' - }, + def public_message(self): + handler = self._error_handlers.get(self._error_code) + if not handler: + return "An unknown error occurred" - 'io.quay.builder.cannotpullforcache': { - 'message': 'Could not pull cached image', - 'is_internal': True - }, + message = handler["message"] + if handler.get("show_base_error", False) and self._base_message: + message = message + ": " + self._base_message - 'io.quay.builder.dockerfileissue': { - 'message': 'Could not find or parse Dockerfile', - 'show_base_error': True - }, + return message - 'io.quay.builder.cannotpullbaseimage': { - 'message': 'Could not pull base image', - 'show_base_error': True - }, + def extra_data(self): + if self._base_message: + return {"base_error": self._base_message, "error_code": self._error_code} - 'io.quay.builder.internalerror': { - 'message': 'An internal error occurred while building. Please submit a ticket.', - 'is_internal': True - }, - - 'io.quay.builder.buildrunerror': { - 'message': 'Could not start the build process', - 'is_internal': True - }, - - 'io.quay.builder.builderror': { - 'message': 'A build step failed', - 'show_base_error': True - }, - - 'io.quay.builder.tagissue': { - 'message': 'Could not tag built image', - 'is_internal': True - }, - - 'io.quay.builder.pushissue': { - 'message': 'Could not push built image', - 'show_base_error': True, - 'is_internal': True - }, - - 'io.quay.builder.dockerconnecterror': { - 'message': 'Could not connect to Docker daemon', - 'is_internal': True - }, - - 'io.quay.builder.missingorinvalidargument': { - 'message': 'Missing required arguments for builder', - 'is_internal': True - }, - - 'io.quay.builder.cachelookupissue': { - 'message': 'Error checking for a cached tag', - 'is_internal': True - }, - - 'io.quay.builder.errorduringphasetransition': { - 'message': 'Error during phase transition. If this problem persists ' + - 'please contact customer support.', - 'is_internal': True - }, - - 'io.quay.builder.clientrejectedtransition': { - 'message': 'Build can not be finished due to user cancellation.', - } - } - - def is_internal_error(self): - handler = self._error_handlers.get(self._error_code) - return handler.get('is_internal', False) if handler else True - - def public_message(self): - handler = self._error_handlers.get(self._error_code) - if not handler: - return 'An unknown error occurred' - - message = handler['message'] - if handler.get('show_base_error', False) and self._base_message: - message = message + ': ' + self._base_message - - return message - - def extra_data(self): - if self._base_message: - return { - 'base_error': self._base_message, - 'error_code': self._error_code - } - - return { - 'error_code': self._error_code - } + return {"error_code": self._error_code} diff --git a/buildman/manager/basemanager.py b/buildman/manager/basemanager.py index 23627830a..996a4eacc 100644 --- a/buildman/manager/basemanager.py +++ b/buildman/manager/basemanager.py @@ -1,71 +1,80 @@ from trollius import coroutine + class BaseManager(object): - """ Base for all worker managers. 
""" - def __init__(self, register_component, unregister_component, job_heartbeat_callback, - job_complete_callback, manager_hostname, heartbeat_period_sec): - self.register_component = register_component - self.unregister_component = unregister_component - self.job_heartbeat_callback = job_heartbeat_callback - self.job_complete_callback = job_complete_callback - self.manager_hostname = manager_hostname - self.heartbeat_period_sec = heartbeat_period_sec + """ Base for all worker managers. """ - @coroutine - def job_heartbeat(self, build_job): - """ Method invoked to tell the manager that a job is still running. This method will be called + def __init__( + self, + register_component, + unregister_component, + job_heartbeat_callback, + job_complete_callback, + manager_hostname, + heartbeat_period_sec, + ): + self.register_component = register_component + self.unregister_component = unregister_component + self.job_heartbeat_callback = job_heartbeat_callback + self.job_complete_callback = job_complete_callback + self.manager_hostname = manager_hostname + self.heartbeat_period_sec = heartbeat_period_sec + + @coroutine + def job_heartbeat(self, build_job): + """ Method invoked to tell the manager that a job is still running. This method will be called every few minutes. """ - self.job_heartbeat_callback(build_job) + self.job_heartbeat_callback(build_job) - def overall_setup_time(self): - """ Returns the number of seconds that the build system should wait before allowing the job + def overall_setup_time(self): + """ Returns the number of seconds that the build system should wait before allowing the job to be picked up again after called 'schedule'. """ - raise NotImplementedError + raise NotImplementedError - def shutdown(self): - """ Indicates that the build controller server is in a shutdown state and that no new jobs + def shutdown(self): + """ Indicates that the build controller server is in a shutdown state and that no new jobs or workers should be performed. Existing workers should be cleaned up once their jobs have completed """ - raise NotImplementedError + raise NotImplementedError - @coroutine - def schedule(self, build_job): - """ Schedules a queue item to be built. Returns a 2-tuple with (True, None) if the item was + @coroutine + def schedule(self, build_job): + """ Schedules a queue item to be built. Returns a 2-tuple with (True, None) if the item was properly scheduled and (False, a retry timeout in seconds) if all workers are busy or an error occurs. """ - raise NotImplementedError + raise NotImplementedError - def initialize(self, manager_config): - """ Runs any initialization code for the manager. Called once the server is in a ready state. + def initialize(self, manager_config): + """ Runs any initialization code for the manager. Called once the server is in a ready state. """ - raise NotImplementedError + raise NotImplementedError - @coroutine - def build_component_ready(self, build_component): - """ Method invoked whenever a build component announces itself as ready. + @coroutine + def build_component_ready(self, build_component): + """ Method invoked whenever a build component announces itself as ready. """ - raise NotImplementedError + raise NotImplementedError - def build_component_disposed(self, build_component, timed_out): - """ Method invoked whenever a build component has been disposed. The timed_out boolean indicates + def build_component_disposed(self, build_component, timed_out): + """ Method invoked whenever a build component has been disposed. 
The timed_out boolean indicates whether the component's heartbeat timed out. """ - raise NotImplementedError + raise NotImplementedError - @coroutine - def job_completed(self, build_job, job_status, build_component): - """ Method invoked once a job_item has completed, in some manner. The job_status will be + @coroutine + def job_completed(self, build_job, job_status, build_component): + """ Method invoked once a job_item has completed, in some manner. The job_status will be one of: incomplete, error, complete. Implementations of this method should call coroutine self.job_complete_callback with a status of Incomplete if they wish for the job to be automatically requeued. """ - raise NotImplementedError + raise NotImplementedError - def num_workers(self): - """ Returns the number of active build workers currently registered. This includes those + def num_workers(self): + """ Returns the number of active build workers currently registered. This includes those that are currently busy and awaiting more work. """ - raise NotImplementedError + raise NotImplementedError diff --git a/buildman/manager/buildcanceller.py b/buildman/manager/buildcanceller.py index dd49e9f38..c2ab2d9ad 100644 --- a/buildman/manager/buildcanceller.py +++ b/buildman/manager/buildcanceller.py @@ -5,23 +5,23 @@ from buildman.manager.noop_canceller import NoopCanceller logger = logging.getLogger(__name__) -CANCELLERS = {'ephemeral': OrchestratorCanceller} +CANCELLERS = {"ephemeral": OrchestratorCanceller} class BuildCanceller(object): - """ A class to manage cancelling a build """ + """ A class to manage cancelling a build """ - def __init__(self, app=None): - self.build_manager_config = app.config.get('BUILD_MANAGER') - if app is None or self.build_manager_config is None: - self.handler = NoopCanceller() - else: - self.handler = None + def __init__(self, app=None): + self.build_manager_config = app.config.get("BUILD_MANAGER") + if app is None or self.build_manager_config is None: + self.handler = NoopCanceller() + else: + self.handler = None - def try_cancel_build(self, uuid): - """ A method to kill a running build """ - if self.handler is None: - canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller) - self.handler = canceller(self.build_manager_config[1]) + def try_cancel_build(self, uuid): + """ A method to kill a running build """ + if self.handler is None: + canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller) + self.handler = canceller(self.build_manager_config[1]) - return self.handler.try_cancel_build(uuid) + return self.handler.try_cancel_build(uuid) diff --git a/buildman/manager/enterprise.py b/buildman/manager/enterprise.py index 3d32a61d0..0be01269c 100644 --- a/buildman/manager/enterprise.py +++ b/buildman/manager/enterprise.py @@ -7,86 +7,89 @@ from buildman.manager.basemanager import BaseManager from trollius import From, Return, coroutine -REGISTRATION_REALM = 'registration' +REGISTRATION_REALM = "registration" RETRY_TIMEOUT = 5 logger = logging.getLogger(__name__) + class DynamicRegistrationComponent(BaseComponent): - """ Component session that handles dynamic registration of the builder components. """ + """ Component session that handles dynamic registration of the builder components. 
""" - def onConnect(self): - self.join(REGISTRATION_REALM) + def onConnect(self): + self.join(REGISTRATION_REALM) - def onJoin(self, details): - logger.debug('Registering registration method') - yield From(self.register(self._worker_register, u'io.quay.buildworker.register')) + def onJoin(self, details): + logger.debug("Registering registration method") + yield From( + self.register(self._worker_register, u"io.quay.buildworker.register") + ) - def _worker_register(self): - realm = self.parent_manager.add_build_component() - logger.debug('Registering new build component+worker with realm %s', realm) - return realm + def _worker_register(self): + realm = self.parent_manager.add_build_component() + logger.debug("Registering new build component+worker with realm %s", realm) + return realm - def kind(self): - return 'registration' + def kind(self): + return "registration" class EnterpriseManager(BaseManager): - """ Build manager implementation for the Enterprise Registry. """ + """ Build manager implementation for the Enterprise Registry. """ - def __init__(self, *args, **kwargs): - self.ready_components = set() - self.all_components = set() - self.shutting_down = False + def __init__(self, *args, **kwargs): + self.ready_components = set() + self.all_components = set() + self.shutting_down = False - super(EnterpriseManager, self).__init__(*args, **kwargs) + super(EnterpriseManager, self).__init__(*args, **kwargs) - def initialize(self, manager_config): - # Add a component which is used by build workers for dynamic registration. Unlike - # production, build workers in enterprise are long-lived and register dynamically. - self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent) + def initialize(self, manager_config): + # Add a component which is used by build workers for dynamic registration. Unlike + # production, build workers in enterprise are long-lived and register dynamically. + self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent) - def overall_setup_time(self): - # Builders are already registered, so the setup time should be essentially instant. We therefore - # only return a minute here. - return 60 + def overall_setup_time(self): + # Builders are already registered, so the setup time should be essentially instant. We therefore + # only return a minute here. + return 60 - def add_build_component(self): - """ Adds a new build component for an Enterprise Registry. """ - # Generate a new unique realm ID for the build worker. - realm = str(uuid.uuid4()) - new_component = self.register_component(realm, BuildComponent, token="") - self.all_components.add(new_component) - return realm + def add_build_component(self): + """ Adds a new build component for an Enterprise Registry. """ + # Generate a new unique realm ID for the build worker. + realm = str(uuid.uuid4()) + new_component = self.register_component(realm, BuildComponent, token="") + self.all_components.add(new_component) + return realm - @coroutine - def schedule(self, build_job): - """ Schedules a build for an Enterprise Registry. """ - if self.shutting_down or not self.ready_components: - raise Return(False, RETRY_TIMEOUT) + @coroutine + def schedule(self, build_job): + """ Schedules a build for an Enterprise Registry. 
""" + if self.shutting_down or not self.ready_components: + raise Return(False, RETRY_TIMEOUT) - component = self.ready_components.pop() + component = self.ready_components.pop() - yield From(component.start_build(build_job)) + yield From(component.start_build(build_job)) - raise Return(True, None) + raise Return(True, None) - @coroutine - def build_component_ready(self, build_component): - self.ready_components.add(build_component) + @coroutine + def build_component_ready(self, build_component): + self.ready_components.add(build_component) - def shutdown(self): - self.shutting_down = True + def shutdown(self): + self.shutting_down = True - @coroutine - def job_completed(self, build_job, job_status, build_component): - yield From(self.job_complete_callback(build_job, job_status)) + @coroutine + def job_completed(self, build_job, job_status, build_component): + yield From(self.job_complete_callback(build_job, job_status)) - def build_component_disposed(self, build_component, timed_out): - self.all_components.remove(build_component) - if build_component in self.ready_components: - self.ready_components.remove(build_component) + def build_component_disposed(self, build_component, timed_out): + self.all_components.remove(build_component) + if build_component in self.ready_components: + self.ready_components.remove(build_component) - self.unregister_component(build_component) + self.unregister_component(build_component) - def num_workers(self): - return len(self.all_components) + def num_workers(self): + return len(self.all_components) diff --git a/buildman/manager/etcd_canceller.py b/buildman/manager/etcd_canceller.py index ce92a1bbc..d4b129e52 100644 --- a/buildman/manager/etcd_canceller.py +++ b/buildman/manager/etcd_canceller.py @@ -5,33 +5,36 @@ logger = logging.getLogger(__name__) class EtcdCanceller(object): - """ A class that sends a message to etcd to cancel a build """ + """ A class that sends a message to etcd to cancel a build """ - def __init__(self, config): - etcd_host = config.get('ETCD_HOST', '127.0.0.1') - etcd_port = config.get('ETCD_PORT', 2379) - etcd_ca_cert = config.get('ETCD_CA_CERT', None) - etcd_auth = config.get('ETCD_CERT_AND_KEY', None) - if etcd_auth is not None: - etcd_auth = tuple(etcd_auth) + def __init__(self, config): + etcd_host = config.get("ETCD_HOST", "127.0.0.1") + etcd_port = config.get("ETCD_PORT", 2379) + etcd_ca_cert = config.get("ETCD_CA_CERT", None) + etcd_auth = config.get("ETCD_CERT_AND_KEY", None) + if etcd_auth is not None: + etcd_auth = tuple(etcd_auth) - etcd_protocol = 'http' if etcd_auth is None else 'https' - logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port) - self._cancel_prefix = config.get('ETCD_CANCEL_PREFIX', 'cancel/') - self._etcd_client = etcd.Client( - host=etcd_host, - port=etcd_port, - cert=etcd_auth, - ca_cert=etcd_ca_cert, - protocol=etcd_protocol, - read_timeout=5) + etcd_protocol = "http" if etcd_auth is None else "https" + logger.debug("Connecting to etcd on %s:%s", etcd_host, etcd_port) + self._cancel_prefix = config.get("ETCD_CANCEL_PREFIX", "cancel/") + self._etcd_client = etcd.Client( + host=etcd_host, + port=etcd_port, + cert=etcd_auth, + ca_cert=etcd_ca_cert, + protocol=etcd_protocol, + read_timeout=5, + ) - def try_cancel_build(self, build_uuid): - """ Writes etcd message to cancel build_uuid. 
""" - logger.info("Cancelling build %s".format(build_uuid)) - try: - self._etcd_client.write("{}{}".format(self._cancel_prefix, build_uuid), build_uuid, ttl=60) - return True - except etcd.EtcdException: - logger.exception("Failed to write to etcd client %s", build_uuid) - return False + def try_cancel_build(self, build_uuid): + """ Writes etcd message to cancel build_uuid. """ + logger.info("Cancelling build %s".format(build_uuid)) + try: + self._etcd_client.write( + "{}{}".format(self._cancel_prefix, build_uuid), build_uuid, ttl=60 + ) + return True + except etcd.EtcdException: + logger.exception("Failed to write to etcd client %s", build_uuid) + return False diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py index e82d7a316..7921adbcc 100644 --- a/buildman/manager/executor.py +++ b/buildman/manager/executor.py @@ -29,532 +29,605 @@ from _init import ROOT_DIR logger = logging.getLogger(__name__) -ONE_HOUR = 60*60 +ONE_HOUR = 60 * 60 -_TAG_RETRY_COUNT = 3 # Number of times to retry adding tags. -_TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries. +_TAG_RETRY_COUNT = 3 # Number of times to retry adding tags. +_TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries. ENV = Environment(loader=FileSystemLoader(os.path.join(ROOT_DIR, "buildman/templates"))) -TEMPLATE = ENV.get_template('cloudconfig.yaml') +TEMPLATE = ENV.get_template("cloudconfig.yaml") CloudConfigContext().populate_jinja_environment(ENV) + class ExecutorException(Exception): - """ Exception raised when there is a problem starting or stopping a builder. + """ Exception raised when there is a problem starting or stopping a builder. """ - pass + + pass class BuilderExecutor(object): - def __init__(self, executor_config, manager_hostname): - """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for + def __init__(self, executor_config, manager_hostname): + """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for starting and stopping builders. """ - self.executor_config = executor_config - self.manager_hostname = manager_hostname + self.executor_config = executor_config + self.manager_hostname = manager_hostname - default_websocket_scheme = 'wss' if app.config['PREFERRED_URL_SCHEME'] == 'https' else 'ws' - self.websocket_scheme = executor_config.get("WEBSOCKET_SCHEME", default_websocket_scheme) + default_websocket_scheme = ( + "wss" if app.config["PREFERRED_URL_SCHEME"] == "https" else "ws" + ) + self.websocket_scheme = executor_config.get( + "WEBSOCKET_SCHEME", default_websocket_scheme + ) - @property - def name(self): - """ Name returns the unique name for this executor. """ - return self.executor_config.get('NAME') or self.__class__.__name__ + @property + def name(self): + """ Name returns the unique name for this executor. """ + return self.executor_config.get("NAME") or self.__class__.__name__ - @property - def setup_time(self): - """ Returns the amount of time (in seconds) to wait for the execution to start for the build. + @property + def setup_time(self): + """ Returns the amount of time (in seconds) to wait for the execution to start for the build. If None, the manager's default will be used. """ - return self.executor_config.get('SETUP_TIME') + return self.executor_config.get("SETUP_TIME") - @coroutine - def start_builder(self, realm, token, build_uuid): - """ Create a builder with the specified config. 
Returns a unique id which can be used to manage + @coroutine + def start_builder(self, realm, token, build_uuid): + """ Create a builder with the specified config. Returns a unique id which can be used to manage the builder. """ - raise NotImplementedError + raise NotImplementedError - @coroutine - def stop_builder(self, builder_id): - """ Stop a builder which is currently running. + @coroutine + def stop_builder(self, builder_id): + """ Stop a builder which is currently running. """ - raise NotImplementedError + raise NotImplementedError - def allowed_for_namespace(self, namespace): - """ Returns true if this executor can be used for builds in the given namespace. """ + def allowed_for_namespace(self, namespace): + """ Returns true if this executor can be used for builds in the given namespace. """ - # Check for an explicit namespace whitelist. - namespace_whitelist = self.executor_config.get('NAMESPACE_WHITELIST') - if namespace_whitelist is not None and namespace in namespace_whitelist: - return True + # Check for an explicit namespace whitelist. + namespace_whitelist = self.executor_config.get("NAMESPACE_WHITELIST") + if namespace_whitelist is not None and namespace in namespace_whitelist: + return True - # Check for a staged rollout percentage. If found, we hash the namespace and, if it is found - # in the first X% of the character space, we allow this executor to be used. - staged_rollout = self.executor_config.get('STAGED_ROLLOUT') - if staged_rollout is not None: - bucket = int(hashlib.sha256(namespace).hexdigest()[-2:], 16) - return bucket < (256 * staged_rollout) + # Check for a staged rollout percentage. If found, we hash the namespace and, if it is found + # in the first X% of the character space, we allow this executor to be used. + staged_rollout = self.executor_config.get("STAGED_ROLLOUT") + if staged_rollout is not None: + bucket = int(hashlib.sha256(namespace).hexdigest()[-2:], 16) + return bucket < (256 * staged_rollout) - # If there are no restrictions in place, we are free to use this executor. - return staged_rollout is None and namespace_whitelist is None + # If there are no restrictions in place, we are free to use this executor. + return staged_rollout is None and namespace_whitelist is None - @property - def minimum_retry_threshold(self): - """ Returns the minimum number of retries required for this executor to be used or 0 if + @property + def minimum_retry_threshold(self): + """ Returns the minimum number of retries required for this executor to be used or 0 if none. 
""" - return self.executor_config.get('MINIMUM_RETRY_THRESHOLD', 0) + return self.executor_config.get("MINIMUM_RETRY_THRESHOLD", 0) - def generate_cloud_config(self, realm, token, build_uuid, coreos_channel, - manager_hostname, quay_username=None, - quay_password=None): - if quay_username is None: - quay_username = self.executor_config['QUAY_USERNAME'] + def generate_cloud_config( + self, + realm, + token, + build_uuid, + coreos_channel, + manager_hostname, + quay_username=None, + quay_password=None, + ): + if quay_username is None: + quay_username = self.executor_config["QUAY_USERNAME"] - if quay_password is None: - quay_password = self.executor_config['QUAY_PASSWORD'] + if quay_password is None: + quay_password = self.executor_config["QUAY_PASSWORD"] - return TEMPLATE.render( - realm=realm, - token=token, - build_uuid=build_uuid, - quay_username=quay_username, - quay_password=quay_password, - manager_hostname=manager_hostname, - websocket_scheme=self.websocket_scheme, - coreos_channel=coreos_channel, - worker_image=self.executor_config.get('WORKER_IMAGE', 'quay.io/coreos/registry-build-worker'), - worker_tag=self.executor_config['WORKER_TAG'], - logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None), - volume_size=self.executor_config.get('VOLUME_SIZE', '42G'), - max_lifetime_s=self.executor_config.get('MAX_LIFETIME_S', 10800), - ssh_authorized_keys=self.executor_config.get('SSH_AUTHORIZED_KEYS', []), - ) + return TEMPLATE.render( + realm=realm, + token=token, + build_uuid=build_uuid, + quay_username=quay_username, + quay_password=quay_password, + manager_hostname=manager_hostname, + websocket_scheme=self.websocket_scheme, + coreos_channel=coreos_channel, + worker_image=self.executor_config.get( + "WORKER_IMAGE", "quay.io/coreos/registry-build-worker" + ), + worker_tag=self.executor_config["WORKER_TAG"], + logentries_token=self.executor_config.get("LOGENTRIES_TOKEN", None), + volume_size=self.executor_config.get("VOLUME_SIZE", "42G"), + max_lifetime_s=self.executor_config.get("MAX_LIFETIME_S", 10800), + ssh_authorized_keys=self.executor_config.get("SSH_AUTHORIZED_KEYS", []), + ) class EC2Executor(BuilderExecutor): - """ Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud + """ Implementation of BuilderExecutor which uses libcloud to start machines on a variety of cloud providers. """ - COREOS_STACK_URL = 'http://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_hvm.txt' - def __init__(self, *args, **kwargs): - self._loop = get_event_loop() - super(EC2Executor, self).__init__(*args, **kwargs) - - def _get_conn(self): - """ Creates an ec2 connection which can be used to manage instances. - """ - return AsyncWrapper(boto.ec2.connect_to_region( - self.executor_config['EC2_REGION'], - aws_access_key_id=self.executor_config['AWS_ACCESS_KEY'], - aws_secret_access_key=self.executor_config['AWS_SECRET_KEY'], - )) - - @classmethod - @cachetools.func.ttl_cache(ttl=ONE_HOUR) - def _get_coreos_ami(cls, ec2_region, coreos_channel): - """ Retrieve the CoreOS AMI id from the canonical listing. 
- """ - stack_list_string = requests.get(EC2Executor.COREOS_STACK_URL % coreos_channel).text - stack_amis = dict([stack.split('=') for stack in stack_list_string.split('|')]) - return stack_amis[ec2_region] - - @coroutine - @duration_collector_async(metric_queue.builder_time_to_start, ['ec2']) - def start_builder(self, realm, token, build_uuid): - region = self.executor_config['EC2_REGION'] - channel = self.executor_config.get('COREOS_CHANNEL', 'stable') - - coreos_ami = self.executor_config.get('COREOS_AMI', None) - if coreos_ami is None: - get_ami_callable = partial(self._get_coreos_ami, region, channel) - coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable)) - - user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname) - logger.debug('Generated cloud config for build %s: %s', build_uuid, user_data) - - ec2_conn = self._get_conn() - - ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType( - size=int(self.executor_config.get('BLOCK_DEVICE_SIZE', 48)), - volume_type='gp2', - delete_on_termination=True, + COREOS_STACK_URL = ( + "http://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_hvm.txt" ) - block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping() - block_devices['/dev/xvda'] = ssd_root_ebs - interfaces = None - if self.executor_config.get('EC2_VPC_SUBNET_ID', None) is not None: - interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'], - groups=self.executor_config['EC2_SECURITY_GROUP_IDS'], - associate_public_ip_address=True, - ) - interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) + def __init__(self, *args, **kwargs): + self._loop = get_event_loop() + super(EC2Executor, self).__init__(*args, **kwargs) - try: - reservation = yield From(ec2_conn.run_instances( - coreos_ami, - instance_type=self.executor_config['EC2_INSTANCE_TYPE'], - key_name=self.executor_config.get('EC2_KEY_NAME', None), - user_data=user_data, - instance_initiated_shutdown_behavior='terminate', - block_device_map=block_devices, - network_interfaces=interfaces, - )) - except boto.exception.EC2ResponseError as ec2e: - logger.exception('Unable to spawn builder instance') - metric_queue.ephemeral_build_worker_failure.Inc() - raise ec2e + def _get_conn(self): + """ Creates an ec2 connection which can be used to manage instances. + """ + return AsyncWrapper( + boto.ec2.connect_to_region( + self.executor_config["EC2_REGION"], + aws_access_key_id=self.executor_config["AWS_ACCESS_KEY"], + aws_secret_access_key=self.executor_config["AWS_SECRET_KEY"], + ) + ) - if not reservation.instances: - raise ExecutorException('Unable to spawn builder instance.') - elif len(reservation.instances) != 1: - raise ExecutorException('EC2 started wrong number of instances!') + @classmethod + @cachetools.func.ttl_cache(ttl=ONE_HOUR) + def _get_coreos_ami(cls, ec2_region, coreos_channel): + """ Retrieve the CoreOS AMI id from the canonical listing. 
+ """ + stack_list_string = requests.get( + EC2Executor.COREOS_STACK_URL % coreos_channel + ).text + stack_amis = dict([stack.split("=") for stack in stack_list_string.split("|")]) + return stack_amis[ec2_region] - launched = AsyncWrapper(reservation.instances[0]) + @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, ["ec2"]) + def start_builder(self, realm, token, build_uuid): + region = self.executor_config["EC2_REGION"] + channel = self.executor_config.get("COREOS_CHANNEL", "stable") - # Sleep a few seconds to wait for AWS to spawn the instance. - yield From(trollius.sleep(_TAG_RETRY_SLEEP)) + coreos_ami = self.executor_config.get("COREOS_AMI", None) + if coreos_ami is None: + get_ami_callable = partial(self._get_coreos_ami, region, channel) + coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable)) - # Tag the instance with its metadata. - for i in range(0, _TAG_RETRY_COUNT): - try: - yield From(launched.add_tags({ - 'Name': 'Quay Ephemeral Builder', - 'Realm': realm, - 'Token': token, - 'BuildUUID': build_uuid, - })) - except boto.exception.EC2ResponseError as ec2e: - if ec2e.error_code == 'InvalidInstanceID.NotFound': - if i < _TAG_RETRY_COUNT - 1: - logger.warning('Failed to write EC2 tags for instance %s for build %s (attempt #%s)', - launched.id, build_uuid, i) - yield From(trollius.sleep(_TAG_RETRY_SLEEP)) - continue + user_data = self.generate_cloud_config( + realm, token, build_uuid, channel, self.manager_hostname + ) + logger.debug("Generated cloud config for build %s: %s", build_uuid, user_data) - raise ExecutorException('Unable to find builder instance.') + ec2_conn = self._get_conn() - logger.exception('Failed to write EC2 tags (attempt #%s)', i) + ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType( + size=int(self.executor_config.get("BLOCK_DEVICE_SIZE", 48)), + volume_type="gp2", + delete_on_termination=True, + ) + block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping() + block_devices["/dev/xvda"] = ssd_root_ebs - logger.debug('Machine with ID %s started for build %s', launched.id, build_uuid) - raise Return(launched.id) + interfaces = None + if self.executor_config.get("EC2_VPC_SUBNET_ID", None) is not None: + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=self.executor_config["EC2_VPC_SUBNET_ID"], + groups=self.executor_config["EC2_SECURITY_GROUP_IDS"], + associate_public_ip_address=True, + ) + interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - @coroutine - def stop_builder(self, builder_id): - try: - ec2_conn = self._get_conn() - terminated_instances = yield From(ec2_conn.terminate_instances([builder_id])) - except boto.exception.EC2ResponseError as ec2e: - if ec2e.error_code == 'InvalidInstanceID.NotFound': - logger.debug('Instance %s already terminated', builder_id) - return + try: + reservation = yield From( + ec2_conn.run_instances( + coreos_ami, + instance_type=self.executor_config["EC2_INSTANCE_TYPE"], + key_name=self.executor_config.get("EC2_KEY_NAME", None), + user_data=user_data, + instance_initiated_shutdown_behavior="terminate", + block_device_map=block_devices, + network_interfaces=interfaces, + ) + ) + except boto.exception.EC2ResponseError as ec2e: + logger.exception("Unable to spawn builder instance") + metric_queue.ephemeral_build_worker_failure.Inc() + raise ec2e - logger.exception('Exception when trying to terminate instance %s', builder_id) - raise + if not reservation.instances: + raise ExecutorException("Unable to spawn builder 
instance.") + elif len(reservation.instances) != 1: + raise ExecutorException("EC2 started wrong number of instances!") - if builder_id not in [si.id for si in terminated_instances]: - raise ExecutorException('Unable to terminate instance: %s' % builder_id) + launched = AsyncWrapper(reservation.instances[0]) + + # Sleep a few seconds to wait for AWS to spawn the instance. + yield From(trollius.sleep(_TAG_RETRY_SLEEP)) + + # Tag the instance with its metadata. + for i in range(0, _TAG_RETRY_COUNT): + try: + yield From( + launched.add_tags( + { + "Name": "Quay Ephemeral Builder", + "Realm": realm, + "Token": token, + "BuildUUID": build_uuid, + } + ) + ) + except boto.exception.EC2ResponseError as ec2e: + if ec2e.error_code == "InvalidInstanceID.NotFound": + if i < _TAG_RETRY_COUNT - 1: + logger.warning( + "Failed to write EC2 tags for instance %s for build %s (attempt #%s)", + launched.id, + build_uuid, + i, + ) + yield From(trollius.sleep(_TAG_RETRY_SLEEP)) + continue + + raise ExecutorException("Unable to find builder instance.") + + logger.exception("Failed to write EC2 tags (attempt #%s)", i) + + logger.debug("Machine with ID %s started for build %s", launched.id, build_uuid) + raise Return(launched.id) + + @coroutine + def stop_builder(self, builder_id): + try: + ec2_conn = self._get_conn() + terminated_instances = yield From( + ec2_conn.terminate_instances([builder_id]) + ) + except boto.exception.EC2ResponseError as ec2e: + if ec2e.error_code == "InvalidInstanceID.NotFound": + logger.debug("Instance %s already terminated", builder_id) + return + + logger.exception( + "Exception when trying to terminate instance %s", builder_id + ) + raise + + if builder_id not in [si.id for si in terminated_instances]: + raise ExecutorException("Unable to terminate instance: %s" % builder_id) class PopenExecutor(BuilderExecutor): - """ Implementation of BuilderExecutor which uses Popen to fork a quay-builder process. + """ Implementation of BuilderExecutor which uses Popen to fork a quay-builder process. """ - def __init__(self, executor_config, manager_hostname): - self._jobs = {} - super(PopenExecutor, self).__init__(executor_config, manager_hostname) + def __init__(self, executor_config, manager_hostname): + self._jobs = {} - """ Executor which uses Popen to fork a quay-builder process. + super(PopenExecutor, self).__init__(executor_config, manager_hostname) + + """ Executor which uses Popen to fork a quay-builder process. 
""" - @coroutine - @duration_collector_async(metric_queue.builder_time_to_start, ['fork']) - def start_builder(self, realm, token, build_uuid): - # Now start a machine for this job, adding the machine id to the etcd information - logger.debug('Forking process for build') - ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost") - ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787") - builder_env = { - 'TOKEN': token, - 'REALM': realm, - 'ENDPOINT': 'ws://%s:%s' % (ws_host, ws_port), - 'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''), - 'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''), - 'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''), - 'PATH': "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - } + @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, ["fork"]) + def start_builder(self, realm, token, build_uuid): + # Now start a machine for this job, adding the machine id to the etcd information + logger.debug("Forking process for build") - logpipe = LogPipe(logging.INFO) - spawned = subprocess.Popen(os.environ.get('BUILDER_BINARY_LOCATION', - '/usr/local/bin/quay-builder'), - stdout=logpipe, - stderr=logpipe, - env=builder_env) + ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost") + ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787") + builder_env = { + "TOKEN": token, + "REALM": realm, + "ENDPOINT": "ws://%s:%s" % (ws_host, ws_port), + "DOCKER_TLS_VERIFY": os.environ.get("DOCKER_TLS_VERIFY", ""), + "DOCKER_CERT_PATH": os.environ.get("DOCKER_CERT_PATH", ""), + "DOCKER_HOST": os.environ.get("DOCKER_HOST", ""), + "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + } - builder_id = str(uuid.uuid4()) - self._jobs[builder_id] = (spawned, logpipe) - logger.debug('Builder spawned with id: %s', builder_id) - raise Return(builder_id) + logpipe = LogPipe(logging.INFO) + spawned = subprocess.Popen( + os.environ.get("BUILDER_BINARY_LOCATION", "/usr/local/bin/quay-builder"), + stdout=logpipe, + stderr=logpipe, + env=builder_env, + ) - @coroutine - def stop_builder(self, builder_id): - if builder_id not in self._jobs: - raise ExecutorException('Builder id not being tracked by executor.') + builder_id = str(uuid.uuid4()) + self._jobs[builder_id] = (spawned, logpipe) + logger.debug("Builder spawned with id: %s", builder_id) + raise Return(builder_id) - logger.debug('Killing builder with id: %s', builder_id) - spawned, logpipe = self._jobs[builder_id] + @coroutine + def stop_builder(self, builder_id): + if builder_id not in self._jobs: + raise ExecutorException("Builder id not being tracked by executor.") - if spawned.poll() is None: - spawned.kill() - logpipe.close() + logger.debug("Killing builder with id: %s", builder_id) + spawned, logpipe = self._jobs[builder_id] + + if spawned.poll() is None: + spawned.kill() + logpipe.close() class KubernetesExecutor(BuilderExecutor): - """ Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual + """ Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual machine in a pod """ - def __init__(self, *args, **kwargs): - super(KubernetesExecutor, self).__init__(*args, **kwargs) - self._loop = get_event_loop() - self.namespace = self.executor_config.get('BUILDER_NAMESPACE', 'builder') - self.image = self.executor_config.get('BUILDER_VM_CONTAINER_IMAGE', - 'quay.io/quay/quay-builder-qemu-coreos:stable') - @coroutine - def _request(self, method, path, **kwargs): - request_options = dict(kwargs) + def __init__(self, *args, **kwargs): + 
super(KubernetesExecutor, self).__init__(*args, **kwargs) + self._loop = get_event_loop() + self.namespace = self.executor_config.get("BUILDER_NAMESPACE", "builder") + self.image = self.executor_config.get( + "BUILDER_VM_CONTAINER_IMAGE", "quay.io/quay/quay-builder-qemu-coreos:stable" + ) - tls_cert = self.executor_config.get('K8S_API_TLS_CERT') - tls_key = self.executor_config.get('K8S_API_TLS_KEY') - tls_ca = self.executor_config.get('K8S_API_TLS_CA') - service_account_token = self.executor_config.get('SERVICE_ACCOUNT_TOKEN') + @coroutine + def _request(self, method, path, **kwargs): + request_options = dict(kwargs) - if 'timeout' not in request_options: - request_options['timeout'] = self.executor_config.get("K8S_API_TIMEOUT", 20) + tls_cert = self.executor_config.get("K8S_API_TLS_CERT") + tls_key = self.executor_config.get("K8S_API_TLS_KEY") + tls_ca = self.executor_config.get("K8S_API_TLS_CA") + service_account_token = self.executor_config.get("SERVICE_ACCOUNT_TOKEN") - if service_account_token: - scheme = 'https' - request_options['headers'] = {'Authorization': 'Bearer ' + service_account_token} - logger.debug('Using service account token for Kubernetes authentication') - elif tls_cert and tls_key: - scheme = 'https' - request_options['cert'] = (tls_cert, tls_key) - logger.debug('Using tls certificate and key for Kubernetes authentication') - if tls_ca: - request_options['verify'] = tls_ca - else: - scheme = 'http' + if "timeout" not in request_options: + request_options["timeout"] = self.executor_config.get("K8S_API_TIMEOUT", 20) - server = self.executor_config.get('K8S_API_SERVER', 'localhost:8080') - url = '%s://%s%s' % (scheme, server, path) + if service_account_token: + scheme = "https" + request_options["headers"] = { + "Authorization": "Bearer " + service_account_token + } + logger.debug("Using service account token for Kubernetes authentication") + elif tls_cert and tls_key: + scheme = "https" + request_options["cert"] = (tls_cert, tls_key) + logger.debug("Using tls certificate and key for Kubernetes authentication") + if tls_ca: + request_options["verify"] = tls_ca + else: + scheme = "http" - logger.debug('Executor config: %s', self.executor_config) - logger.debug('Kubernetes request: %s %s: %s', method, url, request_options) - res = requests.request(method, url, **request_options) - logger.debug('Kubernetes response: %s: %s', res.status_code, res.text) - raise Return(res) + server = self.executor_config.get("K8S_API_SERVER", "localhost:8080") + url = "%s://%s%s" % (scheme, server, path) - def _jobs_path(self): - return '/apis/batch/v1/namespaces/%s/jobs' % self.namespace + logger.debug("Executor config: %s", self.executor_config) + logger.debug("Kubernetes request: %s %s: %s", method, url, request_options) + res = requests.request(method, url, **request_options) + logger.debug("Kubernetes response: %s: %s", res.status_code, res.text) + raise Return(res) - def _job_path(self, build_uuid): - return '%s/%s' % (self._jobs_path(), build_uuid) + def _jobs_path(self): + return "/apis/batch/v1/namespaces/%s/jobs" % self.namespace - def _kubernetes_distribution(self): - return self.executor_config.get('KUBERNETES_DISTRIBUTION', 'basic').lower() + def _job_path(self, build_uuid): + return "%s/%s" % (self._jobs_path(), build_uuid) - def _is_basic_kubernetes_distribution(self): - return self._kubernetes_distribution() == 'basic' + def _kubernetes_distribution(self): + return self.executor_config.get("KUBERNETES_DISTRIBUTION", "basic").lower() - def 
_is_openshift_kubernetes_distribution(self): - return self._kubernetes_distribution() == 'openshift' + def _is_basic_kubernetes_distribution(self): + return self._kubernetes_distribution() == "basic" - def _build_job_container_resources(self): - # Minimum acceptable free resources for this container to "fit" in a quota - # These may be lower than the absolute limits if the cluster is knowingly - # oversubscribed by some amount. - container_requests = { - 'memory' : self.executor_config.get('CONTAINER_MEMORY_REQUEST', '3968Mi'), - } + def _is_openshift_kubernetes_distribution(self): + return self._kubernetes_distribution() == "openshift" - container_limits = { - 'memory' : self.executor_config.get('CONTAINER_MEMORY_LIMITS', '5120Mi'), - 'cpu' : self.executor_config.get('CONTAINER_CPU_LIMITS', '1000m'), - } + def _build_job_container_resources(self): + # Minimum acceptable free resources for this container to "fit" in a quota + # These may be lower than the absolute limits if the cluster is knowingly + # oversubscribed by some amount. + container_requests = { + "memory": self.executor_config.get("CONTAINER_MEMORY_REQUEST", "3968Mi") + } - resources = { - 'requests': container_requests, - } + container_limits = { + "memory": self.executor_config.get("CONTAINER_MEMORY_LIMITS", "5120Mi"), + "cpu": self.executor_config.get("CONTAINER_CPU_LIMITS", "1000m"), + } - if self._is_openshift_kubernetes_distribution(): - resources['requests']['cpu'] = self.executor_config.get('CONTAINER_CPU_REQUEST', '500m') - resources['limits'] = container_limits + resources = {"requests": container_requests} - return resources + if self._is_openshift_kubernetes_distribution(): + resources["requests"]["cpu"] = self.executor_config.get( + "CONTAINER_CPU_REQUEST", "500m" + ) + resources["limits"] = container_limits - def _build_job_containers(self, user_data): - vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G') - vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G') + return resources - container = { - 'name': 'builder', - 'imagePullPolicy': 'IfNotPresent', - 'image': self.image, - 'securityContext': {'privileged': True}, - 'env': [ - {'name': 'USERDATA', 'value': user_data}, - {'name': 'VM_MEMORY', 'value': vm_memory_limit}, - {'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size}, - ], - 'resources': self._build_job_container_resources(), - } + def _build_job_containers(self, user_data): + vm_memory_limit = self.executor_config.get("VM_MEMORY_LIMIT", "4G") + vm_volume_size = self.executor_config.get("VOLUME_SIZE", "32G") - if self._is_basic_kubernetes_distribution(): - container['volumeMounts'] = [{'name': 'secrets-mask','mountPath': '/var/run/secrets/kubernetes.io/serviceaccount'}] + container = { + "name": "builder", + "imagePullPolicy": "IfNotPresent", + "image": self.image, + "securityContext": {"privileged": True}, + "env": [ + {"name": "USERDATA", "value": user_data}, + {"name": "VM_MEMORY", "value": vm_memory_limit}, + {"name": "VM_VOLUME_SIZE", "value": vm_volume_size}, + ], + "resources": self._build_job_container_resources(), + } - return container + if self._is_basic_kubernetes_distribution(): + container["volumeMounts"] = [ + { + "name": "secrets-mask", + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + } + ] - def _job_resource(self, build_uuid, user_data, coreos_channel='stable'): - image_pull_secret_name = self.executor_config.get('IMAGE_PULL_SECRET_NAME', 'builder') - service_account = self.executor_config.get('SERVICE_ACCOUNT_NAME', 'quay-builder-sa') - 
node_selector_label_key = self.executor_config.get('NODE_SELECTOR_LABEL_KEY', 'beta.kubernetes.io/instance-type') - node_selector_label_value = self.executor_config.get('NODE_SELECTOR_LABEL_VALUE', '') + return container - node_selector = { - node_selector_label_key : node_selector_label_value - } + def _job_resource(self, build_uuid, user_data, coreos_channel="stable"): + image_pull_secret_name = self.executor_config.get( + "IMAGE_PULL_SECRET_NAME", "builder" + ) + service_account = self.executor_config.get( + "SERVICE_ACCOUNT_NAME", "quay-builder-sa" + ) + node_selector_label_key = self.executor_config.get( + "NODE_SELECTOR_LABEL_KEY", "beta.kubernetes.io/instance-type" + ) + node_selector_label_value = self.executor_config.get( + "NODE_SELECTOR_LABEL_VALUE", "" + ) - release_sha = release.GIT_HEAD or 'none' - if ' ' in release_sha: - release_sha = 'HEAD' + node_selector = {node_selector_label_key: node_selector_label_value} - job_resource = { - 'apiVersion': 'batch/v1', - 'kind': 'Job', - 'metadata': { - 'namespace': self.namespace, - 'generateName': build_uuid + '-', - 'labels': { - 'build': build_uuid, - 'time': datetime.datetime.now().strftime('%Y-%m-%d-%H'), - 'manager': socket.gethostname(), - 'quay-sha': release_sha, - }, - }, - 'spec' : { - 'activeDeadlineSeconds': self.executor_config.get('MAXIMUM_JOB_TIME', 7200), - 'template': { - 'metadata': { - 'labels': { - 'build': build_uuid, - 'time': datetime.datetime.now().strftime('%Y-%m-%d-%H'), - 'manager': socket.gethostname(), - 'quay-sha': release_sha, + release_sha = release.GIT_HEAD or "none" + if " " in release_sha: + release_sha = "HEAD" + + job_resource = { + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": { + "namespace": self.namespace, + "generateName": build_uuid + "-", + "labels": { + "build": build_uuid, + "time": datetime.datetime.now().strftime("%Y-%m-%d-%H"), + "manager": socket.gethostname(), + "quay-sha": release_sha, + }, }, - }, - 'spec': { - 'imagePullSecrets': [{ 'name': image_pull_secret_name }], - 'restartPolicy': 'Never', - 'dnsPolicy': 'Default', - 'containers': [self._build_job_containers(user_data)], - }, - }, - }, - } + "spec": { + "activeDeadlineSeconds": self.executor_config.get( + "MAXIMUM_JOB_TIME", 7200 + ), + "template": { + "metadata": { + "labels": { + "build": build_uuid, + "time": datetime.datetime.now().strftime("%Y-%m-%d-%H"), + "manager": socket.gethostname(), + "quay-sha": release_sha, + } + }, + "spec": { + "imagePullSecrets": [{"name": image_pull_secret_name}], + "restartPolicy": "Never", + "dnsPolicy": "Default", + "containers": [self._build_job_containers(user_data)], + }, + }, + }, + } - if self._is_openshift_kubernetes_distribution(): - # Setting `automountServiceAccountToken` to false will prevent automounting API credentials for a service account. - job_resource['spec']['template']['spec']['automountServiceAccountToken'] = False + if self._is_openshift_kubernetes_distribution(): + # Setting `automountServiceAccountToken` to false will prevent automounting API credentials for a service account. + job_resource["spec"]["template"]["spec"][ + "automountServiceAccountToken" + ] = False - # Use dedicated service account that has no authorization to any resources. - job_resource['spec']['template']['spec']['serviceAccount'] = service_account + # Use dedicated service account that has no authorization to any resources. 
+ job_resource["spec"]["template"]["spec"]["serviceAccount"] = service_account - # Setting `enableServiceLinks` to false prevents information about other services from being injected into pod's - # environment variables. Pod has no visibility into other services on the cluster. - job_resource['spec']['template']['spec']['enableServiceLinks'] = False + # Setting `enableServiceLinks` to false prevents information about other services from being injected into pod's + # environment variables. Pod has no visibility into other services on the cluster. + job_resource["spec"]["template"]["spec"]["enableServiceLinks"] = False - if node_selector_label_value.strip() != '': - job_resource['spec']['template']['spec']['nodeSelector'] = node_selector + if node_selector_label_value.strip() != "": + job_resource["spec"]["template"]["spec"]["nodeSelector"] = node_selector - if self._is_basic_kubernetes_distribution(): - # This volume is a hack to mask the token for the namespace's - # default service account, which is placed in a file mounted under - # `/var/run/secrets/kubernetes.io/serviceaccount` in all pods. - # There's currently no other way to just disable the service - # account at either the pod or namespace level. - # - # https://github.com/kubernetes/kubernetes/issues/16779 - # - job_resource['spec']['template']['spec']['volumes'] = [{'name': 'secrets-mask','emptyDir': {'medium': 'Memory'}}] + if self._is_basic_kubernetes_distribution(): + # This volume is a hack to mask the token for the namespace's + # default service account, which is placed in a file mounted under + # `/var/run/secrets/kubernetes.io/serviceaccount` in all pods. + # There's currently no other way to just disable the service + # account at either the pod or namespace level. + # + # https://github.com/kubernetes/kubernetes/issues/16779 + # + job_resource["spec"]["template"]["spec"]["volumes"] = [ + {"name": "secrets-mask", "emptyDir": {"medium": "Memory"}} + ] - return job_resource + return job_resource - @coroutine - @duration_collector_async(metric_queue.builder_time_to_start, ['k8s']) - def start_builder(self, realm, token, build_uuid): - # generate resource - channel = self.executor_config.get('COREOS_CHANNEL', 'stable') - user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname) - resource = self._job_resource(build_uuid, user_data, channel) - logger.debug('Using Kubernetes Distribution: %s', self._kubernetes_distribution()) - logger.debug('Generated kubernetes resource:\n%s', resource) + @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, ["k8s"]) + def start_builder(self, realm, token, build_uuid): + # generate resource + channel = self.executor_config.get("COREOS_CHANNEL", "stable") + user_data = self.generate_cloud_config( + realm, token, build_uuid, channel, self.manager_hostname + ) + resource = self._job_resource(build_uuid, user_data, channel) + logger.debug( + "Using Kubernetes Distribution: %s", self._kubernetes_distribution() + ) + logger.debug("Generated kubernetes resource:\n%s", resource) - # schedule - create_job = yield From(self._request('POST', self._jobs_path(), json=resource)) - if int(create_job.status_code / 100) != 2: - raise ExecutorException('Failed to create job: %s: %s: %s' % - (build_uuid, create_job.status_code, create_job.text)) + # schedule + create_job = yield From(self._request("POST", self._jobs_path(), json=resource)) + if int(create_job.status_code / 100) != 2: + raise ExecutorException( + "Failed to create job: %s: %s: %s" + % 
(build_uuid, create_job.status_code, create_job.text) + ) - job = create_job.json() - raise Return(job['metadata']['name']) + job = create_job.json() + raise Return(job["metadata"]["name"]) - @coroutine - def stop_builder(self, builder_id): - pods_path = '/api/v1/namespaces/%s/pods' % self.namespace + @coroutine + def stop_builder(self, builder_id): + pods_path = "/api/v1/namespaces/%s/pods" % self.namespace - # Delete the job itself. - try: - yield From(self._request('DELETE', self._job_path(builder_id))) - except: - logger.exception('Failed to send delete job call for job %s', builder_id) + # Delete the job itself. + try: + yield From(self._request("DELETE", self._job_path(builder_id))) + except: + logger.exception("Failed to send delete job call for job %s", builder_id) - # Delete the pod(s) for the job. - selectorString = "job-name=%s" % builder_id - try: - yield From(self._request('DELETE', pods_path, params=dict(labelSelector=selectorString))) - except: - logger.exception("Failed to send delete pod call for job %s", builder_id) + # Delete the pod(s) for the job. + selectorString = "job-name=%s" % builder_id + try: + yield From( + self._request( + "DELETE", pods_path, params=dict(labelSelector=selectorString) + ) + ) + except: + logger.exception("Failed to send delete pod call for job %s", builder_id) class LogPipe(threading.Thread): - """ Adapted from http://codereview.stackexchange.com/a/17959 + """ Adapted from http://codereview.stackexchange.com/a/17959 """ - def __init__(self, level): - """Setup the object with a logger and a loglevel + + def __init__(self, level): + """Setup the object with a logger and a loglevel and start the thread """ - threading.Thread.__init__(self) - self.daemon = False - self.level = level - self.fd_read, self.fd_write = os.pipe() - self.pipe_reader = os.fdopen(self.fd_read) - self.start() + threading.Thread.__init__(self) + self.daemon = False + self.level = level + self.fd_read, self.fd_write = os.pipe() + self.pipe_reader = os.fdopen(self.fd_read) + self.start() - def fileno(self): - """Return the write file descriptor of the pipe + def fileno(self): + """Return the write file descriptor of the pipe """ - return self.fd_write + return self.fd_write - def run(self): - """Run the thread, logging everything. + def run(self): + """Run the thread, logging everything. """ - for line in iter(self.pipe_reader.readline, ''): - logging.log(self.level, line.strip('\n')) + for line in iter(self.pipe_reader.readline, ""): + logging.log(self.level, line.strip("\n")) - self.pipe_reader.close() + self.pipe_reader.close() - def close(self): - """Close the write end of the pipe. + def close(self): + """Close the write end of the pipe. """ - os.close(self.fd_write) + os.close(self.fd_write) diff --git a/buildman/manager/noop_canceller.py b/buildman/manager/noop_canceller.py index 2adf17ad7..51c023fcc 100644 --- a/buildman/manager/noop_canceller.py +++ b/buildman/manager/noop_canceller.py @@ -1,8 +1,9 @@ class NoopCanceller(object): - """ A class that can not cancel a build """ - def __init__(self, config=None): - pass + """ A class that can not cancel a build """ - def try_cancel_build(self, uuid): - """ Does nothing and fails to cancel build. """ - return False + def __init__(self, config=None): + pass + + def try_cancel_build(self, uuid): + """ Does nothing and fails to cancel build. 
""" + return False diff --git a/buildman/manager/orchestrator_canceller.py b/buildman/manager/orchestrator_canceller.py index f3f821d5e..64ae4f8d7 100644 --- a/buildman/manager/orchestrator_canceller.py +++ b/buildman/manager/orchestrator_canceller.py @@ -7,20 +7,23 @@ from util import slash_join logger = logging.getLogger(__name__) -CANCEL_PREFIX = 'cancel/' +CANCEL_PREFIX = "cancel/" class OrchestratorCanceller(object): - """ An asynchronous way to cancel a build with any Orchestrator. """ - def __init__(self, config): - self._orchestrator = orchestrator_from_config(config, canceller_only=True) + """ An asynchronous way to cancel a build with any Orchestrator. """ - def try_cancel_build(self, build_uuid): - logger.info('Cancelling build %s', build_uuid) - cancel_key = slash_join(CANCEL_PREFIX, build_uuid) - try: - self._orchestrator.set_key_sync(cancel_key, build_uuid, expiration=60) - return True - except OrchestratorError: - logger.exception('Failed to write cancel action to redis with uuid %s', build_uuid) - return False + def __init__(self, config): + self._orchestrator = orchestrator_from_config(config, canceller_only=True) + + def try_cancel_build(self, build_uuid): + logger.info("Cancelling build %s", build_uuid) + cancel_key = slash_join(CANCEL_PREFIX, build_uuid) + try: + self._orchestrator.set_key_sync(cancel_key, build_uuid, expiration=60) + return True + except OrchestratorError: + logger.exception( + "Failed to write cancel action to redis with uuid %s", build_uuid + ) + return False diff --git a/buildman/test/test_buildman.py b/buildman/test/test_buildman.py index 49b9a20fc..ec6192ae2 100644 --- a/buildman/test/test_buildman.py +++ b/buildman/test/test_buildman.py @@ -9,8 +9,7 @@ from trollius import coroutine, get_event_loop, From, Future, Return from app import metric_queue from buildman.asyncutil import AsyncWrapper from buildman.component.buildcomponent import BuildComponent -from buildman.manager.ephemeral import (EphemeralBuilderManager, REALM_PREFIX, - JOB_PREFIX) +from buildman.manager.ephemeral import EphemeralBuilderManager, REALM_PREFIX, JOB_PREFIX from buildman.manager.executor import BuilderExecutor, ExecutorException from buildman.orchestrator import KeyEvent, KeyChange from buildman.server import BuildJobResult @@ -18,662 +17,767 @@ from util import slash_join from util.metrics.metricqueue import duration_collector_async -BUILD_UUID = 'deadbeef-dead-beef-dead-deadbeefdead' -REALM_ID = '1234-realm' +BUILD_UUID = "deadbeef-dead-beef-dead-deadbeefdead" +REALM_ID = "1234-realm" def async_test(f): - def wrapper(*args, **kwargs): - coro = coroutine(f) - future = coro(*args, **kwargs) - loop = get_event_loop() - loop.run_until_complete(future) - return wrapper + def wrapper(*args, **kwargs): + coro = coroutine(f) + future = coro(*args, **kwargs) + loop = get_event_loop() + loop.run_until_complete(future) + + return wrapper class TestExecutor(BuilderExecutor): - job_started = None - job_stopped = None + job_started = None + job_stopped = None - @coroutine - @duration_collector_async(metric_queue.builder_time_to_start, labelvalues=["testlabel"]) - def start_builder(self, realm, token, build_uuid): - self.job_started = str(uuid.uuid4()) - raise Return(self.job_started) + @coroutine + @duration_collector_async( + metric_queue.builder_time_to_start, labelvalues=["testlabel"] + ) + def start_builder(self, realm, token, build_uuid): + self.job_started = str(uuid.uuid4()) + raise Return(self.job_started) - @coroutine - def stop_builder(self, execution_id): - 
self.job_stopped = execution_id + @coroutine + def stop_builder(self, execution_id): + self.job_stopped = execution_id class BadExecutor(BuilderExecutor): - @coroutine - @duration_collector_async(metric_queue.builder_time_to_start, labelvalues=["testlabel"]) - def start_builder(self, realm, token, build_uuid): - raise ExecutorException('raised on purpose!') + @coroutine + @duration_collector_async( + metric_queue.builder_time_to_start, labelvalues=["testlabel"] + ) + def start_builder(self, realm, token, build_uuid): + raise ExecutorException("raised on purpose!") class EphemeralBuilderTestCase(unittest.TestCase): - def __init__(self, *args, **kwargs): - self.etcd_client_mock = None - super(EphemeralBuilderTestCase, self).__init__(*args, **kwargs) + def __init__(self, *args, **kwargs): + self.etcd_client_mock = None + super(EphemeralBuilderTestCase, self).__init__(*args, **kwargs) - @staticmethod - def _create_completed_future(result=None): - def inner(*args, **kwargs): - new_future = Future() - new_future.set_result(result) - return new_future - return inner + @staticmethod + def _create_completed_future(result=None): + def inner(*args, **kwargs): + new_future = Future() + new_future.set_result(result) + return new_future - def setUp(self): - self._existing_executors = dict(EphemeralBuilderManager.EXECUTORS) + return inner - def tearDown(self): - EphemeralBuilderManager.EXECUTORS = self._existing_executors + def setUp(self): + self._existing_executors = dict(EphemeralBuilderManager.EXECUTORS) - @coroutine - def _register_component(self, realm_spec, build_component, token): - raise Return('hello') + def tearDown(self): + EphemeralBuilderManager.EXECUTORS = self._existing_executors - def _create_build_job(self, namespace='namespace', retries=3): - mock_job = Mock() - mock_job.job_details = {'build_uuid': BUILD_UUID} - mock_job.job_item = { - 'body': json.dumps(mock_job.job_details), - 'id': 1, - } + @coroutine + def _register_component(self, realm_spec, build_component, token): + raise Return("hello") - mock_job.namespace = namespace - mock_job.retries_remaining = retries - mock_job.build_uuid = BUILD_UUID - return mock_job + def _create_build_job(self, namespace="namespace", retries=3): + mock_job = Mock() + mock_job.job_details = {"build_uuid": BUILD_UUID} + mock_job.job_item = {"body": json.dumps(mock_job.job_details), "id": 1} + + mock_job.namespace = namespace + mock_job.retries_remaining = retries + mock_job.build_uuid = BUILD_UUID + return mock_job class TestEphemeralLifecycle(EphemeralBuilderTestCase): - """ Tests the various lifecycles of the ephemeral builder and its interaction with etcd. """ + """ Tests the various lifecycles of the ephemeral builder and its interaction with etcd. 
""" - def __init__(self, *args, **kwargs): - super(TestEphemeralLifecycle, self).__init__(*args, **kwargs) - self.etcd_client_mock = None - self.test_executor = None + def __init__(self, *args, **kwargs): + super(TestEphemeralLifecycle, self).__init__(*args, **kwargs) + self.etcd_client_mock = None + self.test_executor = None - def _create_completed_future(self, result=None): - def inner(*args, **kwargs): - new_future = Future() - new_future.set_result(result) - return new_future - return inner + def _create_completed_future(self, result=None): + def inner(*args, **kwargs): + new_future = Future() + new_future.set_result(result) + return new_future - def _create_mock_executor(self, *args, **kwargs): - self.test_executor = Mock(spec=BuilderExecutor) - self.test_executor.start_builder = Mock(side_effect=self._create_completed_future('123')) - self.test_executor.stop_builder = Mock(side_effect=self._create_completed_future()) - self.test_executor.setup_time = 60 - self.test_executor.name = 'MockExecutor' - self.test_executor.minimum_retry_threshold = 0 - return self.test_executor + return inner - def setUp(self): - super(TestEphemeralLifecycle, self).setUp() + def _create_mock_executor(self, *args, **kwargs): + self.test_executor = Mock(spec=BuilderExecutor) + self.test_executor.start_builder = Mock( + side_effect=self._create_completed_future("123") + ) + self.test_executor.stop_builder = Mock( + side_effect=self._create_completed_future() + ) + self.test_executor.setup_time = 60 + self.test_executor.name = "MockExecutor" + self.test_executor.minimum_retry_threshold = 0 + return self.test_executor - EphemeralBuilderManager.EXECUTORS['test'] = self._create_mock_executor + def setUp(self): + super(TestEphemeralLifecycle, self).setUp() - self.register_component_callback = Mock() - self.unregister_component_callback = Mock() - self.job_heartbeat_callback = Mock() - self.job_complete_callback = AsyncWrapper(Mock()) + EphemeralBuilderManager.EXECUTORS["test"] = self._create_mock_executor - self.manager = EphemeralBuilderManager( - self.register_component_callback, - self.unregister_component_callback, - self.job_heartbeat_callback, - self.job_complete_callback, - '127.0.0.1', - 30, - ) + self.register_component_callback = Mock() + self.unregister_component_callback = Mock() + self.job_heartbeat_callback = Mock() + self.job_complete_callback = AsyncWrapper(Mock()) - self.manager.initialize({ - 'EXECUTOR': 'test', - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) + self.manager = EphemeralBuilderManager( + self.register_component_callback, + self.unregister_component_callback, + self.job_heartbeat_callback, + self.job_complete_callback, + "127.0.0.1", + 30, + ) - # Ensure that that the realm and building callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(REALM_PREFIX, callback_keys) - self.assertIn(JOB_PREFIX, callback_keys) + self.manager.initialize( + {"EXECUTOR": "test", "ORCHESTRATOR": {"MEM_CONFIG": None}} + ) - self.mock_job = self._create_build_job() - self.mock_job_key = slash_join('building', BUILD_UUID) + # Ensure that that the realm and building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(REALM_PREFIX, callback_keys) + self.assertIn(JOB_PREFIX, callback_keys) - def tearDown(self): - super(TestEphemeralLifecycle, self).tearDown() - self.manager.shutdown() + self.mock_job = self._create_build_job() + self.mock_job_key = slash_join("building", BUILD_UUID) 
+ def tearDown(self): + super(TestEphemeralLifecycle, self).tearDown() + self.manager.shutdown() - @coroutine - def _setup_job_for_managers(self): - test_component = Mock(spec=BuildComponent) - test_component.builder_realm = REALM_ID - test_component.start_build = Mock(side_effect=self._create_completed_future()) - self.register_component_callback.return_value = test_component + @coroutine + def _setup_job_for_managers(self): + test_component = Mock(spec=BuildComponent) + test_component.builder_realm = REALM_ID + test_component.start_build = Mock(side_effect=self._create_completed_future()) + self.register_component_callback.return_value = test_component - is_scheduled = yield From(self.manager.schedule(self.mock_job)) - self.assertTrue(is_scheduled) - self.assertEqual(self.test_executor.start_builder.call_count, 1) + is_scheduled = yield From(self.manager.schedule(self.mock_job)) + self.assertTrue(is_scheduled) + self.assertEqual(self.test_executor.start_builder.call_count, 1) - # Ensure that that the job, realm, and metric callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(self.mock_job_key, self.manager._orchestrator.state) - self.assertIn(REALM_PREFIX, callback_keys) - # TODO: assert metric key has been set + # Ensure that that the job, realm, and metric callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(self.mock_job_key, self.manager._orchestrator.state) + self.assertIn(REALM_PREFIX, callback_keys) + # TODO: assert metric key has been set - realm_for_build = self._find_realm_key(self.manager._orchestrator, BUILD_UUID) + realm_for_build = self._find_realm_key(self.manager._orchestrator, BUILD_UUID) - raw_realm_data = yield From(self.manager._orchestrator.get_key(slash_join('realm', - realm_for_build))) - realm_data = json.loads(raw_realm_data) - realm_data['realm'] = REALM_ID + raw_realm_data = yield From( + self.manager._orchestrator.get_key(slash_join("realm", realm_for_build)) + ) + realm_data = json.loads(raw_realm_data) + realm_data["realm"] = REALM_ID - # Right now the job is not registered with any managers because etcd has not accepted the job - self.assertEqual(self.register_component_callback.call_count, 0) + # Right now the job is not registered with any managers because etcd has not accepted the job + self.assertEqual(self.register_component_callback.call_count, 0) - # Fire off a realm changed with the same data. - yield From(self.manager._realm_callback( - KeyChange(KeyEvent.CREATE, - slash_join(REALM_PREFIX, REALM_ID), - json.dumps(realm_data)))) + # Fire off a realm changed with the same data. + yield From( + self.manager._realm_callback( + KeyChange( + KeyEvent.CREATE, + slash_join(REALM_PREFIX, REALM_ID), + json.dumps(realm_data), + ) + ) + ) - # Ensure that we have at least one component node. - self.assertEqual(self.register_component_callback.call_count, 1) - self.assertEqual(1, self.manager.num_workers()) + # Ensure that we have at least one component node. + self.assertEqual(self.register_component_callback.call_count, 1) + self.assertEqual(1, self.manager.num_workers()) - # Ensure that the build info exists. - self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + # Ensure that the build info exists. 
+ self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) - raise Return(test_component) + raise Return(test_component) - @staticmethod - def _find_realm_key(orchestrator, build_uuid): - for key, value in iteritems(orchestrator.state): - if key.startswith(REALM_PREFIX): - parsed_value = json.loads(value) - body = json.loads(parsed_value['job_queue_item']['body']) - if body['build_uuid'] == build_uuid: - return parsed_value['realm'] - continue - raise KeyError + @staticmethod + def _find_realm_key(orchestrator, build_uuid): + for key, value in iteritems(orchestrator.state): + if key.startswith(REALM_PREFIX): + parsed_value = json.loads(value) + body = json.loads(parsed_value["job_queue_item"]["body"]) + if body["build_uuid"] == build_uuid: + return parsed_value["realm"] + continue + raise KeyError + @async_test + def test_schedule_and_complete(self): + # Test that a job is properly registered with all of the managers + test_component = yield From(self._setup_job_for_managers()) - @async_test - def test_schedule_and_complete(self): - # Test that a job is properly registered with all of the managers - test_component = yield From(self._setup_job_for_managers()) + # Take the job ourselves + yield From(self.manager.build_component_ready(test_component)) - # Take the job ourselves - yield From(self.manager.build_component_ready(test_component)) + self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) - self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + # Finish the job + yield From( + self.manager.job_completed( + self.mock_job, BuildJobResult.COMPLETE, test_component + ) + ) - # Finish the job - yield From(self.manager.job_completed(self.mock_job, BuildJobResult.COMPLETE, test_component)) + # Ensure that the executor kills the job. + self.assertEqual(self.test_executor.stop_builder.call_count, 1) - # Ensure that the executor kills the job. - self.assertEqual(self.test_executor.stop_builder.call_count, 1) + # Ensure the build information is cleaned up. + self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + self.assertEqual(0, self.manager.num_workers()) - # Ensure the build information is cleaned up. - self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) - self.assertEqual(0, self.manager.num_workers()) + @async_test + def test_another_manager_takes_job(self): + # Prepare a job to be taken by another manager + test_component = yield From(self._setup_job_for_managers()) - @async_test - def test_another_manager_takes_job(self): - # Prepare a job to be taken by another manager - test_component = yield From(self._setup_job_for_managers()) + yield From( + self.manager._realm_callback( + KeyChange( + KeyEvent.DELETE, + slash_join(REALM_PREFIX, REALM_ID), + json.dumps( + { + "realm": REALM_ID, + "token": "beef", + "execution_id": "123", + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - yield From(self.manager._realm_callback( - KeyChange(KeyEvent.DELETE, - slash_join(REALM_PREFIX, REALM_ID), - json.dumps({'realm': REALM_ID, - 'token': 'beef', - 'execution_id': '123', - 'job_queue_item': self.mock_job.job_item})))) + self.unregister_component_callback.assert_called_once_with(test_component) - self.unregister_component_callback.assert_called_once_with(test_component) + # Ensure that the executor does not kill the job. + self.assertEqual(self.test_executor.stop_builder.call_count, 0) - # Ensure that the executor does not kill the job. 
- self.assertEqual(self.test_executor.stop_builder.call_count, 0) + # Ensure that we still have the build info, but not the component. + self.assertEqual(0, self.manager.num_workers()) + self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) - # Ensure that we still have the build info, but not the component. - self.assertEqual(0, self.manager.num_workers()) - self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + # Delete the job once it has "completed". + yield From( + self.manager._job_callback( + KeyChange( + KeyEvent.DELETE, + self.mock_job_key, + json.dumps( + { + "had_heartbeat": False, + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - # Delete the job once it has "completed". - yield From(self.manager._job_callback( - KeyChange(KeyEvent.DELETE, - self.mock_job_key, - json.dumps({'had_heartbeat': False, - 'job_queue_item': self.mock_job.job_item})))) + # Ensure the job was removed from the info, but stop was not called. + self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + self.assertEqual(self.test_executor.stop_builder.call_count, 0) - # Ensure the job was removed from the info, but stop was not called. - self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) - self.assertEqual(self.test_executor.stop_builder.call_count, 0) + @async_test + def test_job_started_by_other_manager(self): + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) - @async_test - def test_job_started_by_other_manager(self): - # Ensure that that the building callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(JOB_PREFIX, callback_keys) + # Send a signal to the callback that the job has been created. + yield From( + self.manager._job_callback( + KeyChange( + KeyEvent.CREATE, + self.mock_job_key, + json.dumps( + { + "had_heartbeat": False, + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - # Send a signal to the callback that the job has been created. - yield From(self.manager._job_callback( - KeyChange(KeyEvent.CREATE, - self.mock_job_key, - json.dumps({'had_heartbeat': False, - 'job_queue_item': self.mock_job.job_item})))) + # Ensure the create does nothing. + self.assertEqual(self.test_executor.stop_builder.call_count, 0) - # Ensure the create does nothing. 
- self.assertEqual(self.test_executor.stop_builder.call_count, 0) + @async_test + def test_expiring_worker_not_started(self): + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) - @async_test - def test_expiring_worker_not_started(self): - # Ensure that that the building callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(JOB_PREFIX, callback_keys) + # Send a signal to the callback that a worker has expired + yield From( + self.manager._job_callback( + KeyChange( + KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps( + { + "had_heartbeat": True, + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - # Send a signal to the callback that a worker has expired - yield From(self.manager._job_callback( - KeyChange(KeyEvent.EXPIRE, - self.mock_job_key, - json.dumps({'had_heartbeat': True, - 'job_queue_item': self.mock_job.job_item})))) + # Since the realm was never registered, expiration should do nothing. + self.assertEqual(self.test_executor.stop_builder.call_count, 0) - # Since the realm was never registered, expiration should do nothing. - self.assertEqual(self.test_executor.stop_builder.call_count, 0) + @async_test + def test_expiring_worker_started(self): + test_component = yield From(self._setup_job_for_managers()) - @async_test - def test_expiring_worker_started(self): - test_component = yield From(self._setup_job_for_managers()) + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) - # Ensure that that the building callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(JOB_PREFIX, callback_keys) + yield From( + self.manager._job_callback( + KeyChange( + KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps( + { + "had_heartbeat": True, + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - yield From(self.manager._job_callback( - KeyChange(KeyEvent.EXPIRE, - self.mock_job_key, - json.dumps({'had_heartbeat': True, - 'job_queue_item': self.mock_job.job_item})))) + self.test_executor.stop_builder.assert_called_once_with("123") + self.assertEqual(self.test_executor.stop_builder.call_count, 1) - self.test_executor.stop_builder.assert_called_once_with('123') - self.assertEqual(self.test_executor.stop_builder.call_count, 1) + @async_test + def test_buildjob_deleted(self): + test_component = yield From(self._setup_job_for_managers()) - @async_test - def test_buildjob_deleted(self): - test_component = yield From(self._setup_job_for_managers()) + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) - # Ensure that that the building callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(JOB_PREFIX, callback_keys) + # Send a signal to the callback that a worker has expired + yield From( + self.manager._job_callback( + KeyChange( + KeyEvent.DELETE, + self.mock_job_key, + json.dumps( + { + "had_heartbeat": False, + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - # Send a signal to the callback that a worker has expired - yield From(self.manager._job_callback( - KeyChange(KeyEvent.DELETE, - 
self.mock_job_key, - json.dumps({'had_heartbeat': False, - 'job_queue_item': self.mock_job.job_item})))) + self.assertEqual(self.test_executor.stop_builder.call_count, 0) + self.assertEqual(self.job_complete_callback.call_count, 0) + self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) - self.assertEqual(self.test_executor.stop_builder.call_count, 0) - self.assertEqual(self.job_complete_callback.call_count, 0) - self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + @async_test + def test_builder_never_starts(self): + test_component = yield From(self._setup_job_for_managers()) - @async_test - def test_builder_never_starts(self): - test_component = yield From(self._setup_job_for_managers()) + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) - # Ensure that that the building callbacks have been registered - callback_keys = [key for key in self.manager._orchestrator.callbacks] - self.assertIn(JOB_PREFIX, callback_keys) + # Send a signal to the callback that a worker has expired + yield From( + self.manager._job_callback( + KeyChange( + KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps( + { + "had_heartbeat": False, + "job_queue_item": self.mock_job.job_item, + } + ), + ) + ) + ) - # Send a signal to the callback that a worker has expired - yield From(self.manager._job_callback( - KeyChange(KeyEvent.EXPIRE, - self.mock_job_key, - json.dumps({'had_heartbeat': False, - 'job_queue_item': self.mock_job.job_item})))) + self.test_executor.stop_builder.assert_called_once_with("123") + self.assertEqual(self.test_executor.stop_builder.call_count, 1) - self.test_executor.stop_builder.assert_called_once_with('123') - self.assertEqual(self.test_executor.stop_builder.call_count, 1) + # Ensure the job was marked as incomplete, with an update_phase to True (so the DB record and + # logs are updated as well) + yield From( + self.job_complete_callback.assert_called_once_with( + ANY, BuildJobResult.INCOMPLETE, "MockExecutor", update_phase=True + ) + ) - # Ensure the job was marked as incomplete, with an update_phase to True (so the DB record and - # logs are updated as well) - yield From(self.job_complete_callback.assert_called_once_with(ANY, BuildJobResult.INCOMPLETE, - 'MockExecutor', - update_phase=True)) + @async_test + def test_change_worker(self): + # Send a signal to the callback that a worker key has been changed + self.manager._job_callback(KeyChange(KeyEvent.SET, self.mock_job_key, "value")) + self.assertEqual(self.test_executor.stop_builder.call_count, 0) - @async_test - def test_change_worker(self): - # Send a signal to the callback that a worker key has been changed - self.manager._job_callback(KeyChange(KeyEvent.SET, self.mock_job_key, 'value')) - self.assertEqual(self.test_executor.stop_builder.call_count, 0) + @async_test + def test_realm_expired(self): + test_component = yield From(self._setup_job_for_managers()) - @async_test - def test_realm_expired(self): - test_component = yield From(self._setup_job_for_managers()) + # Send a signal to the callback that a realm has expired + yield From( + self.manager._realm_callback( + KeyChange( + KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps( + { + "realm": REALM_ID, + "execution_id": "foobar", + "executor_name": "MockExecutor", + "job_queue_item": {"body": '{"build_uuid": "fakeid"}'}, + } + ), + ) + ) + ) - # Send a signal to the callback that a realm has expired - yield 
From(self.manager._realm_callback(KeyChange( - KeyEvent.EXPIRE, - self.mock_job_key, - json.dumps({ - 'realm': REALM_ID, - 'execution_id': 'foobar', - 'executor_name': 'MockExecutor', - 'job_queue_item': {'body': '{"build_uuid": "fakeid"}'}, - })))) - - # Ensure that the cleanup code for the executor was called. - self.test_executor.stop_builder.assert_called_once_with('foobar') - self.assertEqual(self.test_executor.stop_builder.call_count, 1) + # Ensure that the cleanup code for the executor was called. + self.test_executor.stop_builder.assert_called_once_with("foobar") + self.assertEqual(self.test_executor.stop_builder.call_count, 1) class TestEphemeral(EphemeralBuilderTestCase): - """ Simple unit tests for the ephemeral builder around config management, starting and stopping + """ Simple unit tests for the ephemeral builder around config management, starting and stopping jobs. """ - def setUp(self): - super(TestEphemeral, self).setUp() + def setUp(self): + super(TestEphemeral, self).setUp() - unregister_component_callback = Mock() - job_heartbeat_callback = Mock() + unregister_component_callback = Mock() + job_heartbeat_callback = Mock() - @coroutine - def job_complete_callback(*args, **kwargs): - raise Return() + @coroutine + def job_complete_callback(*args, **kwargs): + raise Return() - self.manager = EphemeralBuilderManager( - self._register_component, - unregister_component_callback, - job_heartbeat_callback, - job_complete_callback, - '127.0.0.1', - 30, - ) + self.manager = EphemeralBuilderManager( + self._register_component, + unregister_component_callback, + job_heartbeat_callback, + job_complete_callback, + "127.0.0.1", + 30, + ) - def tearDown(self): - super(TestEphemeral, self).tearDown() - self.manager.shutdown() + def tearDown(self): + super(TestEphemeral, self).tearDown() + self.manager.shutdown() - def test_verify_executor_oldconfig(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - self.manager.initialize({ - 'EXECUTOR': 'test', - 'EXECUTOR_CONFIG': dict(MINIMUM_RETRY_THRESHOLD=42), - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) + def test_verify_executor_oldconfig(self): + EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor + self.manager.initialize( + { + "EXECUTOR": "test", + "EXECUTOR_CONFIG": dict(MINIMUM_RETRY_THRESHOLD=42), + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) - # Ensure that we have a single test executor. - self.assertEqual(1, len(self.manager.registered_executors)) - self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold) - self.assertEqual('TestExecutor', self.manager.registered_executors[0].name) + # Ensure that we have a single test executor. + self.assertEqual(1, len(self.manager.registered_executors)) + self.assertEqual( + 42, self.manager.registered_executors[0].minimum_retry_threshold + ) + self.assertEqual("TestExecutor", self.manager.registered_executors[0].name) - def test_verify_executor_newconfig(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - self.manager.initialize({ - 'EXECUTORS': [{ - 'EXECUTOR': 'test', - 'MINIMUM_RETRY_THRESHOLD': 42 - }], - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) + def test_verify_executor_newconfig(self): + EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor + self.manager.initialize( + { + "EXECUTORS": [{"EXECUTOR": "test", "MINIMUM_RETRY_THRESHOLD": 42}], + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) - # Ensure that we have a single test executor. 
- self.assertEqual(1, len(self.manager.registered_executors)) - self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold) + # Ensure that we have a single test executor. + self.assertEqual(1, len(self.manager.registered_executors)) + self.assertEqual( + 42, self.manager.registered_executors[0].minimum_retry_threshold + ) + + def test_multiple_executors_samename(self): + EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor + EphemeralBuilderManager.EXECUTORS["anotherexecutor"] = TestExecutor + + with self.assertRaises(Exception): + self.manager.initialize( + { + "EXECUTORS": [ + { + "NAME": "primary", + "EXECUTOR": "test", + "MINIMUM_RETRY_THRESHOLD": 42, + }, + { + "NAME": "primary", + "EXECUTOR": "anotherexecutor", + "MINIMUM_RETRY_THRESHOLD": 24, + }, + ], + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) + + def test_verify_multiple_executors(self): + EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor + EphemeralBuilderManager.EXECUTORS["anotherexecutor"] = TestExecutor + + self.manager.initialize( + { + "EXECUTORS": [ + { + "NAME": "primary", + "EXECUTOR": "test", + "MINIMUM_RETRY_THRESHOLD": 42, + }, + { + "NAME": "secondary", + "EXECUTOR": "anotherexecutor", + "MINIMUM_RETRY_THRESHOLD": 24, + }, + ], + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) + + # Ensure that we have a two test executors. + self.assertEqual(2, len(self.manager.registered_executors)) + self.assertEqual( + 42, self.manager.registered_executors[0].minimum_retry_threshold + ) + self.assertEqual( + 24, self.manager.registered_executors[1].minimum_retry_threshold + ) + + def test_skip_invalid_executor(self): + self.manager.initialize( + { + "EXECUTORS": [{"EXECUTOR": "unknown", "MINIMUM_RETRY_THRESHOLD": 42}], + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) + + self.assertEqual(0, len(self.manager.registered_executors)) + + @async_test + def test_schedule_job_namespace_filter(self): + EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor + self.manager.initialize( + { + "EXECUTORS": [ + {"EXECUTOR": "test", "NAMESPACE_WHITELIST": ["something"]} + ], + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) + + # Try with a build job in an invalid namespace. + build_job = self._create_build_job(namespace="somethingelse") + result = yield From(self.manager.schedule(build_job)) + self.assertFalse(result[0]) + + # Try with a valid namespace. + build_job = self._create_build_job(namespace="something") + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + @async_test + def test_schedule_job_retries_filter(self): + EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor + self.manager.initialize( + { + "EXECUTORS": [{"EXECUTOR": "test", "MINIMUM_RETRY_THRESHOLD": 2}], + "ORCHESTRATOR": {"MEM_CONFIG": None}, + } + ) + + # Try with a build job that has too few retries. + build_job = self._create_build_job(retries=1) + result = yield From(self.manager.schedule(build_job)) + self.assertFalse(result[0]) + + # Try with a valid job. 
+        build_job = self._create_build_job(retries=2)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+    @async_test
+    def test_schedule_job_executor_fallback(self):
+        EphemeralBuilderManager.EXECUTORS["primary"] = TestExecutor
+        EphemeralBuilderManager.EXECUTORS["secondary"] = TestExecutor
+
+        self.manager.initialize(
+            {
+                "EXECUTORS": [
+                    {
+                        "NAME": "primary",
+                        "EXECUTOR": "primary",
+                        "NAMESPACE_WHITELIST": ["something"],
+                        "MINIMUM_RETRY_THRESHOLD": 3,
+                    },
+                    {
+                        "NAME": "secondary",
+                        "EXECUTOR": "secondary",
+                        "MINIMUM_RETRY_THRESHOLD": 2,
+                    },
+                ],
+                "ALLOWED_WORKER_COUNT": 5,
+                "ORCHESTRATOR": {"MEM_CONFIG": None},
+            }
+        )
+
+        # Try a job not matching the primary's namespace filter. Should schedule on secondary.
+        build_job = self._create_build_job(namespace="somethingelse")
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+        self.assertIsNone(self.manager.registered_executors[0].job_started)
+        self.assertIsNotNone(self.manager.registered_executors[1].job_started)
+
+        self.manager.registered_executors[0].job_started = None
+        self.manager.registered_executors[1].job_started = None
+
+        # Try a job not matching the primary's retry minimum. Should schedule on secondary.
+        build_job = self._create_build_job(namespace="something", retries=2)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+        self.assertIsNone(self.manager.registered_executors[0].job_started)
+        self.assertIsNotNone(self.manager.registered_executors[1].job_started)
+
+        self.manager.registered_executors[0].job_started = None
+        self.manager.registered_executors[1].job_started = None
+
+        # Try a job matching the primary. Should schedule on the primary.
+        build_job = self._create_build_job(namespace="something", retries=3)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+        self.assertIsNotNone(self.manager.registered_executors[0].job_started)
+        self.assertIsNone(self.manager.registered_executors[1].job_started)
+
+        self.manager.registered_executors[0].job_started = None
+        self.manager.registered_executors[1].job_started = None
+
+        # Try a job not matching either's restrictions.
+        build_job = self._create_build_job(namespace="somethingelse", retries=1)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertFalse(result[0])
+
+        self.assertIsNone(self.manager.registered_executors[0].job_started)
+        self.assertIsNone(self.manager.registered_executors[1].job_started)
+
+        self.manager.registered_executors[0].job_started = None
+        self.manager.registered_executors[1].job_started = None
+
+    @async_test
+    def test_schedule_job_single_executor(self):
+        EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor
+
+        self.manager.initialize(
+            {
+                "EXECUTOR": "test",
+                "EXECUTOR_CONFIG": {},
+                "ALLOWED_WORKER_COUNT": 5,
+                "ORCHESTRATOR": {"MEM_CONFIG": None},
+            }
+        )
+
+        build_job = self._create_build_job(namespace="something", retries=3)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+        self.assertIsNotNone(self.manager.registered_executors[0].job_started)
+        self.manager.registered_executors[0].job_started = None
+
+        build_job = self._create_build_job(namespace="something", retries=0)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+        self.assertIsNotNone(self.manager.registered_executors[0].job_started)
+        self.manager.registered_executors[0].job_started = None
+
+    @async_test
+    def test_executor_exception(self):
+        EphemeralBuilderManager.EXECUTORS["bad"] = BadExecutor
+
+        self.manager.initialize(
+            {
+                "EXECUTOR": "bad",
+                "EXECUTOR_CONFIG": {},
+                "ORCHESTRATOR": {"MEM_CONFIG": None},
+            }
+        )
+
+        build_job = self._create_build_job(namespace="something", retries=3)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertFalse(result[0])
+
+    @async_test
+    def test_schedule_and_stop(self):
+        EphemeralBuilderManager.EXECUTORS["test"] = TestExecutor
+
+        self.manager.initialize(
+            {
+                "EXECUTOR": "test",
+                "EXECUTOR_CONFIG": {},
+                "ORCHESTRATOR": {"MEM_CONFIG": None},
+            }
+        )
+
+        # Start the build job.
+        build_job = self._create_build_job(namespace="something", retries=3)
+        result = yield From(self.manager.schedule(build_job))
+        self.assertTrue(result[0])
+
+        executor = self.manager.registered_executors[0]
+        self.assertIsNotNone(executor.job_started)
+
+        # Register the realm so the build information is added.
+        yield From(
+            self.manager._register_realm(
+                {
+                    "realm": str(uuid.uuid4()),
+                    "token": str(uuid.uuid4()),
+                    "execution_id": executor.job_started,
+                    "executor_name": "TestExecutor",
+                    "build_uuid": build_job.build_uuid,
+                    "job_queue_item": build_job.job_item,
+                }
+            )
+        )
+
+        # Stop the build job.
+ yield From(self.manager.kill_builder_executor(build_job.build_uuid)) + self.assertEqual(executor.job_stopped, executor.job_started) - def test_multiple_executors_samename(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - EphemeralBuilderManager.EXECUTORS['anotherexecutor'] = TestExecutor - - with self.assertRaises(Exception): - self.manager.initialize({ - 'EXECUTORS': [ - { - 'NAME': 'primary', - 'EXECUTOR': 'test', - 'MINIMUM_RETRY_THRESHOLD': 42 - }, - { - 'NAME': 'primary', - 'EXECUTOR': 'anotherexecutor', - 'MINIMUM_RETRY_THRESHOLD': 24 - }, - ], - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - - def test_verify_multiple_executors(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - EphemeralBuilderManager.EXECUTORS['anotherexecutor'] = TestExecutor - - self.manager.initialize({ - 'EXECUTORS': [ - { - 'NAME': 'primary', - 'EXECUTOR': 'test', - 'MINIMUM_RETRY_THRESHOLD': 42 - }, - { - 'NAME': 'secondary', - 'EXECUTOR': 'anotherexecutor', - 'MINIMUM_RETRY_THRESHOLD': 24 - }, - ], - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - # Ensure that we have a two test executors. - self.assertEqual(2, len(self.manager.registered_executors)) - self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold) - self.assertEqual(24, self.manager.registered_executors[1].minimum_retry_threshold) - - def test_skip_invalid_executor(self): - self.manager.initialize({ - 'EXECUTORS': [ - { - 'EXECUTOR': 'unknown', - 'MINIMUM_RETRY_THRESHOLD': 42 - }, - ], - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - self.assertEqual(0, len(self.manager.registered_executors)) - - @async_test - def test_schedule_job_namespace_filter(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - self.manager.initialize({ - 'EXECUTORS': [{ - 'EXECUTOR': 'test', - 'NAMESPACE_WHITELIST': ['something'], - }], - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - # Try with a build job in an invalid namespace. - build_job = self._create_build_job(namespace='somethingelse') - result = yield From(self.manager.schedule(build_job)) - self.assertFalse(result[0]) - - # Try with a valid namespace. - build_job = self._create_build_job(namespace='something') - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - @async_test - def test_schedule_job_retries_filter(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - self.manager.initialize({ - 'EXECUTORS': [{ - 'EXECUTOR': 'test', - 'MINIMUM_RETRY_THRESHOLD': 2, - }], - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - # Try with a build job that has too few retries. - build_job = self._create_build_job(retries=1) - result = yield From(self.manager.schedule(build_job)) - self.assertFalse(result[0]) - - # Try with a valid job. - build_job = self._create_build_job(retries=2) - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - @async_test - def test_schedule_job_executor_fallback(self): - EphemeralBuilderManager.EXECUTORS['primary'] = TestExecutor - EphemeralBuilderManager.EXECUTORS['secondary'] = TestExecutor - - self.manager.initialize({ - 'EXECUTORS': [ - { - 'NAME': 'primary', - 'EXECUTOR': 'primary', - 'NAMESPACE_WHITELIST': ['something'], - 'MINIMUM_RETRY_THRESHOLD': 3, - }, - { - 'NAME': 'secondary', - 'EXECUTOR': 'secondary', - 'MINIMUM_RETRY_THRESHOLD': 2, - }, - ], - 'ALLOWED_WORKER_COUNT': 5, - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - # Try a job not matching the primary's namespace filter. Should schedule on secondary. 
- build_job = self._create_build_job(namespace='somethingelse') - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - self.assertIsNone(self.manager.registered_executors[0].job_started) - self.assertIsNotNone(self.manager.registered_executors[1].job_started) - - self.manager.registered_executors[0].job_started = None - self.manager.registered_executors[1].job_started = None - - # Try a job not matching the primary's retry minimum. Should schedule on secondary. - build_job = self._create_build_job(namespace='something', retries=2) - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - self.assertIsNone(self.manager.registered_executors[0].job_started) - self.assertIsNotNone(self.manager.registered_executors[1].job_started) - - self.manager.registered_executors[0].job_started = None - self.manager.registered_executors[1].job_started = None - - # Try a job matching the primary. Should schedule on the primary. - build_job = self._create_build_job(namespace='something', retries=3) - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - self.assertIsNotNone(self.manager.registered_executors[0].job_started) - self.assertIsNone(self.manager.registered_executors[1].job_started) - - self.manager.registered_executors[0].job_started = None - self.manager.registered_executors[1].job_started = None - - # Try a job not matching either's restrictions. - build_job = self._create_build_job(namespace='somethingelse', retries=1) - result = yield From(self.manager.schedule(build_job)) - self.assertFalse(result[0]) - - self.assertIsNone(self.manager.registered_executors[0].job_started) - self.assertIsNone(self.manager.registered_executors[1].job_started) - - self.manager.registered_executors[0].job_started = None - self.manager.registered_executors[1].job_started = None - - - @async_test - def test_schedule_job_single_executor(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - - self.manager.initialize({ - 'EXECUTOR': 'test', - 'EXECUTOR_CONFIG': {}, - 'ALLOWED_WORKER_COUNT': 5, - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - build_job = self._create_build_job(namespace='something', retries=3) - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - self.assertIsNotNone(self.manager.registered_executors[0].job_started) - self.manager.registered_executors[0].job_started = None - - - build_job = self._create_build_job(namespace='something', retries=0) - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - self.assertIsNotNone(self.manager.registered_executors[0].job_started) - self.manager.registered_executors[0].job_started = None - - @async_test - def test_executor_exception(self): - EphemeralBuilderManager.EXECUTORS['bad'] = BadExecutor - - self.manager.initialize({ - 'EXECUTOR': 'bad', - 'EXECUTOR_CONFIG': {}, - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - build_job = self._create_build_job(namespace='something', retries=3) - result = yield From(self.manager.schedule(build_job)) - self.assertFalse(result[0]) - - @async_test - def test_schedule_and_stop(self): - EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor - - self.manager.initialize({ - 'EXECUTOR': 'test', - 'EXECUTOR_CONFIG': {}, - 'ORCHESTRATOR': {'MEM_CONFIG': None}, - }) - - # Start the build job. 
- build_job = self._create_build_job(namespace='something', retries=3) - result = yield From(self.manager.schedule(build_job)) - self.assertTrue(result[0]) - - executor = self.manager.registered_executors[0] - self.assertIsNotNone(executor.job_started) - - # Register the realm so the build information is added. - yield From(self.manager._register_realm({ - 'realm': str(uuid.uuid4()), - 'token': str(uuid.uuid4()), - 'execution_id': executor.job_started, - 'executor_name': 'TestExecutor', - 'build_uuid': build_job.build_uuid, - 'job_queue_item': build_job.job_item, - })) - - # Stop the build job. - yield From(self.manager.kill_builder_executor(build_job.build_uuid)) - self.assertEqual(executor.job_stopped, executor.job_started) - - -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/buildtrigger/__init__.py b/buildtrigger/__init__.py index 8a794cf96..9c21d9025 100644 --- a/buildtrigger/__init__.py +++ b/buildtrigger/__init__.py @@ -2,4 +2,3 @@ import buildtrigger.bitbuckethandler import buildtrigger.customhandler import buildtrigger.githubhandler import buildtrigger.gitlabhandler - diff --git a/buildtrigger/basehandler.py b/buildtrigger/basehandler.py index 8d9b0f753..08bdb68ea 100644 --- a/buildtrigger/basehandler.py +++ b/buildtrigger/basehandler.py @@ -9,359 +9,360 @@ from data import model from buildtrigger.triggerutil import get_trigger_config, InvalidServiceException NAMESPACES_SCHEMA = { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'personal': { - 'type': 'boolean', - 'description': 'True if the namespace is the user\'s personal namespace', - }, - 'score': { - 'type': 'number', - 'description': 'Score of the relevance of the namespace', - }, - 'avatar_url': { - 'type': ['string', 'null'], - 'description': 'URL of the avatar for this namespace', - }, - 'url': { - 'type': 'string', - 'description': 'URL of the website to view the namespace', - }, - 'id': { - 'type': 'string', - 'description': 'Trigger-internal ID of the namespace', - }, - 'title': { - 'type': 'string', - 'description': 'Human-readable title of the namespace', - }, + "type": "array", + "items": { + "type": "object", + "properties": { + "personal": { + "type": "boolean", + "description": "True if the namespace is the user's personal namespace", + }, + "score": { + "type": "number", + "description": "Score of the relevance of the namespace", + }, + "avatar_url": { + "type": ["string", "null"], + "description": "URL of the avatar for this namespace", + }, + "url": { + "type": "string", + "description": "URL of the website to view the namespace", + }, + "id": { + "type": "string", + "description": "Trigger-internal ID of the namespace", + }, + "title": { + "type": "string", + "description": "Human-readable title of the namespace", + }, + }, + "required": ["personal", "score", "avatar_url", "id", "title"], }, - 'required': ['personal', 'score', 'avatar_url', 'id', 'title'], - }, } BUILD_SOURCES_SCHEMA = { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': { - 'type': 'string', - 'description': 'The name of the repository, without its namespace', - }, - 'full_name': { - 'type': 'string', - 'description': 'The name of the repository, with its namespace', - }, - 'description': { - 'type': 'string', - 'description': 'The description of the repository. 
May be an empty string', - }, - 'last_updated': { - 'type': 'number', - 'description': 'The date/time when the repository was last updated, since epoch in UTC', - }, - 'url': { - 'type': 'string', - 'description': 'The URL at which to view the repository in the browser', - }, - 'has_admin_permissions': { - 'type': 'boolean', - 'description': 'True if the current user has admin permissions on the repository', - }, - 'private': { - 'type': 'boolean', - 'description': 'True if the repository is private', - }, + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the repository, without its namespace", + }, + "full_name": { + "type": "string", + "description": "The name of the repository, with its namespace", + }, + "description": { + "type": "string", + "description": "The description of the repository. May be an empty string", + }, + "last_updated": { + "type": "number", + "description": "The date/time when the repository was last updated, since epoch in UTC", + }, + "url": { + "type": "string", + "description": "The URL at which to view the repository in the browser", + }, + "has_admin_permissions": { + "type": "boolean", + "description": "True if the current user has admin permissions on the repository", + }, + "private": { + "type": "boolean", + "description": "True if the repository is private", + }, + }, + "required": [ + "name", + "full_name", + "description", + "last_updated", + "has_admin_permissions", + "private", + ], }, - 'required': ['name', 'full_name', 'description', 'last_updated', - 'has_admin_permissions', 'private'], - }, } METADATA_SCHEMA = { - 'type': 'object', - 'properties': { - 'commit': { - 'type': 'string', - 'description': 'first 7 characters of the SHA-1 identifier for a git commit', - 'pattern': '^([A-Fa-f0-9]{7,})$', - }, - 'git_url': { - 'type': 'string', - 'description': 'The GIT url to use for the checkout', - }, - 'ref': { - 'type': 'string', - 'description': 'git reference for a git commit', - 'pattern': r'^refs\/(heads|tags|remotes)\/(.+)$', - }, - 'default_branch': { - 'type': 'string', - 'description': 'default branch of the git repository', - }, - 'commit_info': { - 'type': 'object', - 'description': 'metadata about a git commit', - 'properties': { - 'url': { - 'type': 'string', - 'description': 'URL to view a git commit', + "type": "object", + "properties": { + "commit": { + "type": "string", + "description": "first 7 characters of the SHA-1 identifier for a git commit", + "pattern": "^([A-Fa-f0-9]{7,})$", }, - 'message': { - 'type': 'string', - 'description': 'git commit message', + "git_url": { + "type": "string", + "description": "The GIT url to use for the checkout", }, - 'date': { - 'type': 'string', - 'description': 'timestamp for a git commit' + "ref": { + "type": "string", + "description": "git reference for a git commit", + "pattern": r"^refs\/(heads|tags|remotes)\/(.+)$", }, - 'author': { - 'type': 'object', - 'description': 'metadata about the author of a git commit', - 'properties': { - 'username': { - 'type': 'string', - 'description': 'username of the author', - }, - 'url': { - 'type': 'string', - 'description': 'URL to view the profile of the author', - }, - 'avatar_url': { - 'type': 'string', - 'description': 'URL to view the avatar of the author', - }, - }, - 'required': ['username'], + "default_branch": { + "type": "string", + "description": "default branch of the git repository", }, - 'committer': { - 'type': 'object', - 'description': 'metadata about the 
committer of a git commit', - 'properties': { - 'username': { - 'type': 'string', - 'description': 'username of the committer', + "commit_info": { + "type": "object", + "description": "metadata about a git commit", + "properties": { + "url": {"type": "string", "description": "URL to view a git commit"}, + "message": {"type": "string", "description": "git commit message"}, + "date": {"type": "string", "description": "timestamp for a git commit"}, + "author": { + "type": "object", + "description": "metadata about the author of a git commit", + "properties": { + "username": { + "type": "string", + "description": "username of the author", + }, + "url": { + "type": "string", + "description": "URL to view the profile of the author", + }, + "avatar_url": { + "type": "string", + "description": "URL to view the avatar of the author", + }, + }, + "required": ["username"], + }, + "committer": { + "type": "object", + "description": "metadata about the committer of a git commit", + "properties": { + "username": { + "type": "string", + "description": "username of the committer", + }, + "url": { + "type": "string", + "description": "URL to view the profile of the committer", + }, + "avatar_url": { + "type": "string", + "description": "URL to view the avatar of the committer", + }, + }, + "required": ["username"], + }, }, - 'url': { - 'type': 'string', - 'description': 'URL to view the profile of the committer', - }, - 'avatar_url': { - 'type': 'string', - 'description': 'URL to view the avatar of the committer', - }, - }, - 'required': ['username'], + "required": ["message"], }, - }, - 'required': ['message'], }, - }, - 'required': ['commit', 'git_url'], + "required": ["commit", "git_url"], } @add_metaclass(ABCMeta) class BuildTriggerHandler(object): - def __init__(self, trigger, override_config=None): - self.trigger = trigger - self.config = override_config or get_trigger_config(trigger) + def __init__(self, trigger, override_config=None): + self.trigger = trigger + self.config = override_config or get_trigger_config(trigger) - @property - def auth_token(self): - """ Returns the auth token for the trigger. """ - # NOTE: This check is for testing. - if isinstance(self.trigger.auth_token, str): - return self.trigger.auth_token + @property + def auth_token(self): + """ Returns the auth token for the trigger. """ + # NOTE: This check is for testing. + if isinstance(self.trigger.auth_token, str): + return self.trigger.auth_token - # TODO(remove-unenc): Remove legacy field. - if self.trigger.secure_auth_token is not None: - return self.trigger.secure_auth_token.decrypt() + # TODO(remove-unenc): Remove legacy field. + if self.trigger.secure_auth_token is not None: + return self.trigger.secure_auth_token.decrypt() - if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS): - return self.trigger.auth_token + if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS): + return self.trigger.auth_token - return None + return None - @abstractmethod - def load_dockerfile_contents(self): - """ + @abstractmethod + def load_dockerfile_contents(self): + """ Loads the Dockerfile found for the trigger's config and returns them or None if none could be found/loaded. """ - pass + pass - @abstractmethod - def list_build_source_namespaces(self): - """ + @abstractmethod + def list_build_source_namespaces(self): + """ Take the auth information for the specific trigger type and load the list of namespaces that can contain build sources. 
""" - pass + pass - @abstractmethod - def list_build_sources_for_namespace(self, namespace): - """ + @abstractmethod + def list_build_sources_for_namespace(self, namespace): + """ Take the auth information for the specific trigger type and load the list of repositories under the given namespace. """ - pass + pass - @abstractmethod - def list_build_subdirs(self): - """ + @abstractmethod + def list_build_subdirs(self): + """ Take the auth information and the specified config so far and list all of the possible subdirs containing dockerfiles. """ - pass + pass - @abstractmethod - def handle_trigger_request(self, request): - """ + @abstractmethod + def handle_trigger_request(self, request): + """ Transform the incoming request data into a set of actions. Returns a PreparedBuild. """ - pass + pass - @abstractmethod - def is_active(self): - """ + @abstractmethod + def is_active(self): + """ Returns True if the current build trigger is active. Inactive means further setup is needed. """ - pass + pass - @abstractmethod - def activate(self, standard_webhook_url): - """ + @abstractmethod + def activate(self, standard_webhook_url): + """ Activates the trigger for the service, with the given new configuration. Returns new public and private config that should be stored if successful. """ - pass + pass - @abstractmethod - def deactivate(self): - """ + @abstractmethod + def deactivate(self): + """ Deactivates the trigger for the service, removing any hooks installed in the remote service. Returns the new config that should be stored if this trigger is going to be re-activated. """ - pass + pass - @abstractmethod - def manual_start(self, run_parameters=None): - """ + @abstractmethod + def manual_start(self, run_parameters=None): + """ Manually creates a repository build for this trigger. Returns a PreparedBuild. """ - pass + pass - @abstractmethod - def list_field_values(self, field_name, limit=None): - """ + @abstractmethod + def list_field_values(self, field_name, limit=None): + """ Lists all values for the given custom trigger field. For example, a trigger might have a field named "branches", and this method would return all branches. """ - pass + pass - @abstractmethod - def get_repository_url(self): - """ Returns the URL of the current trigger's repository. Note that this operation + @abstractmethod + def get_repository_url(self): + """ Returns the URL of the current trigger's repository. Note that this operation can be called in a loop, so it should be as fast as possible. """ - pass + pass - @classmethod - def filename_is_dockerfile(cls, file_name): - """ Returns whether the file is named Dockerfile or follows the convention .Dockerfile""" - return file_name.endswith(".Dockerfile") or u"Dockerfile" == file_name + @classmethod + def filename_is_dockerfile(cls, file_name): + """ Returns whether the file is named Dockerfile or follows the convention .Dockerfile""" + return file_name.endswith(".Dockerfile") or u"Dockerfile" == file_name - @classmethod - def service_name(cls): - """ + @classmethod + def service_name(cls): + """ Particular service implemented by subclasses. 
""" - raise NotImplementedError + raise NotImplementedError - @classmethod - def get_handler(cls, trigger, override_config=None): - for subc in cls.__subclasses__(): - if subc.service_name() == trigger.service.name: - return subc(trigger, override_config) + @classmethod + def get_handler(cls, trigger, override_config=None): + for subc in cls.__subclasses__(): + if subc.service_name() == trigger.service.name: + return subc(trigger, override_config) - raise InvalidServiceException('Unable to find service: %s' % trigger.service.name) + raise InvalidServiceException( + "Unable to find service: %s" % trigger.service.name + ) - def put_config_key(self, key, value): - """ Updates a config key in the trigger, saving it to the DB. """ - self.config[key] = value - model.build.update_build_trigger(self.trigger, self.config) + def put_config_key(self, key, value): + """ Updates a config key in the trigger, saving it to the DB. """ + self.config[key] = value + model.build.update_build_trigger(self.trigger, self.config) - def set_auth_token(self, auth_token): - """ Sets the auth token for the trigger, saving it to the DB. """ - model.build.update_build_trigger(self.trigger, self.config, auth_token=auth_token) + def set_auth_token(self, auth_token): + """ Sets the auth token for the trigger, saving it to the DB. """ + model.build.update_build_trigger( + self.trigger, self.config, auth_token=auth_token + ) - def get_dockerfile_path(self): - """ Returns the normalized path to the Dockerfile found in the subdirectory + def get_dockerfile_path(self): + """ Returns the normalized path to the Dockerfile found in the subdirectory in the config. """ - dockerfile_path = self.config.get('dockerfile_path') or 'Dockerfile' - if dockerfile_path[0] == '/': - dockerfile_path = dockerfile_path[1:] - return dockerfile_path + dockerfile_path = self.config.get("dockerfile_path") or "Dockerfile" + if dockerfile_path[0] == "/": + dockerfile_path = dockerfile_path[1:] + return dockerfile_path - def prepare_build(self, metadata, is_manual=False): - # Ensure that the metadata meets the scheme. - validate(metadata, METADATA_SCHEMA) + def prepare_build(self, metadata, is_manual=False): + # Ensure that the metadata meets the scheme. 
+ validate(metadata, METADATA_SCHEMA) - config = self.config - ref = metadata.get('ref', None) - commit_sha = metadata['commit'] - default_branch = metadata.get('default_branch', None) - prepared = PreparedBuild(self.trigger) - prepared.name_from_sha(commit_sha) - prepared.subdirectory = config.get('dockerfile_path', None) - prepared.context = config.get('context', None) - prepared.is_manual = is_manual - prepared.metadata = metadata + config = self.config + ref = metadata.get("ref", None) + commit_sha = metadata["commit"] + default_branch = metadata.get("default_branch", None) + prepared = PreparedBuild(self.trigger) + prepared.name_from_sha(commit_sha) + prepared.subdirectory = config.get("dockerfile_path", None) + prepared.context = config.get("context", None) + prepared.is_manual = is_manual + prepared.metadata = metadata - if ref is not None: - prepared.tags_from_ref(ref, default_branch) - else: - prepared.tags = [commit_sha[:7]] + if ref is not None: + prepared.tags_from_ref(ref, default_branch) + else: + prepared.tags = [commit_sha[:7]] - return prepared + return prepared - @classmethod - def build_sources_response(cls, sources): - validate(sources, BUILD_SOURCES_SCHEMA) - return sources + @classmethod + def build_sources_response(cls, sources): + validate(sources, BUILD_SOURCES_SCHEMA) + return sources - @classmethod - def build_namespaces_response(cls, namespaces_dict): - namespaces = list(namespaces_dict.values()) - validate(namespaces, NAMESPACES_SCHEMA) - return namespaces + @classmethod + def build_namespaces_response(cls, namespaces_dict): + namespaces = list(namespaces_dict.values()) + validate(namespaces, NAMESPACES_SCHEMA) + return namespaces - @classmethod - def get_parent_directory_mappings(cls, dockerfile_path, current_paths=None): - """ Returns a map of dockerfile_paths to it's possible contexts. """ - if dockerfile_path == "": - return {} + @classmethod + def get_parent_directory_mappings(cls, dockerfile_path, current_paths=None): + """ Returns a map of dockerfile_paths to it's possible contexts. 
""" + if dockerfile_path == "": + return {} - if dockerfile_path[0] != os.path.sep: - dockerfile_path = os.path.sep + dockerfile_path + if dockerfile_path[0] != os.path.sep: + dockerfile_path = os.path.sep + dockerfile_path - dockerfile_path = os.path.normpath(dockerfile_path) - all_paths = set() - path, _ = os.path.split(dockerfile_path) - if path == "": - path = os.path.sep + dockerfile_path = os.path.normpath(dockerfile_path) + all_paths = set() + path, _ = os.path.split(dockerfile_path) + if path == "": + path = os.path.sep - all_paths.add(path) - for i in range(1, len(path.split(os.path.sep))): - path, _ = os.path.split(path) - all_paths.add(path) + all_paths.add(path) + for i in range(1, len(path.split(os.path.sep))): + path, _ = os.path.split(path) + all_paths.add(path) - if current_paths: - return dict({dockerfile_path: list(all_paths)}, **current_paths) + if current_paths: + return dict({dockerfile_path: list(all_paths)}, **current_paths) - return {dockerfile_path: list(all_paths)} + return {dockerfile_path: list(all_paths)} diff --git a/buildtrigger/bitbuckethandler.py b/buildtrigger/bitbuckethandler.py index 9573f5c60..c74a06c0d 100644 --- a/buildtrigger/bitbuckethandler.py +++ b/buildtrigger/bitbuckethandler.py @@ -9,537 +9,551 @@ from jsonschema import validate from app import app, get_app_url from buildtrigger.basehandler import BuildTriggerHandler -from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, - TriggerDeactivationException, TriggerStartException, - InvalidPayloadException, TriggerProviderException, - SkipRequestException, - determine_build_ref, raise_if_skipped_build, - find_matching_branches) +from buildtrigger.triggerutil import ( + RepositoryReadException, + TriggerActivationException, + TriggerDeactivationException, + TriggerStartException, + InvalidPayloadException, + TriggerProviderException, + SkipRequestException, + determine_build_ref, + raise_if_skipped_build, + find_matching_branches, +) from util.dict_wrappers import JSONPathDict, SafeDictSetter from util.security.ssh import generate_ssh_keypair logger = logging.getLogger(__name__) -_BITBUCKET_COMMIT_URL = 'https://bitbucket.org/%s/commits/%s' -_RAW_AUTHOR_REGEX = re.compile(r'.*<(.+)>') +_BITBUCKET_COMMIT_URL = "https://bitbucket.org/%s/commits/%s" +_RAW_AUTHOR_REGEX = re.compile(r".*<(.+)>") BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = { - 'type': 'object', - 'properties': { - 'repository': { - 'type': 'object', - 'properties': { - 'full_name': { - 'type': 'string', - }, - }, - 'required': ['full_name'], - }, # /Repository - 'push': { - 'type': 'object', - 'properties': { - 'changes': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'new': { - 'type': 'object', - 'properties': { - 'target': { - 'type': 'object', - 'properties': { - 'hash': { - 'type': 'string' - }, - 'message': { - 'type': 'string' - }, - 'date': { - 'type': 'string' - }, - 'author': { - 'type': 'object', - 'properties': { - 'user': { - 'type': 'object', - 'properties': { - 'display_name': { - 'type': 'string', - }, - 'account_id': { - 'type': 'string', - }, - 'links': { - 'type': 'object', - 'properties': { - 'avatar': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - }, - }, - 'required': ['href'], - }, + "type": "object", + "properties": { + "repository": { + "type": "object", + "properties": {"full_name": {"type": "string"}}, + "required": ["full_name"], + }, # /Repository + "push": { + "type": "object", + "properties": { + "changes": { + "type": "array", + 
"items": { + "type": "object", + "properties": { + "new": { + "type": "object", + "properties": { + "target": { + "type": "object", + "properties": { + "hash": {"type": "string"}, + "message": {"type": "string"}, + "date": {"type": "string"}, + "author": { + "type": "object", + "properties": { + "user": { + "type": "object", + "properties": { + "display_name": { + "type": "string" + }, + "account_id": { + "type": "string" + }, + "links": { + "type": "object", + "properties": { + "avatar": { + "type": "object", + "properties": { + "href": { + "type": "string" + } + }, + "required": [ + "href" + ], + } + }, + "required": ["avatar"], + }, # /User + }, + } # /Author + }, + }, + }, + "required": ["hash", "message", "date"], + } # /Target }, - 'required': ['avatar'], - }, # /User - }, - }, # /Author + "required": ["name", "target"], + } # /New }, - }, - }, - 'required': ['hash', 'message', 'date'], - }, # /Target - }, - 'required': ['name', 'target'], - }, # /New + }, # /Changes item + } # /Changes }, - }, # /Changes item - }, # /Changes - }, - 'required': ['changes'], - }, # / Push - }, - 'actor': { - 'type': 'object', - 'properties': { - 'account_id': { - 'type': 'string', - }, - 'display_name': { - 'type': 'string', - }, - 'links': { - 'type': 'object', - 'properties': { - 'avatar': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - }, - }, - 'required': ['href'], - }, - }, - 'required': ['avatar'], - }, + "required": ["changes"], + }, # / Push }, - }, # /Actor - 'required': ['push', 'repository'], -} # /Root + "actor": { + "type": "object", + "properties": { + "account_id": {"type": "string"}, + "display_name": {"type": "string"}, + "links": { + "type": "object", + "properties": { + "avatar": { + "type": "object", + "properties": {"href": {"type": "string"}}, + "required": ["href"], + } + }, + "required": ["avatar"], + }, + }, + }, # /Actor + "required": ["push", "repository"], +} # /Root BITBUCKET_COMMIT_INFO_SCHEMA = { - 'type': 'object', - 'properties': { - 'node': { - 'type': 'string', + "type": "object", + "properties": { + "node": {"type": "string"}, + "message": {"type": "string"}, + "timestamp": {"type": "string"}, + "raw_author": {"type": "string"}, }, - 'message': { - 'type': 'string', - }, - 'timestamp': { - 'type': 'string', - }, - 'raw_author': { - 'type': 'string', - }, - }, - 'required': ['node', 'message', 'timestamp'] + "required": ["node", "message", "timestamp"], } -def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name, lookup_author): - """ Returns the BitBucket commit information transformed into our own + +def get_transformed_commit_info( + bb_commit, ref, default_branch, repository_name, lookup_author +): + """ Returns the BitBucket commit information transformed into our own payload format. 
""" - try: - validate(bb_commit, BITBUCKET_COMMIT_INFO_SCHEMA) - except Exception as exc: - logger.exception('Exception when validating Bitbucket commit information: %s from %s', exc.message, bb_commit) - raise InvalidPayloadException(exc.message) + try: + validate(bb_commit, BITBUCKET_COMMIT_INFO_SCHEMA) + except Exception as exc: + logger.exception( + "Exception when validating Bitbucket commit information: %s from %s", + exc.message, + bb_commit, + ) + raise InvalidPayloadException(exc.message) - commit = JSONPathDict(bb_commit) + commit = JSONPathDict(bb_commit) - config = SafeDictSetter() - config['commit'] = commit['node'] - config['ref'] = ref - config['default_branch'] = default_branch - config['git_url'] = 'git@bitbucket.org:%s.git' % repository_name + config = SafeDictSetter() + config["commit"] = commit["node"] + config["ref"] = ref + config["default_branch"] = default_branch + config["git_url"] = "git@bitbucket.org:%s.git" % repository_name - config['commit_info.url'] = _BITBUCKET_COMMIT_URL % (repository_name, commit['node']) - config['commit_info.message'] = commit['message'] - config['commit_info.date'] = commit['timestamp'] + config["commit_info.url"] = _BITBUCKET_COMMIT_URL % ( + repository_name, + commit["node"], + ) + config["commit_info.message"] = commit["message"] + config["commit_info.date"] = commit["timestamp"] - match = _RAW_AUTHOR_REGEX.match(commit['raw_author']) - if match: - author = lookup_author(match.group(1)) - author_info = JSONPathDict(author) if author is not None else None - if author_info: - config['commit_info.author.username'] = author_info['user.display_name'] - config['commit_info.author.avatar_url'] = author_info['user.avatar'] + match = _RAW_AUTHOR_REGEX.match(commit["raw_author"]) + if match: + author = lookup_author(match.group(1)) + author_info = JSONPathDict(author) if author is not None else None + if author_info: + config["commit_info.author.username"] = author_info["user.display_name"] + config["commit_info.author.avatar_url"] = author_info["user.avatar"] - return config.dict_value() + return config.dict_value() def get_transformed_webhook_payload(bb_payload, default_branch=None): - """ Returns the BitBucket webhook JSON payload transformed into our own payload + """ Returns the BitBucket webhook JSON payload transformed into our own payload format. If the bb_payload is not valid, returns None. 
""" - try: - validate(bb_payload, BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA) - except Exception as exc: - logger.exception('Exception when validating Bitbucket webhook payload: %s from %s', exc.message, - bb_payload) - raise InvalidPayloadException(exc.message) + try: + validate(bb_payload, BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA) + except Exception as exc: + logger.exception( + "Exception when validating Bitbucket webhook payload: %s from %s", + exc.message, + bb_payload, + ) + raise InvalidPayloadException(exc.message) - payload = JSONPathDict(bb_payload) - change = payload['push.changes[-1].new'] - if not change: - raise SkipRequestException + payload = JSONPathDict(bb_payload) + change = payload["push.changes[-1].new"] + if not change: + raise SkipRequestException - is_branch = change['type'] == 'branch' - ref = 'refs/heads/' + change['name'] if is_branch else 'refs/tags/' + change['name'] + is_branch = change["type"] == "branch" + ref = "refs/heads/" + change["name"] if is_branch else "refs/tags/" + change["name"] - repository_name = payload['repository.full_name'] - target = change['target'] + repository_name = payload["repository.full_name"] + target = change["target"] - config = SafeDictSetter() - config['commit'] = target['hash'] - config['ref'] = ref - config['default_branch'] = default_branch - config['git_url'] = 'git@bitbucket.org:%s.git' % repository_name + config = SafeDictSetter() + config["commit"] = target["hash"] + config["ref"] = ref + config["default_branch"] = default_branch + config["git_url"] = "git@bitbucket.org:%s.git" % repository_name - config['commit_info.url'] = target['links.html.href'] or '' - config['commit_info.message'] = target['message'] - config['commit_info.date'] = target['date'] + config["commit_info.url"] = target["links.html.href"] or "" + config["commit_info.message"] = target["message"] + config["commit_info.date"] = target["date"] - config['commit_info.author.username'] = target['author.user.display_name'] - config['commit_info.author.avatar_url'] = target['author.user.links.avatar.href'] + config["commit_info.author.username"] = target["author.user.display_name"] + config["commit_info.author.avatar_url"] = target["author.user.links.avatar.href"] - config['commit_info.committer.username'] = payload['actor.display_name'] - config['commit_info.committer.avatar_url'] = payload['actor.links.avatar.href'] - return config.dict_value() + config["commit_info.committer.username"] = payload["actor.display_name"] + config["commit_info.committer.avatar_url"] = payload["actor.links.avatar.href"] + return config.dict_value() class BitbucketBuildTrigger(BuildTriggerHandler): - """ + """ BuildTrigger for Bitbucket. """ - @classmethod - def service_name(cls): - return 'bitbucket' - def _get_client(self): - """ Returns a BitBucket API client for this trigger's config. """ - key = app.config.get('BITBUCKET_TRIGGER_CONFIG', {}).get('CONSUMER_KEY', '') - secret = app.config.get('BITBUCKET_TRIGGER_CONFIG', {}).get('CONSUMER_SECRET', '') + @classmethod + def service_name(cls): + return "bitbucket" - trigger_uuid = self.trigger.uuid - callback_url = '%s/oauth1/bitbucket/callback/trigger/%s' % (get_app_url(), trigger_uuid) + def _get_client(self): + """ Returns a BitBucket API client for this trigger's config. 
""" + key = app.config.get("BITBUCKET_TRIGGER_CONFIG", {}).get("CONSUMER_KEY", "") + secret = app.config.get("BITBUCKET_TRIGGER_CONFIG", {}).get( + "CONSUMER_SECRET", "" + ) - return BitBucket(key, secret, callback_url, timeout=15) + trigger_uuid = self.trigger.uuid + callback_url = "%s/oauth1/bitbucket/callback/trigger/%s" % ( + get_app_url(), + trigger_uuid, + ) - def _get_authorized_client(self): - """ Returns an authorized API client. """ - base_client = self._get_client() - auth_token = self.auth_token or 'invalid:invalid' - token_parts = auth_token.split(':') - if len(token_parts) != 2: - token_parts = ['invalid', 'invalid'] + return BitBucket(key, secret, callback_url, timeout=15) - (access_token, access_token_secret) = token_parts - return base_client.get_authorized_client(access_token, access_token_secret) + def _get_authorized_client(self): + """ Returns an authorized API client. """ + base_client = self._get_client() + auth_token = self.auth_token or "invalid:invalid" + token_parts = auth_token.split(":") + if len(token_parts) != 2: + token_parts = ["invalid", "invalid"] - def _get_repository_client(self): - """ Returns an API client for working with this config's BB repository. """ - source = self.config['build_source'] - (namespace, name) = source.split('/') - bitbucket_client = self._get_authorized_client() - return bitbucket_client.for_namespace(namespace).repositories().get(name) + (access_token, access_token_secret) = token_parts + return base_client.get_authorized_client(access_token, access_token_secret) - def _get_default_branch(self, repository, default_value='master'): - """ Returns the default branch for the repository or the value given. """ - (result, data, _) = repository.get_main_branch() - if result: - return data['name'] + def _get_repository_client(self): + """ Returns an API client for working with this config's BB repository. """ + source = self.config["build_source"] + (namespace, name) = source.split("/") + bitbucket_client = self._get_authorized_client() + return bitbucket_client.for_namespace(namespace).repositories().get(name) - return default_value + def _get_default_branch(self, repository, default_value="master"): + """ Returns the default branch for the repository or the value given. """ + (result, data, _) = repository.get_main_branch() + if result: + return data["name"] - def get_oauth_url(self): - """ Returns the OAuth URL to authorize Bitbucket. """ - bitbucket_client = self._get_client() - (result, data, err_msg) = bitbucket_client.get_authorization_url() - if not result: - raise TriggerProviderException(err_msg) + return default_value - return data + def get_oauth_url(self): + """ Returns the OAuth URL to authorize Bitbucket. """ + bitbucket_client = self._get_client() + (result, data, err_msg) = bitbucket_client.get_authorization_url() + if not result: + raise TriggerProviderException(err_msg) - def exchange_verifier(self, verifier): - """ Exchanges the given verifier token to setup this trigger. """ - bitbucket_client = self._get_client() - access_token = self.config.get('access_token', '') - access_token_secret = self.auth_token + return data - # Exchange the verifier for a new access token. - (result, data, _) = bitbucket_client.verify_token(access_token, access_token_secret, verifier) - if not result: - return False + def exchange_verifier(self, verifier): + """ Exchanges the given verifier token to setup this trigger. 
""" + bitbucket_client = self._get_client() + access_token = self.config.get("access_token", "") + access_token_secret = self.auth_token - # Save the updated access token and secret. - self.set_auth_token(data[0] + ':' + data[1]) + # Exchange the verifier for a new access token. + (result, data, _) = bitbucket_client.verify_token( + access_token, access_token_secret, verifier + ) + if not result: + return False - # Retrieve the current authorized user's information and store the username in the config. - authorized_client = self._get_authorized_client() - (result, data, _) = authorized_client.get_current_user() - if not result: - return False + # Save the updated access token and secret. + self.set_auth_token(data[0] + ":" + data[1]) - self.put_config_key('account_id', data['user']['account_id']) - self.put_config_key('nickname', data['user']['nickname']) - return True + # Retrieve the current authorized user's information and store the username in the config. + authorized_client = self._get_authorized_client() + (result, data, _) = authorized_client.get_current_user() + if not result: + return False - def is_active(self): - return 'webhook_id' in self.config + self.put_config_key("account_id", data["user"]["account_id"]) + self.put_config_key("nickname", data["user"]["nickname"]) + return True - def activate(self, standard_webhook_url): - config = self.config + def is_active(self): + return "webhook_id" in self.config - # Add a deploy key to the repository. - public_key, private_key = generate_ssh_keypair() - config['credentials'] = [ - { - 'name': 'SSH Public Key', - 'value': public_key, - }, - ] + def activate(self, standard_webhook_url): + config = self.config - repository = self._get_repository_client() - (result, created_deploykey, err_msg) = repository.deploykeys().create( - app.config['REGISTRY_TITLE'] + ' webhook key', public_key) + # Add a deploy key to the repository. + public_key, private_key = generate_ssh_keypair() + config["credentials"] = [{"name": "SSH Public Key", "value": public_key}] - if not result: - msg = 'Unable to add deploy key to repository: %s' % err_msg - raise TriggerActivationException(msg) + repository = self._get_repository_client() + (result, created_deploykey, err_msg) = repository.deploykeys().create( + app.config["REGISTRY_TITLE"] + " webhook key", public_key + ) - config['deploy_key_id'] = created_deploykey['pk'] + if not result: + msg = "Unable to add deploy key to repository: %s" % err_msg + raise TriggerActivationException(msg) - # Add a webhook callback. - description = 'Webhook for invoking builds on %s' % app.config['REGISTRY_TITLE_SHORT'] - webhook_events = ['repo:push'] - (result, created_webhook, err_msg) = repository.webhooks().create( - description, standard_webhook_url, webhook_events) + config["deploy_key_id"] = created_deploykey["pk"] - if not result: - msg = 'Unable to add webhook to repository: %s' % err_msg - raise TriggerActivationException(msg) + # Add a webhook callback. 
+ description = ( + "Webhook for invoking builds on %s" % app.config["REGISTRY_TITLE_SHORT"] + ) + webhook_events = ["repo:push"] + (result, created_webhook, err_msg) = repository.webhooks().create( + description, standard_webhook_url, webhook_events + ) - config['webhook_id'] = created_webhook['uuid'] - self.config = config - return config, {'private_key': private_key} + if not result: + msg = "Unable to add webhook to repository: %s" % err_msg + raise TriggerActivationException(msg) - def deactivate(self): - config = self.config + config["webhook_id"] = created_webhook["uuid"] + self.config = config + return config, {"private_key": private_key} - webhook_id = config.pop('webhook_id', None) - deploy_key_id = config.pop('deploy_key_id', None) - repository = self._get_repository_client() + def deactivate(self): + config = self.config - # Remove the webhook. - if webhook_id is not None: - (result, _, err_msg) = repository.webhooks().delete(webhook_id) - if not result: - msg = 'Unable to remove webhook from repository: %s' % err_msg - raise TriggerDeactivationException(msg) + webhook_id = config.pop("webhook_id", None) + deploy_key_id = config.pop("deploy_key_id", None) + repository = self._get_repository_client() - # Remove the public key. - if deploy_key_id is not None: - (result, _, err_msg) = repository.deploykeys().delete(deploy_key_id) - if not result: - msg = 'Unable to remove deploy key from repository: %s' % err_msg - raise TriggerDeactivationException(msg) + # Remove the webhook. + if webhook_id is not None: + (result, _, err_msg) = repository.webhooks().delete(webhook_id) + if not result: + msg = "Unable to remove webhook from repository: %s" % err_msg + raise TriggerDeactivationException(msg) - return config + # Remove the public key. + if deploy_key_id is not None: + (result, _, err_msg) = repository.deploykeys().delete(deploy_key_id) + if not result: + msg = "Unable to remove deploy key from repository: %s" % err_msg + raise TriggerDeactivationException(msg) - def list_build_source_namespaces(self): - bitbucket_client = self._get_authorized_client() - (result, data, err_msg) = bitbucket_client.get_visible_repositories() - if not result: - raise RepositoryReadException('Could not read repository list: ' + err_msg) + return config - namespaces = {} - for repo in data: - owner = repo['owner'] + def list_build_source_namespaces(self): + bitbucket_client = self._get_authorized_client() + (result, data, err_msg) = bitbucket_client.get_visible_repositories() + if not result: + raise RepositoryReadException("Could not read repository list: " + err_msg) - if owner in namespaces: - namespaces[owner]['score'] = namespaces[owner]['score'] + 1 - else: - namespaces[owner] = { - 'personal': owner == self.config.get('nickname', self.config.get('username')), - 'id': owner, - 'title': owner, - 'avatar_url': repo['logo'], - 'url': 'https://bitbucket.org/%s' % (owner), - 'score': 1, - } + namespaces = {} + for repo in data: + owner = repo["owner"] - return BuildTriggerHandler.build_namespaces_response(namespaces) + if owner in namespaces: + namespaces[owner]["score"] = namespaces[owner]["score"] + 1 + else: + namespaces[owner] = { + "personal": owner + == self.config.get("nickname", self.config.get("username")), + "id": owner, + "title": owner, + "avatar_url": repo["logo"], + "url": "https://bitbucket.org/%s" % (owner), + "score": 1, + } - def list_build_sources_for_namespace(self, namespace): - def repo_view(repo): - last_modified = dateutil.parser.parse(repo['utc_last_updated']) + return 
BuildTriggerHandler.build_namespaces_response(namespaces) - return { - 'name': repo['slug'], - 'full_name': '%s/%s' % (repo['owner'], repo['slug']), - 'description': repo['description'] or '', - 'last_updated': timegm(last_modified.utctimetuple()), - 'url': 'https://bitbucket.org/%s/%s' % (repo['owner'], repo['slug']), - 'has_admin_permissions': repo['read_only'] is False, - 'private': repo['is_private'], - } + def list_build_sources_for_namespace(self, namespace): + def repo_view(repo): + last_modified = dateutil.parser.parse(repo["utc_last_updated"]) - bitbucket_client = self._get_authorized_client() - (result, data, err_msg) = bitbucket_client.get_visible_repositories() - if not result: - raise RepositoryReadException('Could not read repository list: ' + err_msg) + return { + "name": repo["slug"], + "full_name": "%s/%s" % (repo["owner"], repo["slug"]), + "description": repo["description"] or "", + "last_updated": timegm(last_modified.utctimetuple()), + "url": "https://bitbucket.org/%s/%s" % (repo["owner"], repo["slug"]), + "has_admin_permissions": repo["read_only"] is False, + "private": repo["is_private"], + } - repos = [repo_view(repo) for repo in data if repo['owner'] == namespace] - return BuildTriggerHandler.build_sources_response(repos) + bitbucket_client = self._get_authorized_client() + (result, data, err_msg) = bitbucket_client.get_visible_repositories() + if not result: + raise RepositoryReadException("Could not read repository list: " + err_msg) - def list_build_subdirs(self): - config = self.config - repository = self._get_repository_client() + repos = [repo_view(repo) for repo in data if repo["owner"] == namespace] + return BuildTriggerHandler.build_sources_response(repos) - # Find the first matching branch. - repo_branches = self.list_field_values('branch_name') or [] - branches = find_matching_branches(config, repo_branches) - if not branches: - branches = [self._get_default_branch(repository)] + def list_build_subdirs(self): + config = self.config + repository = self._get_repository_client() - (result, data, err_msg) = repository.get_path_contents('', revision=branches[0]) - if not result: - raise RepositoryReadException(err_msg) + # Find the first matching branch. 
+ repo_branches = self.list_field_values("branch_name") or [] + branches = find_matching_branches(config, repo_branches) + if not branches: + branches = [self._get_default_branch(repository)] - files = set([f['path'] for f in data['files']]) - return ["/" + file_path for file_path in files if self.filename_is_dockerfile(os.path.basename(file_path))] + (result, data, err_msg) = repository.get_path_contents("", revision=branches[0]) + if not result: + raise RepositoryReadException(err_msg) - def load_dockerfile_contents(self): - repository = self._get_repository_client() - path = self.get_dockerfile_path() + files = set([f["path"] for f in data["files"]]) + return [ + "/" + file_path + for file_path in files + if self.filename_is_dockerfile(os.path.basename(file_path)) + ] - (result, data, err_msg) = repository.get_raw_path_contents(path, revision='master') - if not result: - return None + def load_dockerfile_contents(self): + repository = self._get_repository_client() + path = self.get_dockerfile_path() - return data + (result, data, err_msg) = repository.get_raw_path_contents( + path, revision="master" + ) + if not result: + return None - def list_field_values(self, field_name, limit=None): - if 'build_source' not in self.config: - return None + return data - source = self.config['build_source'] - (namespace, name) = source.split('/') + def list_field_values(self, field_name, limit=None): + if "build_source" not in self.config: + return None - bitbucket_client = self._get_authorized_client() - repository = bitbucket_client.for_namespace(namespace).repositories().get(name) + source = self.config["build_source"] + (namespace, name) = source.split("/") + + bitbucket_client = self._get_authorized_client() + repository = bitbucket_client.for_namespace(namespace).repositories().get(name) + + if field_name == "refs": + (result, data, _) = repository.get_branches_and_tags() + if not result: + return None + + branches = [b["name"] for b in data["branches"]] + tags = [t["name"] for t in data["tags"]] + + return [{"kind": "branch", "name": b} for b in branches] + [ + {"kind": "tag", "name": tag} for tag in tags + ] + + if field_name == "tag_name": + (result, data, _) = repository.get_tags() + if not result: + return None + + tags = list(data.keys()) + if limit: + tags = tags[0:limit] + + return tags + + if field_name == "branch_name": + (result, data, _) = repository.get_branches() + if not result: + return None + + branches = list(data.keys()) + if limit: + branches = branches[0:limit] + + return branches - if field_name == 'refs': - (result, data, _) = repository.get_branches_and_tags() - if not result: return None - branches = [b['name'] for b in data['branches']] - tags = [t['name'] for t in data['tags']] + def get_repository_url(self): + source = self.config["build_source"] + (namespace, name) = source.split("/") + return "https://bitbucket.org/%s/%s" % (namespace, name) - return ([{'kind': 'branch', 'name': b} for b in branches] + - [{'kind': 'tag', 'name': tag} for tag in tags]) + def handle_trigger_request(self, request): + payload = request.get_json() + if payload is None: + raise InvalidPayloadException("Missing payload") - if field_name == 'tag_name': - (result, data, _) = repository.get_tags() - if not result: - return None + logger.debug("Got BitBucket request: %s", payload) - tags = list(data.keys()) - if limit: - tags = tags[0:limit] + repository = self._get_repository_client() + default_branch = self._get_default_branch(repository) - return tags + metadata = 
get_transformed_webhook_payload( + payload, default_branch=default_branch + ) + prepared = self.prepare_build(metadata) - if field_name == 'branch_name': - (result, data, _) = repository.get_branches() - if not result: - return None + # Check if we should skip this build. + raise_if_skipped_build(prepared, self.config) + return prepared - branches = list(data.keys()) - if limit: - branches = branches[0:limit] + def manual_start(self, run_parameters=None): + run_parameters = run_parameters or {} + repository = self._get_repository_client() + bitbucket_client = self._get_authorized_client() - return branches + def get_branch_sha(branch_name): + # Lookup the commit SHA for the branch. + (result, data, _) = repository.get_branch(branch_name) + if not result: + raise TriggerStartException("Could not find branch in repository") - return None + return data["target"]["hash"] - def get_repository_url(self): - source = self.config['build_source'] - (namespace, name) = source.split('/') - return 'https://bitbucket.org/%s/%s' % (namespace, name) + def get_tag_sha(tag_name): + # Lookup the commit SHA for the tag. + (result, data, _) = repository.get_tag(tag_name) + if not result: + raise TriggerStartException("Could not find tag in repository") - def handle_trigger_request(self, request): - payload = request.get_json() - if payload is None: - raise InvalidPayloadException('Missing payload') + return data["target"]["hash"] - logger.debug('Got BitBucket request: %s', payload) + def lookup_author(email_address): + (result, data, _) = bitbucket_client.accounts().get_profile(email_address) + return data if result else None - repository = self._get_repository_client() - default_branch = self._get_default_branch(repository) + # Find the branch or tag to build. + default_branch = self._get_default_branch(repository) + (commit_sha, ref) = determine_build_ref( + run_parameters, get_branch_sha, get_tag_sha, default_branch + ) - metadata = get_transformed_webhook_payload(payload, default_branch=default_branch) - prepared = self.prepare_build(metadata) + # Lookup the commit SHA in BitBucket. + (result, commit_info, _) = repository.changesets().get(commit_sha) + if not result: + raise TriggerStartException("Could not lookup commit SHA") - # Check if we should skip this build. - raise_if_skipped_build(prepared, self.config) - return prepared + # Return a prepared build for the commit. + repository_name = "%s/%s" % (repository.namespace, repository.repository_name) + metadata = get_transformed_commit_info( + commit_info, ref, default_branch, repository_name, lookup_author + ) - def manual_start(self, run_parameters=None): - run_parameters = run_parameters or {} - repository = self._get_repository_client() - bitbucket_client = self._get_authorized_client() - - def get_branch_sha(branch_name): - # Lookup the commit SHA for the branch. - (result, data, _) = repository.get_branch(branch_name) - if not result: - raise TriggerStartException('Could not find branch in repository') - - return data['target']['hash'] - - def get_tag_sha(tag_name): - # Lookup the commit SHA for the tag. - (result, data, _) = repository.get_tag(tag_name) - if not result: - raise TriggerStartException('Could not find tag in repository') - - return data['target']['hash'] - - def lookup_author(email_address): - (result, data, _) = bitbucket_client.accounts().get_profile(email_address) - return data if result else None - - # Find the branch or tag to build. 
- default_branch = self._get_default_branch(repository) - (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, - default_branch) - - # Lookup the commit SHA in BitBucket. - (result, commit_info, _) = repository.changesets().get(commit_sha) - if not result: - raise TriggerStartException('Could not lookup commit SHA') - - # Return a prepared build for the commit. - repository_name = '%s/%s' % (repository.namespace, repository.repository_name) - metadata = get_transformed_commit_info(commit_info, ref, default_branch, - repository_name, lookup_author) - - return self.prepare_build(metadata, is_manual=True) + return self.prepare_build(metadata, is_manual=True) diff --git a/buildtrigger/customhandler.py b/buildtrigger/customhandler.py index 193445ee2..6ed6e08c7 100644 --- a/buildtrigger/customhandler.py +++ b/buildtrigger/customhandler.py @@ -2,22 +2,33 @@ import logging import json from jsonschema import validate, ValidationError -from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, - TriggerStartException, ValidationRequestException, - InvalidPayloadException, - SkipRequestException, raise_if_skipped_build, - find_matching_branches) +from buildtrigger.triggerutil import ( + RepositoryReadException, + TriggerActivationException, + TriggerStartException, + ValidationRequestException, + InvalidPayloadException, + SkipRequestException, + raise_if_skipped_build, + find_matching_branches, +) from buildtrigger.basehandler import BuildTriggerHandler -from buildtrigger.bitbuckethandler import (BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA as bb_schema, - get_transformed_webhook_payload as bb_payload) +from buildtrigger.bitbuckethandler import ( + BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA as bb_schema, + get_transformed_webhook_payload as bb_payload, +) -from buildtrigger.githubhandler import (GITHUB_WEBHOOK_PAYLOAD_SCHEMA as gh_schema, - get_transformed_webhook_payload as gh_payload) +from buildtrigger.githubhandler import ( + GITHUB_WEBHOOK_PAYLOAD_SCHEMA as gh_schema, + get_transformed_webhook_payload as gh_payload, +) -from buildtrigger.gitlabhandler import (GITLAB_WEBHOOK_PAYLOAD_SCHEMA as gl_schema, - get_transformed_webhook_payload as gl_payload) +from buildtrigger.gitlabhandler import ( + GITLAB_WEBHOOK_PAYLOAD_SCHEMA as gl_schema, + get_transformed_webhook_payload as gl_payload, +) from util.security.ssh import generate_ssh_keypair @@ -27,203 +38,191 @@ logger = logging.getLogger(__name__) # Defines an ordered set of tuples of the schemas and associated transformation functions # for incoming webhook payloads. SCHEMA_AND_HANDLERS = [ - (gh_schema, gh_payload), - (bb_schema, bb_payload), - (gl_schema, gl_payload), + (gh_schema, gh_payload), + (bb_schema, bb_payload), + (gl_schema, gl_payload), ] def custom_trigger_payload(metadata, git_url): - # First try the customhandler schema. If it matches, nothing more to do. - custom_handler_validation_error = None - try: - validate(metadata, CustomBuildTrigger.payload_schema) - except ValidationError as vex: - custom_handler_validation_error = vex - - # Otherwise, try the defined schemas, in order, until we find a match. - for schema, handler in SCHEMA_AND_HANDLERS: + # First try the customhandler schema. If it matches, nothing more to do. 
+ custom_handler_validation_error = None try: - validate(metadata, schema) - except ValidationError: - continue + validate(metadata, CustomBuildTrigger.payload_schema) + except ValidationError as vex: + custom_handler_validation_error = vex - result = handler(metadata) - result['git_url'] = git_url - return result + # Otherwise, try the defined schemas, in order, until we find a match. + for schema, handler in SCHEMA_AND_HANDLERS: + try: + validate(metadata, schema) + except ValidationError: + continue - # If we have reached this point and no other schemas validated, then raise the error for the - # custom schema. - if custom_handler_validation_error is not None: - raise InvalidPayloadException(custom_handler_validation_error.message) + result = handler(metadata) + result["git_url"] = git_url + return result - metadata['git_url'] = git_url - return metadata + # If we have reached this point and no other schemas validated, then raise the error for the + # custom schema. + if custom_handler_validation_error is not None: + raise InvalidPayloadException(custom_handler_validation_error.message) + + metadata["git_url"] = git_url + return metadata class CustomBuildTrigger(BuildTriggerHandler): - payload_schema = { - 'type': 'object', - 'properties': { - 'commit': { - 'type': 'string', - 'description': 'first 7 characters of the SHA-1 identifier for a git commit', - 'pattern': '^([A-Fa-f0-9]{7,})$', - }, - 'ref': { - 'type': 'string', - 'description': 'git reference for a git commit', - 'pattern': '^refs\/(heads|tags|remotes)\/(.+)$', - }, - 'default_branch': { - 'type': 'string', - 'description': 'default branch of the git repository', - }, - 'commit_info': { - 'type': 'object', - 'description': 'metadata about a git commit', - 'properties': { - 'url': { - 'type': 'string', - 'description': 'URL to view a git commit', - }, - 'message': { - 'type': 'string', - 'description': 'git commit message', - }, - 'date': { - 'type': 'string', - 'description': 'timestamp for a git commit' - }, - 'author': { - 'type': 'object', - 'description': 'metadata about the author of a git commit', - 'properties': { - 'username': { - 'type': 'string', - 'description': 'username of the author', - }, - 'url': { - 'type': 'string', - 'description': 'URL to view the profile of the author', - }, - 'avatar_url': { - 'type': 'string', - 'description': 'URL to view the avatar of the author', - }, + payload_schema = { + "type": "object", + "properties": { + "commit": { + "type": "string", + "description": "first 7 characters of the SHA-1 identifier for a git commit", + "pattern": "^([A-Fa-f0-9]{7,})$", }, - 'required': ['username', 'url', 'avatar_url'], - }, - 'committer': { - 'type': 'object', - 'description': 'metadata about the committer of a git commit', - 'properties': { - 'username': { - 'type': 'string', - 'description': 'username of the committer', - }, - 'url': { - 'type': 'string', - 'description': 'URL to view the profile of the committer', - }, - 'avatar_url': { - 'type': 'string', - 'description': 'URL to view the avatar of the committer', - }, + "ref": { + "type": "string", + "description": "git reference for a git commit", + "pattern": "^refs\/(heads|tags|remotes)\/(.+)$", + }, + "default_branch": { + "type": "string", + "description": "default branch of the git repository", + }, + "commit_info": { + "type": "object", + "description": "metadata about a git commit", + "properties": { + "url": { + "type": "string", + "description": "URL to view a git commit", + }, + "message": {"type": "string", "description": "git 
commit message"}, + "date": { + "type": "string", + "description": "timestamp for a git commit", + }, + "author": { + "type": "object", + "description": "metadata about the author of a git commit", + "properties": { + "username": { + "type": "string", + "description": "username of the author", + }, + "url": { + "type": "string", + "description": "URL to view the profile of the author", + }, + "avatar_url": { + "type": "string", + "description": "URL to view the avatar of the author", + }, + }, + "required": ["username", "url", "avatar_url"], + }, + "committer": { + "type": "object", + "description": "metadata about the committer of a git commit", + "properties": { + "username": { + "type": "string", + "description": "username of the committer", + }, + "url": { + "type": "string", + "description": "URL to view the profile of the committer", + }, + "avatar_url": { + "type": "string", + "description": "URL to view the avatar of the committer", + }, + }, + "required": ["username", "url", "avatar_url"], + }, + }, + "required": ["url", "message", "date"], }, - 'required': ['username', 'url', 'avatar_url'], - }, }, - 'required': ['url', 'message', 'date'], - }, - }, - 'required': ['commit', 'ref', 'default_branch'], - } - - @classmethod - def service_name(cls): - return 'custom-git' - - def is_active(self): - return self.config.has_key('credentials') - - def _metadata_from_payload(self, payload, git_url): - # Parse the JSON payload. - try: - metadata = json.loads(payload) - except ValueError as vex: - raise InvalidPayloadException(vex.message) - - return custom_trigger_payload(metadata, git_url) - - def handle_trigger_request(self, request): - payload = request.data - if not payload: - raise InvalidPayloadException('Missing expected payload') - - logger.debug('Payload %s', payload) - - metadata = self._metadata_from_payload(payload, self.config['build_source']) - prepared = self.prepare_build(metadata) - - # Check if we should skip this build. - raise_if_skipped_build(prepared, self.config) - - return prepared - - def manual_start(self, run_parameters=None): - # commit_sha is the only required parameter - commit_sha = run_parameters.get('commit_sha') - if commit_sha is None: - raise TriggerStartException('missing required parameter') - - config = self.config - metadata = { - 'commit': commit_sha, - 'git_url': config['build_source'], + "required": ["commit", "ref", "default_branch"], } - try: - return self.prepare_build(metadata, is_manual=True) - except ValidationError as ve: - raise TriggerStartException(ve.message) + @classmethod + def service_name(cls): + return "custom-git" - def activate(self, standard_webhook_url): - config = self.config - public_key, private_key = generate_ssh_keypair() - config['credentials'] = [ - { - 'name': 'SSH Public Key', - 'value': public_key, - }, - { - 'name': 'Webhook Endpoint URL', - 'value': standard_webhook_url, - }, - ] - self.config = config - return config, {'private_key': private_key} + def is_active(self): + return self.config.has_key("credentials") - def deactivate(self): - config = self.config - config.pop('credentials', None) - self.config = config - return config + def _metadata_from_payload(self, payload, git_url): + # Parse the JSON payload. 
+ try: + metadata = json.loads(payload) + except ValueError as vex: + raise InvalidPayloadException(vex.message) - def get_repository_url(self): - return None + return custom_trigger_payload(metadata, git_url) - def list_build_source_namespaces(self): - raise NotImplementedError + def handle_trigger_request(self, request): + payload = request.data + if not payload: + raise InvalidPayloadException("Missing expected payload") - def list_build_sources_for_namespace(self, namespace): - raise NotImplementedError + logger.debug("Payload %s", payload) - def list_build_subdirs(self): - raise NotImplementedError + metadata = self._metadata_from_payload(payload, self.config["build_source"]) + prepared = self.prepare_build(metadata) - def list_field_values(self, field_name, limit=None): - raise NotImplementedError + # Check if we should skip this build. + raise_if_skipped_build(prepared, self.config) - def load_dockerfile_contents(self): - raise NotImplementedError + return prepared + + def manual_start(self, run_parameters=None): + # commit_sha is the only required parameter + commit_sha = run_parameters.get("commit_sha") + if commit_sha is None: + raise TriggerStartException("missing required parameter") + + config = self.config + metadata = {"commit": commit_sha, "git_url": config["build_source"]} + + try: + return self.prepare_build(metadata, is_manual=True) + except ValidationError as ve: + raise TriggerStartException(ve.message) + + def activate(self, standard_webhook_url): + config = self.config + public_key, private_key = generate_ssh_keypair() + config["credentials"] = [ + {"name": "SSH Public Key", "value": public_key}, + {"name": "Webhook Endpoint URL", "value": standard_webhook_url}, + ] + self.config = config + return config, {"private_key": private_key} + + def deactivate(self): + config = self.config + config.pop("credentials", None) + self.config = config + return config + + def get_repository_url(self): + return None + + def list_build_source_namespaces(self): + raise NotImplementedError + + def list_build_sources_for_namespace(self, namespace): + raise NotImplementedError + + def list_build_subdirs(self): + raise NotImplementedError + + def list_field_values(self, field_name, limit=None): + raise NotImplementedError + + def load_dockerfile_contents(self): + raise NotImplementedError diff --git a/buildtrigger/githubhandler.py b/buildtrigger/githubhandler.py index bc40f993c..7fc12135b 100644 --- a/buildtrigger/githubhandler.py +++ b/buildtrigger/githubhandler.py @@ -7,18 +7,29 @@ from calendar import timegm from functools import wraps from ssl import SSLError -from github import (Github, UnknownObjectException, GithubException, - BadCredentialsException as GitHubBadCredentialsException) +from github import ( + Github, + UnknownObjectException, + GithubException, + BadCredentialsException as GitHubBadCredentialsException, +) from jsonschema import validate from app import app, github_trigger -from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, - TriggerDeactivationException, TriggerStartException, - EmptyRepositoryException, ValidationRequestException, - SkipRequestException, InvalidPayloadException, - determine_build_ref, raise_if_skipped_build, - find_matching_branches) +from buildtrigger.triggerutil import ( + RepositoryReadException, + TriggerActivationException, + TriggerDeactivationException, + TriggerStartException, + EmptyRepositoryException, + ValidationRequestException, + SkipRequestException, + InvalidPayloadException, + 
determine_build_ref, + raise_if_skipped_build, + find_matching_branches, +) from buildtrigger.basehandler import BuildTriggerHandler from endpoints.exception import ExternalServiceError from util.security.ssh import generate_ssh_keypair @@ -27,561 +38,576 @@ from util.dict_wrappers import JSONPathDict, SafeDictSetter logger = logging.getLogger(__name__) GITHUB_WEBHOOK_PAYLOAD_SCHEMA = { - 'type': 'object', - 'properties': { - 'ref': { - 'type': 'string', + "type": "object", + "properties": { + "ref": {"type": "string"}, + "head_commit": { + "type": ["object", "null"], + "properties": { + "id": {"type": "string"}, + "url": {"type": "string"}, + "message": {"type": "string"}, + "timestamp": {"type": "string"}, + "author": { + "type": "object", + "properties": { + "username": {"type": "string"}, + "html_url": {"type": "string"}, + "avatar_url": {"type": "string"}, + }, + }, + "committer": { + "type": "object", + "properties": { + "username": {"type": "string"}, + "html_url": {"type": "string"}, + "avatar_url": {"type": "string"}, + }, + }, + }, + "required": ["id", "url", "message", "timestamp"], + }, + "repository": { + "type": "object", + "properties": {"ssh_url": {"type": "string"}}, + "required": ["ssh_url"], + }, }, - 'head_commit': { - 'type': ['object', 'null'], - 'properties': { - 'id': { - 'type': 'string', - }, - 'url': { - 'type': 'string', - }, - 'message': { - 'type': 'string', - }, - 'timestamp': { - 'type': 'string', - }, - 'author': { - 'type': 'object', - 'properties': { - 'username': { - 'type': 'string' - }, - 'html_url': { - 'type': 'string' - }, - 'avatar_url': { - 'type': 'string' - }, - }, - }, - 'committer': { - 'type': 'object', - 'properties': { - 'username': { - 'type': 'string' - }, - 'html_url': { - 'type': 'string' - }, - 'avatar_url': { - 'type': 'string' - }, - }, - }, - }, - 'required': ['id', 'url', 'message', 'timestamp'], - }, - 'repository': { - 'type': 'object', - 'properties': { - 'ssh_url': { - 'type': 'string', - }, - }, - 'required': ['ssh_url'], - }, - }, - 'required': ['ref', 'head_commit', 'repository'], + "required": ["ref", "head_commit", "repository"], } + def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user=None): - """ Returns the GitHub webhook JSON payload transformed into our own payload + """ Returns the GitHub webhook JSON payload transformed into our own payload format. If the gh_payload is not valid, returns None. 
""" - try: - validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA) - except Exception as exc: - raise InvalidPayloadException(exc.message) + try: + validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA) + except Exception as exc: + raise InvalidPayloadException(exc.message) - payload = JSONPathDict(gh_payload) + payload = JSONPathDict(gh_payload) - if payload['head_commit'] is None: - raise SkipRequestException + if payload["head_commit"] is None: + raise SkipRequestException - config = SafeDictSetter() - config['commit'] = payload['head_commit.id'] - config['ref'] = payload['ref'] - config['default_branch'] = payload['repository.default_branch'] or default_branch - config['git_url'] = payload['repository.ssh_url'] + config = SafeDictSetter() + config["commit"] = payload["head_commit.id"] + config["ref"] = payload["ref"] + config["default_branch"] = payload["repository.default_branch"] or default_branch + config["git_url"] = payload["repository.ssh_url"] - config['commit_info.url'] = payload['head_commit.url'] - config['commit_info.message'] = payload['head_commit.message'] - config['commit_info.date'] = payload['head_commit.timestamp'] + config["commit_info.url"] = payload["head_commit.url"] + config["commit_info.message"] = payload["head_commit.message"] + config["commit_info.date"] = payload["head_commit.timestamp"] - config['commit_info.author.username'] = payload['head_commit.author.username'] - config['commit_info.author.url'] = payload.get('head_commit.author.html_url') - config['commit_info.author.avatar_url'] = payload.get('head_commit.author.avatar_url') + config["commit_info.author.username"] = payload["head_commit.author.username"] + config["commit_info.author.url"] = payload.get("head_commit.author.html_url") + config["commit_info.author.avatar_url"] = payload.get( + "head_commit.author.avatar_url" + ) - config['commit_info.committer.username'] = payload.get('head_commit.committer.username') - config['commit_info.committer.url'] = payload.get('head_commit.committer.html_url') - config['commit_info.committer.avatar_url'] = payload.get('head_commit.committer.avatar_url') + config["commit_info.committer.username"] = payload.get( + "head_commit.committer.username" + ) + config["commit_info.committer.url"] = payload.get("head_commit.committer.html_url") + config["commit_info.committer.avatar_url"] = payload.get( + "head_commit.committer.avatar_url" + ) - # Note: GitHub doesn't always return the extra information for users, so we do the lookup - # manually if possible. - if (lookup_user and not payload.get('head_commit.author.html_url') and - payload.get('head_commit.author.username')): - author_info = lookup_user(payload['head_commit.author.username']) - if author_info: - config['commit_info.author.url'] = author_info['html_url'] - config['commit_info.author.avatar_url'] = author_info['avatar_url'] + # Note: GitHub doesn't always return the extra information for users, so we do the lookup + # manually if possible. 
+ if ( + lookup_user + and not payload.get("head_commit.author.html_url") + and payload.get("head_commit.author.username") + ): + author_info = lookup_user(payload["head_commit.author.username"]) + if author_info: + config["commit_info.author.url"] = author_info["html_url"] + config["commit_info.author.avatar_url"] = author_info["avatar_url"] - if (lookup_user and - payload.get('head_commit.committer.username') and - not payload.get('head_commit.committer.html_url')): - committer_info = lookup_user(payload['head_commit.committer.username']) - if committer_info: - config['commit_info.committer.url'] = committer_info['html_url'] - config['commit_info.committer.avatar_url'] = committer_info['avatar_url'] + if ( + lookup_user + and payload.get("head_commit.committer.username") + and not payload.get("head_commit.committer.html_url") + ): + committer_info = lookup_user(payload["head_commit.committer.username"]) + if committer_info: + config["commit_info.committer.url"] = committer_info["html_url"] + config["commit_info.committer.avatar_url"] = committer_info["avatar_url"] - return config.dict_value() + return config.dict_value() def _catch_ssl_errors(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except SSLError as se: - msg = 'Request to the GitHub API failed: %s' % se.message - logger.exception(msg) - raise ExternalServiceError(msg) - return wrapper + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except SSLError as se: + msg = "Request to the GitHub API failed: %s" % se.message + logger.exception(msg) + raise ExternalServiceError(msg) + + return wrapper class GithubBuildTrigger(BuildTriggerHandler): - """ + """ BuildTrigger for GitHub that uses the archive API and buildpacks. """ - def _get_client(self): - """ Returns an authenticated client for talking to the GitHub API. """ - return Github(self.auth_token, - base_url=github_trigger.api_endpoint(), - client_id=github_trigger.client_id(), - client_secret=github_trigger.client_secret(), - timeout=5) - @classmethod - def service_name(cls): - return 'github' + def _get_client(self): + """ Returns an authenticated client for talking to the GitHub API. """ + return Github( + self.auth_token, + base_url=github_trigger.api_endpoint(), + client_id=github_trigger.client_id(), + client_secret=github_trigger.client_secret(), + timeout=5, + ) - def is_active(self): - return 'hook_id' in self.config + @classmethod + def service_name(cls): + return "github" - def get_repository_url(self): - source = self.config['build_source'] - return github_trigger.get_public_url(source) + def is_active(self): + return "hook_id" in self.config - @staticmethod - def _get_error_message(ghe, default_msg): - if ghe.data.get('errors') and ghe.data['errors'][0].get('message'): - return ghe.data['errors'][0]['message'] + def get_repository_url(self): + source = self.config["build_source"] + return github_trigger.get_public_url(source) - return default_msg + @staticmethod + def _get_error_message(ghe, default_msg): + if ghe.data.get("errors") and ghe.data["errors"][0].get("message"): + return ghe.data["errors"][0]["message"] - @_catch_ssl_errors - def activate(self, standard_webhook_url): - config = self.config - new_build_source = config['build_source'] - gh_client = self._get_client() + return default_msg - # Find the GitHub repository. 
- try: - gh_repo = gh_client.get_repo(new_build_source) - except UnknownObjectException: - msg = 'Unable to find GitHub repository for source: %s' % new_build_source - raise TriggerActivationException(msg) - - # Add a deploy key to the GitHub repository. - public_key, private_key = generate_ssh_keypair() - config['credentials'] = [ - { - 'name': 'SSH Public Key', - 'value': public_key, - }, - ] - - try: - deploy_key = gh_repo.create_key('%s Builder' % app.config['REGISTRY_TITLE'], - public_key) - config['deploy_key_id'] = deploy_key.id - except GithubException as ghe: - default_msg = 'Unable to add deploy key to repository: %s' % new_build_source - msg = GithubBuildTrigger._get_error_message(ghe, default_msg) - raise TriggerActivationException(msg) - - # Add the webhook to the GitHub repository. - webhook_config = { - 'url': standard_webhook_url, - 'content_type': 'json', - } - - try: - hook = gh_repo.create_hook('web', webhook_config) - config['hook_id'] = hook.id - config['master_branch'] = gh_repo.default_branch - except GithubException as ghe: - default_msg = 'Unable to create webhook on repository: %s' % new_build_source - msg = GithubBuildTrigger._get_error_message(ghe, default_msg) - raise TriggerActivationException(msg) - - return config, {'private_key': private_key} - - @_catch_ssl_errors - def deactivate(self): - config = self.config - gh_client = self._get_client() - - # Find the GitHub repository. - try: - repo = gh_client.get_repo(config['build_source']) - except UnknownObjectException: - msg = 'Unable to find GitHub repository for source: %s' % config['build_source'] - raise TriggerDeactivationException(msg) - except GitHubBadCredentialsException: - msg = 'Unable to access repository to disable trigger' - raise TriggerDeactivationException(msg) - - # If the trigger uses a deploy key, remove it. - try: - if config['deploy_key_id']: - deploy_key = repo.get_key(config['deploy_key_id']) - deploy_key.delete() - except KeyError: - # There was no config['deploy_key_id'], thus this is an old trigger without a deploy key. - pass - except GithubException as ghe: - default_msg = 'Unable to remove deploy key: %s' % config['deploy_key_id'] - msg = GithubBuildTrigger._get_error_message(ghe, default_msg) - raise TriggerDeactivationException(msg) - - # Remove the webhook. - if 'hook_id' in config: - try: - hook = repo.get_hook(config['hook_id']) - hook.delete() - except GithubException as ghe: - default_msg = 'Unable to remove hook: %s' % config['hook_id'] - msg = GithubBuildTrigger._get_error_message(ghe, default_msg) - raise TriggerDeactivationException(msg) - - config.pop('hook_id', None) - self.config = config - return config - - @_catch_ssl_errors - def list_build_source_namespaces(self): - gh_client = self._get_client() - usr = gh_client.get_user() - - # Build the full set of namespaces for the user, starting with their own. - namespaces = {} - namespaces[usr.login] = { - 'personal': True, - 'id': usr.login, - 'title': usr.name or usr.login, - 'avatar_url': usr.avatar_url, - 'url': usr.html_url, - 'score': usr.plan.private_repos if usr.plan else 0, - } - - for org in usr.get_orgs(): - organization = org.login if org.login else org.name - - # NOTE: We don't load the organization's html_url nor its plan, because doing - # so requires loading *each organization* via its own API call in this tight - # loop, which was massively slowing down the load time for users when setting - # up triggers. 
- namespaces[organization] = { - 'personal': False, - 'id': organization, - 'title': organization, - 'avatar_url': org.avatar_url, - 'url': '', - 'score': 0, - } - - return BuildTriggerHandler.build_namespaces_response(namespaces) - - @_catch_ssl_errors - def list_build_sources_for_namespace(self, namespace): - def repo_view(repo): - return { - 'name': repo.name, - 'full_name': repo.full_name, - 'description': repo.description or '', - 'last_updated': timegm(repo.pushed_at.utctimetuple()) if repo.pushed_at else 0, - 'url': repo.html_url, - 'has_admin_permissions': repo.permissions.admin, - 'private': repo.private, - } - - gh_client = self._get_client() - usr = gh_client.get_user() - if namespace == usr.login: - repos = [repo_view(repo) for repo in usr.get_repos(type='owner', sort='updated')] - return BuildTriggerHandler.build_sources_response(repos) - - try: - org = gh_client.get_organization(namespace) - if org is None: - return [] - except GithubException: - return [] - - repos = [repo_view(repo) for repo in org.get_repos(type='member')] - return BuildTriggerHandler.build_sources_response(repos) - - - @_catch_ssl_errors - def list_build_subdirs(self): - config = self.config - gh_client = self._get_client() - source = config['build_source'] - - try: - repo = gh_client.get_repo(source) - - # Find the first matching branch. - repo_branches = self.list_field_values('branch_name') or [] - branches = find_matching_branches(config, repo_branches) - branches = branches or [repo.default_branch or 'master'] - default_commit = repo.get_branch(branches[0]).commit - commit_tree = repo.get_git_tree(default_commit.sha, recursive=True) - - return [elem.path for elem in commit_tree.tree - if (elem.type == u'blob' and self.filename_is_dockerfile(os.path.basename(elem.path)))] - except GithubException as ghe: - message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source) - if message == 'Branch not found': - raise EmptyRepositoryException() - - raise RepositoryReadException(message) - - @_catch_ssl_errors - def load_dockerfile_contents(self): - config = self.config - gh_client = self._get_client() - source = config['build_source'] - - try: - repo = gh_client.get_repo(source) - except GithubException as ghe: - message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source) - raise RepositoryReadException(message) - - path = self.get_dockerfile_path() - if not path: - return None - - try: - file_info = repo.get_contents(path) - # TypeError is needed because directory inputs cause a TypeError - except (GithubException, TypeError) as ghe: - logger.error("got error from trying to find github file %s" % ghe) - return None - - if file_info is None: - return None - - if isinstance(file_info, list): - return None - - content = file_info.content - if file_info.encoding == 'base64': - content = base64.b64decode(content) - return content - - @_catch_ssl_errors - def list_field_values(self, field_name, limit=None): - if field_name == 'refs': - branches = self.list_field_values('branch_name') - tags = self.list_field_values('tag_name') - - return ([{'kind': 'branch', 'name': b} for b in branches] + - [{'kind': 'tag', 'name': tag} for tag in tags]) - - config = self.config - source = config.get('build_source') - if source is None: - return [] - - if field_name == 'tag_name': - try: + @_catch_ssl_errors + def activate(self, standard_webhook_url): + config = self.config + new_build_source = config["build_source"] gh_client = self._get_client() - repo = 
gh_client.get_repo(source) - gh_tags = repo.get_tags() - if limit: - gh_tags = repo.get_tags()[0:limit] - return [tag.name for tag in gh_tags] - except GitHubBadCredentialsException: - return [] - except GithubException: - logger.exception("Got GitHub Exception when trying to list tags for trigger %s", - self.trigger.id) - return [] + # Find the GitHub repository. + try: + gh_repo = gh_client.get_repo(new_build_source) + except UnknownObjectException: + msg = "Unable to find GitHub repository for source: %s" % new_build_source + raise TriggerActivationException(msg) - if field_name == 'branch_name': - try: + # Add a deploy key to the GitHub repository. + public_key, private_key = generate_ssh_keypair() + config["credentials"] = [{"name": "SSH Public Key", "value": public_key}] + + try: + deploy_key = gh_repo.create_key( + "%s Builder" % app.config["REGISTRY_TITLE"], public_key + ) + config["deploy_key_id"] = deploy_key.id + except GithubException as ghe: + default_msg = ( + "Unable to add deploy key to repository: %s" % new_build_source + ) + msg = GithubBuildTrigger._get_error_message(ghe, default_msg) + raise TriggerActivationException(msg) + + # Add the webhook to the GitHub repository. + webhook_config = {"url": standard_webhook_url, "content_type": "json"} + + try: + hook = gh_repo.create_hook("web", webhook_config) + config["hook_id"] = hook.id + config["master_branch"] = gh_repo.default_branch + except GithubException as ghe: + default_msg = ( + "Unable to create webhook on repository: %s" % new_build_source + ) + msg = GithubBuildTrigger._get_error_message(ghe, default_msg) + raise TriggerActivationException(msg) + + return config, {"private_key": private_key} + + @_catch_ssl_errors + def deactivate(self): + config = self.config gh_client = self._get_client() - repo = gh_client.get_repo(source) - gh_branches = repo.get_branches() - if limit: - gh_branches = repo.get_branches()[0:limit] - branches = [branch.name for branch in gh_branches] + # Find the GitHub repository. + try: + repo = gh_client.get_repo(config["build_source"]) + except UnknownObjectException: + msg = ( + "Unable to find GitHub repository for source: %s" + % config["build_source"] + ) + raise TriggerDeactivationException(msg) + except GitHubBadCredentialsException: + msg = "Unable to access repository to disable trigger" + raise TriggerDeactivationException(msg) - if not repo.default_branch in branches: - branches.insert(0, repo.default_branch) + # If the trigger uses a deploy key, remove it. + try: + if config["deploy_key_id"]: + deploy_key = repo.get_key(config["deploy_key_id"]) + deploy_key.delete() + except KeyError: + # There was no config['deploy_key_id'], thus this is an old trigger without a deploy key. + pass + except GithubException as ghe: + default_msg = "Unable to remove deploy key: %s" % config["deploy_key_id"] + msg = GithubBuildTrigger._get_error_message(ghe, default_msg) + raise TriggerDeactivationException(msg) - if branches[0] != repo.default_branch: - branches.remove(repo.default_branch) - branches.insert(0, repo.default_branch) + # Remove the webhook. 
+ if "hook_id" in config: + try: + hook = repo.get_hook(config["hook_id"]) + hook.delete() + except GithubException as ghe: + default_msg = "Unable to remove hook: %s" % config["hook_id"] + msg = GithubBuildTrigger._get_error_message(ghe, default_msg) + raise TriggerDeactivationException(msg) - return branches - except GitHubBadCredentialsException: - return ['master'] - except GithubException: - logger.exception("Got GitHub Exception when trying to list branches for trigger %s", - self.trigger.id) - return ['master'] + config.pop("hook_id", None) + self.config = config + return config - return None + @_catch_ssl_errors + def list_build_source_namespaces(self): + gh_client = self._get_client() + usr = gh_client.get_user() - @classmethod - def _build_metadata_for_commit(cls, commit_sha, ref, repo): - try: - commit = repo.get_commit(commit_sha) - except GithubException: - logger.exception('Could not load commit information from GitHub') - return None + # Build the full set of namespaces for the user, starting with their own. + namespaces = {} + namespaces[usr.login] = { + "personal": True, + "id": usr.login, + "title": usr.name or usr.login, + "avatar_url": usr.avatar_url, + "url": usr.html_url, + "score": usr.plan.private_repos if usr.plan else 0, + } - commit_info = { - 'url': commit.html_url, - 'message': commit.commit.message, - 'date': commit.last_modified - } + for org in usr.get_orgs(): + organization = org.login if org.login else org.name - if commit.author: - commit_info['author'] = { - 'username': commit.author.login, - 'avatar_url': commit.author.avatar_url, - 'url': commit.author.html_url - } + # NOTE: We don't load the organization's html_url nor its plan, because doing + # so requires loading *each organization* via its own API call in this tight + # loop, which was massively slowing down the load time for users when setting + # up triggers. 
+ namespaces[organization] = { + "personal": False, + "id": organization, + "title": organization, + "avatar_url": org.avatar_url, + "url": "", + "score": 0, + } - if commit.committer: - commit_info['committer'] = { - 'username': commit.committer.login, - 'avatar_url': commit.committer.avatar_url, - 'url': commit.committer.html_url - } + return BuildTriggerHandler.build_namespaces_response(namespaces) - return { - 'commit': commit_sha, - 'ref': ref, - 'default_branch': repo.default_branch, - 'git_url': repo.ssh_url, - 'commit_info': commit_info - } + @_catch_ssl_errors + def list_build_sources_for_namespace(self, namespace): + def repo_view(repo): + return { + "name": repo.name, + "full_name": repo.full_name, + "description": repo.description or "", + "last_updated": timegm(repo.pushed_at.utctimetuple()) + if repo.pushed_at + else 0, + "url": repo.html_url, + "has_admin_permissions": repo.permissions.admin, + "private": repo.private, + } - @_catch_ssl_errors - def manual_start(self, run_parameters=None): - config = self.config - source = config['build_source'] + gh_client = self._get_client() + usr = gh_client.get_user() + if namespace == usr.login: + repos = [ + repo_view(repo) for repo in usr.get_repos(type="owner", sort="updated") + ] + return BuildTriggerHandler.build_sources_response(repos) - try: - gh_client = self._get_client() - repo = gh_client.get_repo(source) - default_branch = repo.default_branch - except GithubException as ghe: - msg = GithubBuildTrigger._get_error_message(ghe, 'Unable to start build trigger') - raise TriggerStartException(msg) + try: + org = gh_client.get_organization(namespace) + if org is None: + return [] + except GithubException: + return [] - def get_branch_sha(branch_name): - try: - branch = repo.get_branch(branch_name) - return branch.commit.sha - except GithubException: - raise TriggerStartException('Could not find branch in repository') + repos = [repo_view(repo) for repo in org.get_repos(type="member")] + return BuildTriggerHandler.build_sources_response(repos) - def get_tag_sha(tag_name): - tags = {tag.name: tag for tag in repo.get_tags()} - if not tag_name in tags: - raise TriggerStartException('Could not find tag in repository') + @_catch_ssl_errors + def list_build_subdirs(self): + config = self.config + gh_client = self._get_client() + source = config["build_source"] - return tags[tag_name].commit.sha + try: + repo = gh_client.get_repo(source) - # Find the branch or tag to build. - (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, - default_branch) + # Find the first matching branch. 
+ repo_branches = self.list_field_values("branch_name") or [] + branches = find_matching_branches(config, repo_branches) + branches = branches or [repo.default_branch or "master"] + default_commit = repo.get_branch(branches[0]).commit + commit_tree = repo.get_git_tree(default_commit.sha, recursive=True) - metadata = GithubBuildTrigger._build_metadata_for_commit(commit_sha, ref, repo) - return self.prepare_build(metadata, is_manual=True) + return [ + elem.path + for elem in commit_tree.tree + if ( + elem.type == u"blob" + and self.filename_is_dockerfile(os.path.basename(elem.path)) + ) + ] + except GithubException as ghe: + message = ghe.data.get( + "message", "Unable to list contents of repository: %s" % source + ) + if message == "Branch not found": + raise EmptyRepositoryException() - @_catch_ssl_errors - def lookup_user(self, username): - try: - gh_client = self._get_client() - user = gh_client.get_user(username) - return { - 'html_url': user.html_url, - 'avatar_url': user.avatar_url - } - except GithubException: - return None + raise RepositoryReadException(message) - @_catch_ssl_errors - def handle_trigger_request(self, request): - # Check the payload to see if we should skip it based on the lack of a head_commit. - payload = request.get_json() - if payload is None: - raise InvalidPayloadException('Missing payload') + @_catch_ssl_errors + def load_dockerfile_contents(self): + config = self.config + gh_client = self._get_client() + source = config["build_source"] - # This is for GitHub's probing/testing. - if 'zen' in payload: - raise SkipRequestException() + try: + repo = gh_client.get_repo(source) + except GithubException as ghe: + message = ghe.data.get( + "message", "Unable to list contents of repository: %s" % source + ) + raise RepositoryReadException(message) - # Lookup the default branch for the repository. 
- if 'repository' not in payload: - raise InvalidPayloadException("Missing 'repository' on request") + path = self.get_dockerfile_path() + if not path: + return None - if 'owner' not in payload['repository']: - raise InvalidPayloadException("Missing 'owner' on repository") + try: + file_info = repo.get_contents(path) + # TypeError is needed because directory inputs cause a TypeError + except (GithubException, TypeError) as ghe: + logger.error("got error from trying to find github file %s" % ghe) + return None - if 'name' not in payload['repository']['owner']: - raise InvalidPayloadException("Missing owner 'name' on repository") + if file_info is None: + return None - if 'name' not in payload['repository']: - raise InvalidPayloadException("Missing 'name' on repository") + if isinstance(file_info, list): + return None - default_branch = None - lookup_user = None - try: - repo_full_name = '%s/%s' % (payload['repository']['owner']['name'], - payload['repository']['name']) + content = file_info.content + if file_info.encoding == "base64": + content = base64.b64decode(content) + return content - gh_client = self._get_client() - repo = gh_client.get_repo(repo_full_name) - default_branch = repo.default_branch - lookup_user = self.lookup_user - except GitHubBadCredentialsException: - logger.exception('Got GitHub Credentials Exception; Cannot lookup default branch') - except GithubException: - logger.exception("Got GitHub Exception when trying to start trigger %s", self.trigger.id) - raise SkipRequestException() + @_catch_ssl_errors + def list_field_values(self, field_name, limit=None): + if field_name == "refs": + branches = self.list_field_values("branch_name") + tags = self.list_field_values("tag_name") - logger.debug('GitHub trigger payload %s', payload) - metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, - lookup_user=lookup_user) - prepared = self.prepare_build(metadata) + return [{"kind": "branch", "name": b} for b in branches] + [ + {"kind": "tag", "name": tag} for tag in tags + ] - # Check if we should skip this build. 
- raise_if_skipped_build(prepared, self.config) - return prepared + config = self.config + source = config.get("build_source") + if source is None: + return [] + + if field_name == "tag_name": + try: + gh_client = self._get_client() + repo = gh_client.get_repo(source) + gh_tags = repo.get_tags() + if limit: + gh_tags = repo.get_tags()[0:limit] + + return [tag.name for tag in gh_tags] + except GitHubBadCredentialsException: + return [] + except GithubException: + logger.exception( + "Got GitHub Exception when trying to list tags for trigger %s", + self.trigger.id, + ) + return [] + + if field_name == "branch_name": + try: + gh_client = self._get_client() + repo = gh_client.get_repo(source) + gh_branches = repo.get_branches() + if limit: + gh_branches = repo.get_branches()[0:limit] + + branches = [branch.name for branch in gh_branches] + + if not repo.default_branch in branches: + branches.insert(0, repo.default_branch) + + if branches[0] != repo.default_branch: + branches.remove(repo.default_branch) + branches.insert(0, repo.default_branch) + + return branches + except GitHubBadCredentialsException: + return ["master"] + except GithubException: + logger.exception( + "Got GitHub Exception when trying to list branches for trigger %s", + self.trigger.id, + ) + return ["master"] + + return None + + @classmethod + def _build_metadata_for_commit(cls, commit_sha, ref, repo): + try: + commit = repo.get_commit(commit_sha) + except GithubException: + logger.exception("Could not load commit information from GitHub") + return None + + commit_info = { + "url": commit.html_url, + "message": commit.commit.message, + "date": commit.last_modified, + } + + if commit.author: + commit_info["author"] = { + "username": commit.author.login, + "avatar_url": commit.author.avatar_url, + "url": commit.author.html_url, + } + + if commit.committer: + commit_info["committer"] = { + "username": commit.committer.login, + "avatar_url": commit.committer.avatar_url, + "url": commit.committer.html_url, + } + + return { + "commit": commit_sha, + "ref": ref, + "default_branch": repo.default_branch, + "git_url": repo.ssh_url, + "commit_info": commit_info, + } + + @_catch_ssl_errors + def manual_start(self, run_parameters=None): + config = self.config + source = config["build_source"] + + try: + gh_client = self._get_client() + repo = gh_client.get_repo(source) + default_branch = repo.default_branch + except GithubException as ghe: + msg = GithubBuildTrigger._get_error_message( + ghe, "Unable to start build trigger" + ) + raise TriggerStartException(msg) + + def get_branch_sha(branch_name): + try: + branch = repo.get_branch(branch_name) + return branch.commit.sha + except GithubException: + raise TriggerStartException("Could not find branch in repository") + + def get_tag_sha(tag_name): + tags = {tag.name: tag for tag in repo.get_tags()} + if not tag_name in tags: + raise TriggerStartException("Could not find tag in repository") + + return tags[tag_name].commit.sha + + # Find the branch or tag to build. 
+ (commit_sha, ref) = determine_build_ref( + run_parameters, get_branch_sha, get_tag_sha, default_branch + ) + + metadata = GithubBuildTrigger._build_metadata_for_commit(commit_sha, ref, repo) + return self.prepare_build(metadata, is_manual=True) + + @_catch_ssl_errors + def lookup_user(self, username): + try: + gh_client = self._get_client() + user = gh_client.get_user(username) + return {"html_url": user.html_url, "avatar_url": user.avatar_url} + except GithubException: + return None + + @_catch_ssl_errors + def handle_trigger_request(self, request): + # Check the payload to see if we should skip it based on the lack of a head_commit. + payload = request.get_json() + if payload is None: + raise InvalidPayloadException("Missing payload") + + # This is for GitHub's probing/testing. + if "zen" in payload: + raise SkipRequestException() + + # Lookup the default branch for the repository. + if "repository" not in payload: + raise InvalidPayloadException("Missing 'repository' on request") + + if "owner" not in payload["repository"]: + raise InvalidPayloadException("Missing 'owner' on repository") + + if "name" not in payload["repository"]["owner"]: + raise InvalidPayloadException("Missing owner 'name' on repository") + + if "name" not in payload["repository"]: + raise InvalidPayloadException("Missing 'name' on repository") + + default_branch = None + lookup_user = None + try: + repo_full_name = "%s/%s" % ( + payload["repository"]["owner"]["name"], + payload["repository"]["name"], + ) + + gh_client = self._get_client() + repo = gh_client.get_repo(repo_full_name) + default_branch = repo.default_branch + lookup_user = self.lookup_user + except GitHubBadCredentialsException: + logger.exception( + "Got GitHub Credentials Exception; Cannot lookup default branch" + ) + except GithubException: + logger.exception( + "Got GitHub Exception when trying to start trigger %s", self.trigger.id + ) + raise SkipRequestException() + + logger.debug("GitHub trigger payload %s", payload) + metadata = get_transformed_webhook_payload( + payload, default_branch=default_branch, lookup_user=lookup_user + ) + prepared = self.prepare_build(metadata) + + # Check if we should skip this build. 
+ raise_if_skipped_build(prepared, self.config) + return prepared diff --git a/buildtrigger/gitlabhandler.py b/buildtrigger/gitlabhandler.py index 9ed3e91d0..fbb0afa63 100644 --- a/buildtrigger/gitlabhandler.py +++ b/buildtrigger/gitlabhandler.py @@ -11,12 +11,18 @@ import requests from jsonschema import validate from app import app, gitlab_trigger -from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, - TriggerDeactivationException, TriggerStartException, - SkipRequestException, InvalidPayloadException, - TriggerAuthException, - determine_build_ref, raise_if_skipped_build, - find_matching_branches) +from buildtrigger.triggerutil import ( + RepositoryReadException, + TriggerActivationException, + TriggerDeactivationException, + TriggerStartException, + SkipRequestException, + InvalidPayloadException, + TriggerAuthException, + determine_build_ref, + raise_if_skipped_build, + find_matching_branches, +) from buildtrigger.basehandler import BuildTriggerHandler from endpoints.exception import ExternalServiceError from util.security.ssh import generate_ssh_keypair @@ -25,597 +31,616 @@ from util.dict_wrappers import JSONPathDict, SafeDictSetter logger = logging.getLogger(__name__) GITLAB_WEBHOOK_PAYLOAD_SCHEMA = { - 'type': 'object', - 'properties': { - 'ref': { - 'type': 'string', - }, - 'checkout_sha': { - 'type': ['string', 'null'], - }, - 'repository': { - 'type': 'object', - 'properties': { - 'git_ssh_url': { - 'type': 'string', + "type": "object", + "properties": { + "ref": {"type": "string"}, + "checkout_sha": {"type": ["string", "null"]}, + "repository": { + "type": "object", + "properties": {"git_ssh_url": {"type": "string"}}, + "required": ["git_ssh_url"], }, - }, - 'required': ['git_ssh_url'], - }, - 'commits': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'id': { - 'type': 'string', - }, - 'url': { - 'type': ['string', 'null'], - }, - 'message': { - 'type': 'string', - }, - 'timestamp': { - 'type': 'string', - }, - 'author': { - 'type': 'object', - 'properties': { - 'email': { - 'type': 'string', - }, + "commits": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": {"type": "string"}, + "url": {"type": ["string", "null"]}, + "message": {"type": "string"}, + "timestamp": {"type": "string"}, + "author": { + "type": "object", + "properties": {"email": {"type": "string"}}, + "required": ["email"], + }, + }, + "required": ["id", "message", "timestamp"], }, - 'required': ['email'], - }, }, - 'required': ['id', 'message', 'timestamp'], - }, }, - }, - 'required': ['ref', 'checkout_sha', 'repository'], + "required": ["ref", "checkout_sha", "repository"], } _ACCESS_LEVEL_MAP = { - 50: ("owner", True), - 40: ("master", True), - 30: ("developer", False), - 20: ("reporter", False), - 10: ("guest", False), + 50: ("owner", True), + 40: ("master", True), + 30: ("developer", False), + 20: ("reporter", False), + 10: ("guest", False), } _PER_PAGE_COUNT = 20 def _catch_timeouts_and_errors(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except requests.exceptions.Timeout: - msg = 'Request to the GitLab API timed out' - logger.exception(msg) - raise ExternalServiceError(msg) - except gitlab.GitlabError: - msg = 'GitLab API error. Please contact support.' 
- logger.exception(msg) - raise ExternalServiceError(msg) - return wrapper + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except requests.exceptions.Timeout: + msg = "Request to the GitLab API timed out" + logger.exception(msg) + raise ExternalServiceError(msg) + except gitlab.GitlabError: + msg = "GitLab API error. Please contact support." + logger.exception(msg) + raise ExternalServiceError(msg) + + return wrapper def _paginated_iterator(func, exc, **kwargs): - """ Returns an iterator over invocations of the given function, automatically handling + """ Returns an iterator over invocations of the given function, automatically handling pagination. """ - page = 1 - while True: - result = func(page=page, per_page=_PER_PAGE_COUNT, **kwargs) - if result is None or result is False: - raise exc + page = 1 + while True: + result = func(page=page, per_page=_PER_PAGE_COUNT, **kwargs) + if result is None or result is False: + raise exc - counter = 0 - for item in result: - yield item - counter = counter + 1 + counter = 0 + for item in result: + yield item + counter = counter + 1 - if counter < _PER_PAGE_COUNT: - break + if counter < _PER_PAGE_COUNT: + break - page = page + 1 + page = page + 1 -def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user=None, - lookup_commit=None): - """ Returns the Gitlab webhook JSON payload transformed into our own payload +def get_transformed_webhook_payload( + gl_payload, default_branch=None, lookup_user=None, lookup_commit=None +): + """ Returns the Gitlab webhook JSON payload transformed into our own payload format. If the gl_payload is not valid, returns None. """ - try: - validate(gl_payload, GITLAB_WEBHOOK_PAYLOAD_SCHEMA) - except Exception as exc: - raise InvalidPayloadException(exc.message) + try: + validate(gl_payload, GITLAB_WEBHOOK_PAYLOAD_SCHEMA) + except Exception as exc: + raise InvalidPayloadException(exc.message) - payload = JSONPathDict(gl_payload) + payload = JSONPathDict(gl_payload) - if payload['object_kind'] != 'push' and payload['object_kind'] != 'tag_push': - # Unknown kind of webhook. - raise SkipRequestException + if payload["object_kind"] != "push" and payload["object_kind"] != "tag_push": + # Unknown kind of webhook. + raise SkipRequestException - # Check for empty commits. The commits list will be empty if the branch is deleted. - commits = payload['commits'] - if payload['object_kind'] == 'push' and not commits: - raise SkipRequestException + # Check for empty commits. The commits list will be empty if the branch is deleted. + commits = payload["commits"] + if payload["object_kind"] == "push" and not commits: + raise SkipRequestException - # Check for missing commit information. - commit_sha = payload['checkout_sha'] or payload['after'] - if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000': - raise SkipRequestException + # Check for missing commit information. 
+ commit_sha = payload["checkout_sha"] or payload["after"] + if commit_sha is None or commit_sha == "0000000000000000000000000000000000000000": + raise SkipRequestException - config = SafeDictSetter() - config['commit'] = commit_sha - config['ref'] = payload['ref'] - config['default_branch'] = default_branch - config['git_url'] = payload['repository.git_ssh_url'] + config = SafeDictSetter() + config["commit"] = commit_sha + config["ref"] = payload["ref"] + config["default_branch"] = default_branch + config["git_url"] = payload["repository.git_ssh_url"] - found_commit = JSONPathDict({}) - if payload['object_kind'] == 'push' or payload['object_kind'] == 'tag_push': - # Find the commit associated with the checkout_sha. Gitlab doesn't (necessary) send this in - # any order, so we cannot simply index into the commits list. - found_commit = None - if commits is not None: - for commit in commits: - if commit['id'] == payload['checkout_sha']: - found_commit = JSONPathDict(commit) - break + found_commit = JSONPathDict({}) + if payload["object_kind"] == "push" or payload["object_kind"] == "tag_push": + # Find the commit associated with the checkout_sha. Gitlab doesn't (necessary) send this in + # any order, so we cannot simply index into the commits list. + found_commit = None + if commits is not None: + for commit in commits: + if commit["id"] == payload["checkout_sha"]: + found_commit = JSONPathDict(commit) + break - if found_commit is None and lookup_commit: - checkout_sha = payload['checkout_sha'] or payload['after'] - found_commit_info = lookup_commit(payload['project_id'], checkout_sha) - found_commit = JSONPathDict(dict(found_commit_info) if found_commit_info else {}) + if found_commit is None and lookup_commit: + checkout_sha = payload["checkout_sha"] or payload["after"] + found_commit_info = lookup_commit(payload["project_id"], checkout_sha) + found_commit = JSONPathDict( + dict(found_commit_info) if found_commit_info else {} + ) - if found_commit is None: - raise SkipRequestException + if found_commit is None: + raise SkipRequestException - config['commit_info.url'] = found_commit['url'] - config['commit_info.message'] = found_commit['message'] - config['commit_info.date'] = found_commit['timestamp'] + config["commit_info.url"] = found_commit["url"] + config["commit_info.message"] = found_commit["message"] + config["commit_info.date"] = found_commit["timestamp"] - # Note: Gitlab does not send full user information with the payload, so we have to - # (optionally) look it up. - author_email = found_commit['author.email'] or found_commit['author_email'] - if lookup_user and author_email: - author_info = lookup_user(author_email) - if author_info: - config['commit_info.author.username'] = author_info['username'] - config['commit_info.author.url'] = author_info['html_url'] - config['commit_info.author.avatar_url'] = author_info['avatar_url'] + # Note: Gitlab does not send full user information with the payload, so we have to + # (optionally) look it up. + author_email = found_commit["author.email"] or found_commit["author_email"] + if lookup_user and author_email: + author_info = lookup_user(author_email) + if author_info: + config["commit_info.author.username"] = author_info["username"] + config["commit_info.author.url"] = author_info["html_url"] + config["commit_info.author.avatar_url"] = author_info["avatar_url"] - return config.dict_value() + return config.dict_value() class GitLabBuildTrigger(BuildTriggerHandler): - """ + """ BuildTrigger for GitLab. 
""" - @classmethod - def service_name(cls): - return 'gitlab' - def _get_authorized_client(self): - auth_token = self.auth_token or 'invalid' - api_version = self.config.get('API_VERSION', '4') - client = gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=20, - api_version=api_version) - try: - client.auth() - except gitlab.GitlabGetError as ex: - raise TriggerAuthException(ex.message) + @classmethod + def service_name(cls): + return "gitlab" - return client + def _get_authorized_client(self): + auth_token = self.auth_token or "invalid" + api_version = self.config.get("API_VERSION", "4") + client = gitlab.Gitlab( + gitlab_trigger.api_endpoint(), + oauth_token=auth_token, + timeout=20, + api_version=api_version, + ) + try: + client.auth() + except gitlab.GitlabGetError as ex: + raise TriggerAuthException(ex.message) - def is_active(self): - return 'hook_id' in self.config + return client - @_catch_timeouts_and_errors - def activate(self, standard_webhook_url): - config = self.config - new_build_source = config['build_source'] - gl_client = self._get_authorized_client() + def is_active(self): + return "hook_id" in self.config - # Find the GitLab repository. - gl_project = gl_client.projects.get(new_build_source) - if not gl_project: - msg = 'Unable to find GitLab repository for source: %s' % new_build_source - raise TriggerActivationException(msg) + @_catch_timeouts_and_errors + def activate(self, standard_webhook_url): + config = self.config + new_build_source = config["build_source"] + gl_client = self._get_authorized_client() - # Add a deploy key to the repository. - public_key, private_key = generate_ssh_keypair() - config['credentials'] = [ - { - 'name': 'SSH Public Key', - 'value': public_key, - }, - ] + # Find the GitLab repository. + gl_project = gl_client.projects.get(new_build_source) + if not gl_project: + msg = "Unable to find GitLab repository for source: %s" % new_build_source + raise TriggerActivationException(msg) - key = gl_project.keys.create({ - 'title': '%s Builder' % app.config['REGISTRY_TITLE'], - 'key': public_key, - }) + # Add a deploy key to the repository. + public_key, private_key = generate_ssh_keypair() + config["credentials"] = [{"name": "SSH Public Key", "value": public_key}] - if not key: - msg = 'Unable to add deploy key to repository: %s' % new_build_source - raise TriggerActivationException(msg) + key = gl_project.keys.create( + {"title": "%s Builder" % app.config["REGISTRY_TITLE"], "key": public_key} + ) - config['key_id'] = key.get_id() + if not key: + msg = "Unable to add deploy key to repository: %s" % new_build_source + raise TriggerActivationException(msg) - # Add the webhook to the GitLab repository. - hook = gl_project.hooks.create({ - 'url': standard_webhook_url, - 'push': True, - 'tag_push': True, - 'push_events': True, - 'tag_push_events': True, - }) - if not hook: - msg = 'Unable to create webhook on repository: %s' % new_build_source - raise TriggerActivationException(msg) + config["key_id"] = key.get_id() - config['hook_id'] = hook.get_id() - self.config = config - return config, {'private_key': private_key} + # Add the webhook to the GitLab repository. 
+ hook = gl_project.hooks.create( + { + "url": standard_webhook_url, + "push": True, + "tag_push": True, + "push_events": True, + "tag_push_events": True, + } + ) + if not hook: + msg = "Unable to create webhook on repository: %s" % new_build_source + raise TriggerActivationException(msg) - def deactivate(self): - config = self.config - gl_client = self._get_authorized_client() + config["hook_id"] = hook.get_id() + self.config = config + return config, {"private_key": private_key} + + def deactivate(self): + config = self.config + gl_client = self._get_authorized_client() + + # Find the GitLab repository. + try: + gl_project = gl_client.projects.get(config["build_source"]) + if not gl_project: + config.pop("key_id", None) + config.pop("hook_id", None) + self.config = config + return config + except gitlab.GitlabGetError as ex: + if ex.response_code != 404: + raise + + # Remove the webhook. + try: + gl_project.hooks.delete(config["hook_id"]) + except gitlab.GitlabDeleteError as ex: + if ex.response_code != 404: + raise + + config.pop("hook_id", None) + + # Remove the key + try: + gl_project.keys.delete(config["key_id"]) + except gitlab.GitlabDeleteError as ex: + if ex.response_code != 404: + raise + + config.pop("key_id", None) - # Find the GitLab repository. - try: - gl_project = gl_client.projects.get(config['build_source']) - if not gl_project: - config.pop('key_id', None) - config.pop('hook_id', None) self.config = config return config - except gitlab.GitlabGetError as ex: - if ex.response_code != 404: - raise - # Remove the webhook. - try: - gl_project.hooks.delete(config['hook_id']) - except gitlab.GitlabDeleteError as ex: - if ex.response_code != 404: - raise + @_catch_timeouts_and_errors + def list_build_source_namespaces(self): + gl_client = self._get_authorized_client() + current_user = gl_client.user + if not current_user: + raise RepositoryReadException("Unable to get current user") - config.pop('hook_id', None) + namespaces = {} + for namespace in _paginated_iterator( + gl_client.namespaces.list, RepositoryReadException + ): + namespace_id = namespace.get_id() + if namespace_id in namespaces: + namespaces[namespace_id]["score"] = ( + namespaces[namespace_id]["score"] + 1 + ) + else: + owner = namespace.attributes["name"] + namespaces[namespace_id] = { + "personal": namespace.attributes["kind"] == "user", + "id": str(namespace_id), + "title": namespace.attributes["name"], + "avatar_url": namespace.attributes.get("avatar_url"), + "score": 1, + "url": namespace.attributes.get("web_url") or "", + } - # Remove the key - try: - gl_project.keys.delete(config['key_id']) - except gitlab.GitlabDeleteError as ex: - if ex.response_code != 404: - raise + return BuildTriggerHandler.build_namespaces_response(namespaces) - config.pop('key_id', None) + def _get_namespace(self, gl_client, gl_namespace, lazy=False): + try: + if gl_namespace.attributes["kind"] == "group": + return gl_client.groups.get(gl_namespace.attributes["id"], lazy=lazy) - self.config = config - return config + if gl_namespace.attributes["kind"] == "user": + return gl_client.users.get(gl_client.user.attributes["id"], lazy=lazy) - @_catch_timeouts_and_errors - def list_build_source_namespaces(self): - gl_client = self._get_authorized_client() - current_user = gl_client.user - if not current_user: - raise RepositoryReadException('Unable to get current user') + # Note: This doesn't seem to work for IDs retrieved via the namespaces API; the IDs are + # different. 
+ return gl_client.users.get(gl_namespace.attributes["id"], lazy=lazy) + except gitlab.GitlabGetError: + return None - namespaces = {} - for namespace in _paginated_iterator(gl_client.namespaces.list, RepositoryReadException): - namespace_id = namespace.get_id() - if namespace_id in namespaces: - namespaces[namespace_id]['score'] = namespaces[namespace_id]['score'] + 1 - else: - owner = namespace.attributes['name'] - namespaces[namespace_id] = { - 'personal': namespace.attributes['kind'] == 'user', - 'id': str(namespace_id), - 'title': namespace.attributes['name'], - 'avatar_url': namespace.attributes.get('avatar_url'), - 'score': 1, - 'url': namespace.attributes.get('web_url') or '', + @_catch_timeouts_and_errors + def list_build_sources_for_namespace(self, namespace_id): + if not namespace_id: + return [] + + def repo_view(repo): + # Because *anything* can be None in GitLab API! + permissions = repo.attributes.get("permissions") or {} + group_access = permissions.get("group_access") or {} + project_access = permissions.get("project_access") or {} + + missing_group_access = permissions.get("group_access") is None + missing_project_access = permissions.get("project_access") is None + + access_level = max( + group_access.get("access_level") or 0, + project_access.get("access_level") or 0, + ) + + has_admin_permission = _ACCESS_LEVEL_MAP.get(access_level, ("", False))[1] + if missing_group_access or missing_project_access: + # Default to has permission if we cannot check the permissions. This will allow our users + # to select the repository and then GitLab's own checks will ensure that the webhook is + # added only if allowed. + # TODO: Do we want to display this differently in the UI? + has_admin_permission = True + + view = { + "name": repo.attributes["path"], + "full_name": repo.attributes["path_with_namespace"], + "description": repo.attributes.get("description") or "", + "url": repo.attributes.get("web_url"), + "has_admin_permissions": has_admin_permission, + "private": repo.attributes.get("visibility") == "private", + } + + if repo.attributes.get("last_activity_at"): + try: + last_modified = dateutil.parser.parse( + repo.attributes["last_activity_at"] + ) + view["last_updated"] = timegm(last_modified.utctimetuple()) + except ValueError: + logger.exception( + "Gitlab gave us an invalid last_activity_at: %s", last_modified + ) + + return view + + gl_client = self._get_authorized_client() + + try: + gl_namespace = gl_client.namespaces.get(namespace_id) + except gitlab.GitlabGetError: + return [] + + namespace_obj = self._get_namespace(gl_client, gl_namespace, lazy=True) + repositories = _paginated_iterator( + namespace_obj.projects.list, RepositoryReadException + ) + + try: + return BuildTriggerHandler.build_sources_response( + [repo_view(repo) for repo in repositories] + ) + except gitlab.GitlabGetError: + return [] + + @_catch_timeouts_and_errors + def list_build_subdirs(self): + config = self.config + gl_client = self._get_authorized_client() + new_build_source = config["build_source"] + + gl_project = gl_client.projects.get(new_build_source) + if not gl_project: + msg = "Unable to find GitLab repository for source: %s" % new_build_source + raise RepositoryReadException(msg) + + repo_branches = gl_project.branches.list() + if not repo_branches: + msg = "Unable to find GitLab branches for source: %s" % new_build_source + raise RepositoryReadException(msg) + + branches = [branch.attributes["name"] for branch in repo_branches] + branches = find_matching_branches(config, branches) + 
branches = branches or [gl_project.attributes["default_branch"] or "master"] + + repo_tree = gl_project.repository_tree(ref=branches[0]) + if not repo_tree: + msg = ( + "Unable to find GitLab repository tree for source: %s" + % new_build_source + ) + raise RepositoryReadException(msg) + + return [ + node["name"] + for node in repo_tree + if self.filename_is_dockerfile(node["name"]) + ] + + @_catch_timeouts_and_errors + def load_dockerfile_contents(self): + gl_client = self._get_authorized_client() + path = self.get_dockerfile_path() + + gl_project = gl_client.projects.get(self.config["build_source"]) + if not gl_project: + return None + + branches = self.list_field_values("branch_name") + branches = find_matching_branches(self.config, branches) + if branches == []: + return None + + branch_name = branches[0] + if gl_project.attributes["default_branch"] in branches: + branch_name = gl_project.attributes["default_branch"] + + try: + return gl_project.files.get(path, branch_name).decode() + except gitlab.GitlabGetError: + return None + + @_catch_timeouts_and_errors + def list_field_values(self, field_name, limit=None): + if field_name == "refs": + branches = self.list_field_values("branch_name") + tags = self.list_field_values("tag_name") + + return [{"kind": "branch", "name": b} for b in branches] + [ + {"kind": "tag", "name": t} for t in tags + ] + + gl_client = self._get_authorized_client() + gl_project = gl_client.projects.get(self.config["build_source"]) + if not gl_project: + return [] + + if field_name == "tag_name": + tags = gl_project.tags.list() + if not tags: + return [] + + if limit: + tags = tags[0:limit] + + return [tag.attributes["name"] for tag in tags] + + if field_name == "branch_name": + branches = gl_project.branches.list() + if not branches: + return [] + + if limit: + branches = branches[0:limit] + + return [branch.attributes["name"] for branch in branches] + + return None + + def get_repository_url(self): + return gitlab_trigger.get_public_url(self.config["build_source"]) + + @_catch_timeouts_and_errors + def lookup_commit(self, repo_id, commit_sha): + if repo_id is None: + return None + + gl_client = self._get_authorized_client() + gl_project = gl_client.projects.get(self.config["build_source"], lazy=True) + commit = gl_project.commits.get(commit_sha) + if not commit: + return None + + return commit + + @_catch_timeouts_and_errors + def lookup_user(self, email): + gl_client = self._get_authorized_client() + try: + result = gl_client.users.list(search=email) + if not result: + return None + + [user] = result + return { + "username": user.attributes["username"], + "html_url": user.attributes["web_url"], + "avatar_url": user.attributes["avatar_url"], + } + except ValueError: + return None + + @_catch_timeouts_and_errors + def get_metadata_for_commit(self, commit_sha, ref, repo): + commit = self.lookup_commit(repo.get_id(), commit_sha) + if commit is None: + return None + + metadata = { + "commit": commit.attributes["id"], + "ref": ref, + "default_branch": repo.attributes["default_branch"], + "git_url": repo.attributes["ssh_url_to_repo"], + "commit_info": { + "url": os.path.join( + repo.attributes["web_url"], "commit", commit.attributes["id"] + ), + "message": commit.attributes["message"], + "date": commit.attributes["committed_date"], + }, } - return BuildTriggerHandler.build_namespaces_response(namespaces) + committer = None + if "committer_email" in commit.attributes: + committer = self.lookup_user(commit.attributes["committer_email"]) - def _get_namespace(self, 
gl_client, gl_namespace, lazy=False): - try: - if gl_namespace.attributes['kind'] == 'group': - return gl_client.groups.get(gl_namespace.attributes['id'], lazy=lazy) + author = None + if "author_email" in commit.attributes: + author = self.lookup_user(commit.attributes["author_email"]) - if gl_namespace.attributes['kind'] == 'user': - return gl_client.users.get(gl_client.user.attributes['id'], lazy=lazy) + if committer is not None: + metadata["commit_info"]["committer"] = { + "username": committer["username"], + "avatar_url": committer["avatar_url"], + "url": committer.get("http_url", ""), + } - # Note: This doesn't seem to work for IDs retrieved via the namespaces API; the IDs are - # different. - return gl_client.users.get(gl_namespace.attributes['id'], lazy=lazy) - except gitlab.GitlabGetError: - return None + if author is not None: + metadata["commit_info"]["author"] = { + "username": author["username"], + "avatar_url": author["avatar_url"], + "url": author.get("http_url", ""), + } - @_catch_timeouts_and_errors - def list_build_sources_for_namespace(self, namespace_id): - if not namespace_id: - return [] + return metadata - def repo_view(repo): - # Because *anything* can be None in GitLab API! - permissions = repo.attributes.get('permissions') or {} - group_access = permissions.get('group_access') or {} - project_access = permissions.get('project_access') or {} + @_catch_timeouts_and_errors + def manual_start(self, run_parameters=None): + gl_client = self._get_authorized_client() + gl_project = gl_client.projects.get(self.config["build_source"]) + if not gl_project: + raise TriggerStartException("Could not find repository") - missing_group_access = permissions.get('group_access') is None - missing_project_access = permissions.get('project_access') is None + def get_tag_sha(tag_name): + try: + tag = gl_project.tags.get(tag_name) + except gitlab.GitlabGetError: + raise TriggerStartException("Could not find tag in repository") - access_level = max(group_access.get('access_level') or 0, - project_access.get('access_level') or 0) + return tag.attributes["commit"]["id"] - has_admin_permission = _ACCESS_LEVEL_MAP.get(access_level, ("", False))[1] - if missing_group_access or missing_project_access: - # Default to has permission if we cannot check the permissions. This will allow our users - # to select the repository and then GitLab's own checks will ensure that the webhook is - # added only if allowed. - # TODO: Do we want to display this differently in the UI? - has_admin_permission = True + def get_branch_sha(branch_name): + try: + branch = gl_project.branches.get(branch_name) + except gitlab.GitlabGetError: + raise TriggerStartException("Could not find branch in repository") - view = { - 'name': repo.attributes['path'], - 'full_name': repo.attributes['path_with_namespace'], - 'description': repo.attributes.get('description') or '', - 'url': repo.attributes.get('web_url'), - 'has_admin_permissions': has_admin_permission, - 'private': repo.attributes.get('visibility') == 'private', - } + return branch.attributes["commit"]["id"] - if repo.attributes.get('last_activity_at'): - try: - last_modified = dateutil.parser.parse(repo.attributes['last_activity_at']) - view['last_updated'] = timegm(last_modified.utctimetuple()) - except ValueError: - logger.exception('Gitlab gave us an invalid last_activity_at: %s', last_modified) + # Find the branch or tag to build. 
+ (commit_sha, ref) = determine_build_ref( + run_parameters, + get_branch_sha, + get_tag_sha, + gl_project.attributes["default_branch"], + ) - return view + metadata = self.get_metadata_for_commit(commit_sha, ref, gl_project) + return self.prepare_build(metadata, is_manual=True) - gl_client = self._get_authorized_client() + @_catch_timeouts_and_errors + def handle_trigger_request(self, request): + payload = request.get_json() + if not payload: + raise InvalidPayloadException() - try: - gl_namespace = gl_client.namespaces.get(namespace_id) - except gitlab.GitlabGetError: - return [] + logger.debug("GitLab trigger payload %s", payload) - namespace_obj = self._get_namespace(gl_client, gl_namespace, lazy=True) - repositories = _paginated_iterator(namespace_obj.projects.list, RepositoryReadException) + # Lookup the default branch. + gl_client = self._get_authorized_client() + gl_project = gl_client.projects.get(self.config["build_source"]) + if not gl_project: + logger.debug( + "Skipping GitLab build; project %s not found", + self.config["build_source"], + ) + raise InvalidPayloadException() - try: - return BuildTriggerHandler.build_sources_response([repo_view(repo) for repo in repositories]) - except gitlab.GitlabGetError: - return [] + def lookup_commit(repo_id, commit_sha): + commit = self.lookup_commit(repo_id, commit_sha) + if commit is None: + return None - @_catch_timeouts_and_errors - def list_build_subdirs(self): - config = self.config - gl_client = self._get_authorized_client() - new_build_source = config['build_source'] + return dict(commit.attributes) - gl_project = gl_client.projects.get(new_build_source) - if not gl_project: - msg = 'Unable to find GitLab repository for source: %s' % new_build_source - raise RepositoryReadException(msg) + default_branch = gl_project.attributes["default_branch"] + metadata = get_transformed_webhook_payload( + payload, + default_branch=default_branch, + lookup_user=self.lookup_user, + lookup_commit=lookup_commit, + ) + prepared = self.prepare_build(metadata) - repo_branches = gl_project.branches.list() - if not repo_branches: - msg = 'Unable to find GitLab branches for source: %s' % new_build_source - raise RepositoryReadException(msg) - - branches = [branch.attributes['name'] for branch in repo_branches] - branches = find_matching_branches(config, branches) - branches = branches or [gl_project.attributes['default_branch'] or 'master'] - - repo_tree = gl_project.repository_tree(ref=branches[0]) - if not repo_tree: - msg = 'Unable to find GitLab repository tree for source: %s' % new_build_source - raise RepositoryReadException(msg) - - return [node['name'] for node in repo_tree if self.filename_is_dockerfile(node['name'])] - - @_catch_timeouts_and_errors - def load_dockerfile_contents(self): - gl_client = self._get_authorized_client() - path = self.get_dockerfile_path() - - gl_project = gl_client.projects.get(self.config['build_source']) - if not gl_project: - return None - - branches = self.list_field_values('branch_name') - branches = find_matching_branches(self.config, branches) - if branches == []: - return None - - branch_name = branches[0] - if gl_project.attributes['default_branch'] in branches: - branch_name = gl_project.attributes['default_branch'] - - try: - return gl_project.files.get(path, branch_name).decode() - except gitlab.GitlabGetError: - return None - - @_catch_timeouts_and_errors - def list_field_values(self, field_name, limit=None): - if field_name == 'refs': - branches = self.list_field_values('branch_name') - tags = 
self.list_field_values('tag_name') - - return ([{'kind': 'branch', 'name': b} for b in branches] + - [{'kind': 'tag', 'name': t} for t in tags]) - - gl_client = self._get_authorized_client() - gl_project = gl_client.projects.get(self.config['build_source']) - if not gl_project: - return [] - - if field_name == 'tag_name': - tags = gl_project.tags.list() - if not tags: - return [] - - if limit: - tags = tags[0:limit] - - return [tag.attributes['name'] for tag in tags] - - if field_name == 'branch_name': - branches = gl_project.branches.list() - if not branches: - return [] - - if limit: - branches = branches[0:limit] - - return [branch.attributes['name'] for branch in branches] - - return None - - def get_repository_url(self): - return gitlab_trigger.get_public_url(self.config['build_source']) - - @_catch_timeouts_and_errors - def lookup_commit(self, repo_id, commit_sha): - if repo_id is None: - return None - - gl_client = self._get_authorized_client() - gl_project = gl_client.projects.get(self.config['build_source'], lazy=True) - commit = gl_project.commits.get(commit_sha) - if not commit: - return None - - return commit - - @_catch_timeouts_and_errors - def lookup_user(self, email): - gl_client = self._get_authorized_client() - try: - result = gl_client.users.list(search=email) - if not result: - return None - - [user] = result - return { - 'username': user.attributes['username'], - 'html_url': user.attributes['web_url'], - 'avatar_url': user.attributes['avatar_url'] - } - except ValueError: - return None - - @_catch_timeouts_and_errors - def get_metadata_for_commit(self, commit_sha, ref, repo): - commit = self.lookup_commit(repo.get_id(), commit_sha) - if commit is None: - return None - - metadata = { - 'commit': commit.attributes['id'], - 'ref': ref, - 'default_branch': repo.attributes['default_branch'], - 'git_url': repo.attributes['ssh_url_to_repo'], - 'commit_info': { - 'url': os.path.join(repo.attributes['web_url'], 'commit', commit.attributes['id']), - 'message': commit.attributes['message'], - 'date': commit.attributes['committed_date'], - }, - } - - committer = None - if 'committer_email' in commit.attributes: - committer = self.lookup_user(commit.attributes['committer_email']) - - author = None - if 'author_email' in commit.attributes: - author = self.lookup_user(commit.attributes['author_email']) - - if committer is not None: - metadata['commit_info']['committer'] = { - 'username': committer['username'], - 'avatar_url': committer['avatar_url'], - 'url': committer.get('http_url', ''), - } - - if author is not None: - metadata['commit_info']['author'] = { - 'username': author['username'], - 'avatar_url': author['avatar_url'], - 'url': author.get('http_url', ''), - } - - return metadata - - @_catch_timeouts_and_errors - def manual_start(self, run_parameters=None): - gl_client = self._get_authorized_client() - gl_project = gl_client.projects.get(self.config['build_source']) - if not gl_project: - raise TriggerStartException('Could not find repository') - - def get_tag_sha(tag_name): - try: - tag = gl_project.tags.get(tag_name) - except gitlab.GitlabGetError: - raise TriggerStartException('Could not find tag in repository') - - return tag.attributes['commit']['id'] - - def get_branch_sha(branch_name): - try: - branch = gl_project.branches.get(branch_name) - except gitlab.GitlabGetError: - raise TriggerStartException('Could not find branch in repository') - - return branch.attributes['commit']['id'] - - # Find the branch or tag to build. 
- (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, - gl_project.attributes['default_branch']) - - metadata = self.get_metadata_for_commit(commit_sha, ref, gl_project) - return self.prepare_build(metadata, is_manual=True) - - @_catch_timeouts_and_errors - def handle_trigger_request(self, request): - payload = request.get_json() - if not payload: - raise InvalidPayloadException() - - logger.debug('GitLab trigger payload %s', payload) - - # Lookup the default branch. - gl_client = self._get_authorized_client() - gl_project = gl_client.projects.get(self.config['build_source']) - if not gl_project: - logger.debug('Skipping GitLab build; project %s not found', self.config['build_source']) - raise InvalidPayloadException() - - def lookup_commit(repo_id, commit_sha): - commit = self.lookup_commit(repo_id, commit_sha) - if commit is None: - return None - - return dict(commit.attributes) - - default_branch = gl_project.attributes['default_branch'] - metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, - lookup_user=self.lookup_user, - lookup_commit=lookup_commit) - prepared = self.prepare_build(metadata) - - # Check if we should skip this build. - raise_if_skipped_build(prepared, self.config) - return prepared + # Check if we should skip this build. + raise_if_skipped_build(prepared, self.config) + return prepared diff --git a/buildtrigger/test/bitbucketmock.py b/buildtrigger/test/bitbucketmock.py index 0e5cad97f..b6cf2b3b8 100644 --- a/buildtrigger/test/bitbucketmock.py +++ b/buildtrigger/test/bitbucketmock.py @@ -4,156 +4,168 @@ from mock import Mock from buildtrigger.bitbuckethandler import BitbucketBuildTrigger from util.morecollections import AttrDict -def get_bitbucket_trigger(dockerfile_path=''): - trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger')) - trigger = BitbucketBuildTrigger(trigger_obj, { - 'build_source': 'foo/bar', - 'dockerfile_path': dockerfile_path, - 'nickname': 'knownuser', - 'account_id': 'foo', - }) - trigger._get_client = get_mock_bitbucket - return trigger +def get_bitbucket_trigger(dockerfile_path=""): + trigger_obj = AttrDict(dict(auth_token="foobar", id="sometrigger")) + trigger = BitbucketBuildTrigger( + trigger_obj, + { + "build_source": "foo/bar", + "dockerfile_path": dockerfile_path, + "nickname": "knownuser", + "account_id": "foo", + }, + ) + + trigger._get_client = get_mock_bitbucket + return trigger + def get_repo_path_contents(path, revision): - data = { - 'files': [{'path': 'Dockerfile'}], - } + data = {"files": [{"path": "Dockerfile"}]} + + return (True, data, None) - return (True, data, None) def get_raw_path_contents(path, revision): - if path == 'Dockerfile': - return (True, 'hello world', None) + if path == "Dockerfile": + return (True, "hello world", None) - if path == 'somesubdir/Dockerfile': - return (True, 'hi universe', None) + if path == "somesubdir/Dockerfile": + return (True, "hi universe", None) + + return (False, None, None) - return (False, None, None) def get_branches_and_tags(): - data = { - 'branches': [{'name': 'master'}, {'name': 'otherbranch'}], - 'tags': [{'name': 'sometag'}, {'name': 'someothertag'}], - } - return (True, data, None) + data = { + "branches": [{"name": "master"}, {"name": "otherbranch"}], + "tags": [{"name": "sometag"}, {"name": "someothertag"}], + } + return (True, data, None) + def get_branches(): - return (True, {'master': {}, 'otherbranch': {}}, None) + return (True, {"master": {}, "otherbranch": {}}, None) + def get_tags(): - return 
(True, {'sometag': {}, 'someothertag': {}}, None) + return (True, {"sometag": {}, "someothertag": {}}, None) + def get_branch(branch_name): - if branch_name != 'master': - return (False, None, None) + if branch_name != "master": + return (False, None, None) - data = { - 'target': { - 'hash': 'aaaaaaa', - }, - } + data = {"target": {"hash": "aaaaaaa"}} + + return (True, data, None) - return (True, data, None) def get_tag(tag_name): - if tag_name != 'sometag': - return (False, None, None) + if tag_name != "sometag": + return (False, None, None) - data = { - 'target': { - 'hash': 'aaaaaaa', - }, - } + data = {"target": {"hash": "aaaaaaa"}} + + return (True, data, None) - return (True, data, None) def get_changeset_mock(commit_sha): - if commit_sha != 'aaaaaaa': - return (False, None, 'Not found') + if commit_sha != "aaaaaaa": + return (False, None, "Not found") - data = { - 'node': 'aaaaaaa', - 'message': 'some message', - 'timestamp': 'now', - 'raw_author': 'foo@bar.com', - } + data = { + "node": "aaaaaaa", + "message": "some message", + "timestamp": "now", + "raw_author": "foo@bar.com", + } + + return (True, data, None) - return (True, data, None) def get_changesets(): - changesets_mock = Mock() - changesets_mock.get = Mock(side_effect=get_changeset_mock) - return changesets_mock + changesets_mock = Mock() + changesets_mock.get = Mock(side_effect=get_changeset_mock) + return changesets_mock + def get_deploykeys(): - deploykeys_mock = Mock() - deploykeys_mock.create = Mock(return_value=(True, {'pk': 'someprivatekey'}, None)) - deploykeys_mock.delete = Mock(return_value=(True, {}, None)) - return deploykeys_mock + deploykeys_mock = Mock() + deploykeys_mock.create = Mock(return_value=(True, {"pk": "someprivatekey"}, None)) + deploykeys_mock.delete = Mock(return_value=(True, {}, None)) + return deploykeys_mock + def get_webhooks(): - webhooks_mock = Mock() - webhooks_mock.create = Mock(return_value=(True, {'uuid': 'someuuid'}, None)) - webhooks_mock.delete = Mock(return_value=(True, {}, None)) - return webhooks_mock + webhooks_mock = Mock() + webhooks_mock.create = Mock(return_value=(True, {"uuid": "someuuid"}, None)) + webhooks_mock.delete = Mock(return_value=(True, {}, None)) + return webhooks_mock + def get_repo_mock(name): - if name != 'bar': - return None + if name != "bar": + return None - repo_mock = Mock() - repo_mock.get_main_branch = Mock(return_value=(True, {'name': 'master'}, None)) - repo_mock.get_path_contents = Mock(side_effect=get_repo_path_contents) - repo_mock.get_raw_path_contents = Mock(side_effect=get_raw_path_contents) - repo_mock.get_branches_and_tags = Mock(side_effect=get_branches_and_tags) - repo_mock.get_branches = Mock(side_effect=get_branches) - repo_mock.get_tags = Mock(side_effect=get_tags) - repo_mock.get_branch = Mock(side_effect=get_branch) - repo_mock.get_tag = Mock(side_effect=get_tag) + repo_mock = Mock() + repo_mock.get_main_branch = Mock(return_value=(True, {"name": "master"}, None)) + repo_mock.get_path_contents = Mock(side_effect=get_repo_path_contents) + repo_mock.get_raw_path_contents = Mock(side_effect=get_raw_path_contents) + repo_mock.get_branches_and_tags = Mock(side_effect=get_branches_and_tags) + repo_mock.get_branches = Mock(side_effect=get_branches) + repo_mock.get_tags = Mock(side_effect=get_tags) + repo_mock.get_branch = Mock(side_effect=get_branch) + repo_mock.get_tag = Mock(side_effect=get_tag) + + repo_mock.changesets = Mock(side_effect=get_changesets) + repo_mock.deploykeys = Mock(side_effect=get_deploykeys) + repo_mock.webhooks = 
Mock(side_effect=get_webhooks) + return repo_mock - repo_mock.changesets = Mock(side_effect=get_changesets) - repo_mock.deploykeys = Mock(side_effect=get_deploykeys) - repo_mock.webhooks = Mock(side_effect=get_webhooks) - return repo_mock def get_repositories_mock(): - repos_mock = Mock() - repos_mock.get = Mock(side_effect=get_repo_mock) - return repos_mock + repos_mock = Mock() + repos_mock.get = Mock(side_effect=get_repo_mock) + return repos_mock + def get_namespace_mock(namespace): - namespace_mock = Mock() - namespace_mock.repositories = Mock(side_effect=get_repositories_mock) - return namespace_mock + namespace_mock = Mock() + namespace_mock.repositories = Mock(side_effect=get_repositories_mock) + return namespace_mock + def get_repo(namespace, name): - return { - 'owner': namespace, - 'logo': 'avatarurl', - 'slug': name, - 'description': 'some %s repo' % (name), - 'utc_last_updated': str(datetime.utcfromtimestamp(0)), - 'read_only': namespace != 'knownuser', - 'is_private': name == 'somerepo', - } + return { + "owner": namespace, + "logo": "avatarurl", + "slug": name, + "description": "some %s repo" % (name), + "utc_last_updated": str(datetime.utcfromtimestamp(0)), + "read_only": namespace != "knownuser", + "is_private": name == "somerepo", + } + def get_visible_repos(): - repos = [ - get_repo('knownuser', 'somerepo'), - get_repo('someorg', 'somerepo'), - get_repo('someorg', 'anotherrepo'), - ] - return (True, repos, None) + repos = [ + get_repo("knownuser", "somerepo"), + get_repo("someorg", "somerepo"), + get_repo("someorg", "anotherrepo"), + ] + return (True, repos, None) + def get_authed_mock(token, secret): - authed_mock = Mock() - authed_mock.for_namespace = Mock(side_effect=get_namespace_mock) - authed_mock.get_visible_repositories = Mock(side_effect=get_visible_repos) - return authed_mock + authed_mock = Mock() + authed_mock.for_namespace = Mock(side_effect=get_namespace_mock) + authed_mock.get_visible_repositories = Mock(side_effect=get_visible_repos) + return authed_mock + def get_mock_bitbucket(): - bitbucket_mock = Mock() - bitbucket_mock.get_authorized_client = Mock(side_effect=get_authed_mock) - return bitbucket_mock + bitbucket_mock = Mock() + bitbucket_mock.get_authorized_client = Mock(side_effect=get_authed_mock) + return bitbucket_mock diff --git a/buildtrigger/test/githubmock.py b/buildtrigger/test/githubmock.py index e0f8daffc..c8fcbe73f 100644 --- a/buildtrigger/test/githubmock.py +++ b/buildtrigger/test/githubmock.py @@ -6,173 +6,178 @@ from github import GithubException from buildtrigger.githubhandler import GithubBuildTrigger from util.morecollections import AttrDict -def get_github_trigger(dockerfile_path=''): - trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger')) - trigger = GithubBuildTrigger(trigger_obj, {'build_source': 'foo', 'dockerfile_path': dockerfile_path}) - trigger._get_client = get_mock_github - return trigger + +def get_github_trigger(dockerfile_path=""): + trigger_obj = AttrDict(dict(auth_token="foobar", id="sometrigger")) + trigger = GithubBuildTrigger( + trigger_obj, {"build_source": "foo", "dockerfile_path": dockerfile_path} + ) + trigger._get_client = get_mock_github + return trigger + def get_mock_github(): - def get_commit_mock(commit_sha): - if commit_sha == 'aaaaaaa': - commit_mock = Mock() - commit_mock.sha = commit_sha - commit_mock.html_url = 'http://url/to/commit' - commit_mock.last_modified = 'now' + def get_commit_mock(commit_sha): + if commit_sha == "aaaaaaa": + commit_mock = Mock() + commit_mock.sha = commit_sha 
+ commit_mock.html_url = "http://url/to/commit" + commit_mock.last_modified = "now" - commit_mock.commit = Mock() - commit_mock.commit.message = 'some cool message' + commit_mock.commit = Mock() + commit_mock.commit.message = "some cool message" - commit_mock.committer = Mock() - commit_mock.committer.login = 'someuser' - commit_mock.committer.avatar_url = 'avatarurl' - commit_mock.committer.html_url = 'htmlurl' + commit_mock.committer = Mock() + commit_mock.committer.login = "someuser" + commit_mock.committer.avatar_url = "avatarurl" + commit_mock.committer.html_url = "htmlurl" - commit_mock.author = Mock() - commit_mock.author.login = 'someuser' - commit_mock.author.avatar_url = 'avatarurl' - commit_mock.author.html_url = 'htmlurl' - return commit_mock + commit_mock.author = Mock() + commit_mock.author.login = "someuser" + commit_mock.author.avatar_url = "avatarurl" + commit_mock.author.html_url = "htmlurl" + return commit_mock - raise GithubException(None, None) + raise GithubException(None, None) - def get_branch_mock(branch_name): - if branch_name == 'master': - branch_mock = Mock() - branch_mock.commit = Mock() - branch_mock.commit.sha = 'aaaaaaa' - return branch_mock + def get_branch_mock(branch_name): + if branch_name == "master": + branch_mock = Mock() + branch_mock.commit = Mock() + branch_mock.commit.sha = "aaaaaaa" + return branch_mock - raise GithubException(None, None) + raise GithubException(None, None) + + def get_repo_mock(namespace, name): + repo_mock = Mock() + repo_mock.owner = Mock() + repo_mock.owner.login = namespace + + repo_mock.full_name = "%s/%s" % (namespace, name) + repo_mock.name = name + repo_mock.description = "some %s repo" % (name) + + if name != "anotherrepo": + repo_mock.pushed_at = datetime.utcfromtimestamp(0) + else: + repo_mock.pushed_at = None + + repo_mock.html_url = "https://bitbucket.org/%s/%s" % (namespace, name) + repo_mock.private = name == "somerepo" + repo_mock.permissions = Mock() + repo_mock.permissions.admin = namespace == "knownuser" + return repo_mock + + def get_user_repos_mock(type="all", sort="created"): + return [get_repo_mock("knownuser", "somerepo")] + + def get_org_repos_mock(type="all"): + return [ + get_repo_mock("someorg", "somerepo"), + get_repo_mock("someorg", "anotherrepo"), + ] + + def get_orgs_mock(): + return [get_org_mock("someorg")] + + def get_user_mock(username="knownuser"): + if username == "knownuser": + user_mock = Mock() + user_mock.name = username + user_mock.plan = Mock() + user_mock.plan.private_repos = 1 + user_mock.login = username + user_mock.html_url = "https://bitbucket.org/%s" % (username) + user_mock.avatar_url = "avatarurl" + user_mock.get_repos = Mock(side_effect=get_user_repos_mock) + user_mock.get_orgs = Mock(side_effect=get_orgs_mock) + return user_mock + + raise GithubException(None, None) + + def get_org_mock(namespace): + if namespace == "someorg": + org_mock = Mock() + org_mock.get_repos = Mock(side_effect=get_org_repos_mock) + org_mock.login = namespace + org_mock.html_url = "https://bitbucket.org/%s" % (namespace) + org_mock.avatar_url = "avatarurl" + org_mock.name = namespace + org_mock.plan = Mock() + org_mock.plan.private_repos = 2 + return org_mock + + raise GithubException(None, None) + + def get_tags_mock(): + sometag = Mock() + sometag.name = "sometag" + sometag.commit = get_commit_mock("aaaaaaa") + + someothertag = Mock() + someothertag.name = "someothertag" + someothertag.commit = get_commit_mock("aaaaaaa") + return [sometag, someothertag] + + def get_branches_mock(): + master = 
Mock() + master.name = "master" + master.commit = get_commit_mock("aaaaaaa") + + otherbranch = Mock() + otherbranch.name = "otherbranch" + otherbranch.commit = get_commit_mock("aaaaaaa") + return [master, otherbranch] + + def get_contents_mock(filepath): + if filepath == "Dockerfile": + m = Mock() + m.content = "hello world" + return m + + if filepath == "somesubdir/Dockerfile": + m = Mock() + m.content = "hi universe" + return m + + raise GithubException(None, None) + + def get_git_tree_mock(commit_sha, recursive=False): + first_file = Mock() + first_file.type = "blob" + first_file.path = "Dockerfile" + + second_file = Mock() + second_file.type = "other" + second_file.path = "/some/Dockerfile" + + third_file = Mock() + third_file.type = "blob" + third_file.path = "somesubdir/Dockerfile" + + t = Mock() + + if commit_sha == "aaaaaaa": + t.tree = [first_file, second_file, third_file] + else: + t.tree = [] + + return t - def get_repo_mock(namespace, name): repo_mock = Mock() - repo_mock.owner = Mock() - repo_mock.owner.login = namespace + repo_mock.default_branch = "master" + repo_mock.ssh_url = "ssh_url" - repo_mock.full_name = '%s/%s' % (namespace, name) - repo_mock.name = name - repo_mock.description = 'some %s repo' % (name) + repo_mock.get_branch = Mock(side_effect=get_branch_mock) + repo_mock.get_tags = Mock(side_effect=get_tags_mock) + repo_mock.get_branches = Mock(side_effect=get_branches_mock) + repo_mock.get_commit = Mock(side_effect=get_commit_mock) + repo_mock.get_contents = Mock(side_effect=get_contents_mock) + repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock) - if name != 'anotherrepo': - repo_mock.pushed_at = datetime.utcfromtimestamp(0) - else: - repo_mock.pushed_at = None - - repo_mock.html_url = 'https://bitbucket.org/%s/%s' % (namespace, name) - repo_mock.private = name == 'somerepo' - repo_mock.permissions = Mock() - repo_mock.permissions.admin = namespace == 'knownuser' - return repo_mock - - def get_user_repos_mock(type='all', sort='created'): - return [get_repo_mock('knownuser', 'somerepo')] - - def get_org_repos_mock(type='all'): - return [get_repo_mock('someorg', 'somerepo'), get_repo_mock('someorg', 'anotherrepo')] - - def get_orgs_mock(): - return [get_org_mock('someorg')] - - def get_user_mock(username='knownuser'): - if username == 'knownuser': - user_mock = Mock() - user_mock.name = username - user_mock.plan = Mock() - user_mock.plan.private_repos = 1 - user_mock.login = username - user_mock.html_url = 'https://bitbucket.org/%s' % (username) - user_mock.avatar_url = 'avatarurl' - user_mock.get_repos = Mock(side_effect=get_user_repos_mock) - user_mock.get_orgs = Mock(side_effect=get_orgs_mock) - return user_mock - - raise GithubException(None, None) - - def get_org_mock(namespace): - if namespace == 'someorg': - org_mock = Mock() - org_mock.get_repos = Mock(side_effect=get_org_repos_mock) - org_mock.login = namespace - org_mock.html_url = 'https://bitbucket.org/%s' % (namespace) - org_mock.avatar_url = 'avatarurl' - org_mock.name = namespace - org_mock.plan = Mock() - org_mock.plan.private_repos = 2 - return org_mock - - raise GithubException(None, None) - - def get_tags_mock(): - sometag = Mock() - sometag.name = 'sometag' - sometag.commit = get_commit_mock('aaaaaaa') - - someothertag = Mock() - someothertag.name = 'someothertag' - someothertag.commit = get_commit_mock('aaaaaaa') - return [sometag, someothertag] - - def get_branches_mock(): - master = Mock() - master.name = 'master' - master.commit = get_commit_mock('aaaaaaa') - - otherbranch = Mock() 
- otherbranch.name = 'otherbranch' - otherbranch.commit = get_commit_mock('aaaaaaa') - return [master, otherbranch] - - def get_contents_mock(filepath): - if filepath == 'Dockerfile': - m = Mock() - m.content = 'hello world' - return m - - if filepath == 'somesubdir/Dockerfile': - m = Mock() - m.content = 'hi universe' - return m - - raise GithubException(None, None) - - def get_git_tree_mock(commit_sha, recursive=False): - first_file = Mock() - first_file.type = 'blob' - first_file.path = 'Dockerfile' - - second_file = Mock() - second_file.type = 'other' - second_file.path = '/some/Dockerfile' - - third_file = Mock() - third_file.type = 'blob' - third_file.path = 'somesubdir/Dockerfile' - - t = Mock() - - if commit_sha == 'aaaaaaa': - t.tree = [ - first_file, second_file, third_file, - ] - else: - t.tree = [] - - return t - - repo_mock = Mock() - repo_mock.default_branch = 'master' - repo_mock.ssh_url = 'ssh_url' - - repo_mock.get_branch = Mock(side_effect=get_branch_mock) - repo_mock.get_tags = Mock(side_effect=get_tags_mock) - repo_mock.get_branches = Mock(side_effect=get_branches_mock) - repo_mock.get_commit = Mock(side_effect=get_commit_mock) - repo_mock.get_contents = Mock(side_effect=get_contents_mock) - repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock) - - gh_mock = Mock() - gh_mock.get_repo = Mock(return_value=repo_mock) - gh_mock.get_user = Mock(side_effect=get_user_mock) - gh_mock.get_organization = Mock(side_effect=get_org_mock) - return gh_mock + gh_mock = Mock() + gh_mock.get_repo = Mock(return_value=repo_mock) + gh_mock.get_user = Mock(side_effect=get_user_mock) + gh_mock.get_organization = Mock(side_effect=get_org_mock) + return gh_mock diff --git a/buildtrigger/test/gitlabmock.py b/buildtrigger/test/gitlabmock.py index cd864241e..90c983a40 100644 --- a/buildtrigger/test/gitlabmock.py +++ b/buildtrigger/test/gitlabmock.py @@ -11,588 +11,598 @@ from buildtrigger.gitlabhandler import GitLabBuildTrigger from util.morecollections import AttrDict -@urlmatch(netloc=r'fakegitlab') +@urlmatch(netloc=r"fakegitlab") def catchall_handler(url, request): - return {'status_code': 404} + return {"status_code": 404} -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/users$") def users_handler(url, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} + + if url.query.find("knownuser") < 0: + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps([]), + } - if url.query.find('knownuser') < 0: return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([]), + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + [ + { + "id": 1, + "username": "knownuser", + "name": "Known User", + "state": "active", + "avatar_url": "avatarurl", + "web_url": "https://bitbucket.org/knownuser", + } + ] + ), } - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([ - { - "id": 1, - "username": "knownuser", - "name": "Known User", - "state": "active", - "avatar_url": "avatarurl", - "web_url": "https://bitbucket.org/knownuser", - }, - ]), - } - -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/user$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/user$") def user_handler(_, request): - if not 
request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": 1, - "username": "john_smith", - "email": "john@example.com", - "name": "John Smith", - "state": "active", - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": 1, + "username": "john_smith", + "email": "john@example.com", + "name": "John Smith", + "state": "active", + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/foo%2Fbar$") def project_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": 4, - "description": None, - "default_branch": "master", - "visibility": "private", - "path_with_namespace": "someorg/somerepo", - "ssh_url_to_repo": "git@example.com:someorg/somerepo.git", - "web_url": "http://example.com/someorg/somerepo", - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": 4, + "description": None, + "default_branch": "master", + "visibility": "private", + "path_with_namespace": "someorg/somerepo", + "ssh_url_to_repo": "git@example.com:someorg/somerepo.git", + "web_url": "http://example.com/someorg/somerepo", + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tree$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/repository/tree$") def project_tree_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([ - { - "id": "a1e8f8d745cc87e3a9248358d9352bb7f9a0aeba", - "name": "Dockerfile", - "type": "tree", - "path": "files/Dockerfile", - "mode": "040000", - }, - ]), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + [ + { + "id": "a1e8f8d745cc87e3a9248358d9352bb7f9a0aeba", + "name": "Dockerfile", + "type": "tree", + "path": "files/Dockerfile", + "mode": "040000", + } + ] + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/repository/tags$") def project_tags_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([ - { - 'name': 'sometag', - 'commit': { - 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', - }, - }, - { - 'name': 'someothertag', - 'commit': { - 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', - }, - }, - ]), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( 
+ [ + { + "name": "sometag", + "commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f"}, + }, + { + "name": "someothertag", + "commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f"}, + }, + ] + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/repository/branches$") def project_branches_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([ - { - 'name': 'master', - 'commit': { - 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', - }, - }, - { - 'name': 'otherbranch', - 'commit': { - 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', - }, - }, - ]), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + [ + { + "name": "master", + "commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f"}, + }, + { + "name": "otherbranch", + "commit": {"id": "60a8ff033665e1207714d6670fcd7b65304ec02f"}, + }, + ] + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches/master$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/repository/branches/master$") def project_branch_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "name": "master", - "merged": True, - "protected": True, - "developers_can_push": False, - "developers_can_merge": False, - "commit": { - "author_email": "john@example.com", - "author_name": "John Smith", - "authored_date": "2012-06-27T05:51:39-07:00", - "committed_date": "2012-06-28T03:44:20-07:00", - "committer_email": "john@example.com", - "committer_name": "John Smith", - "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", - "short_id": "7b5c3cc", - "title": "add projects API", - "message": "add projects API", - "parent_ids": [ - "4ad91d3c1144c406e50c7b33bae684bd6837faf8", - ], - }, - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "name": "master", + "merged": True, + "protected": True, + "developers_can_push": False, + "developers_can_merge": False, + "commit": { + "author_email": "john@example.com", + "author_name": "John Smith", + "authored_date": "2012-06-27T05:51:39-07:00", + "committed_date": "2012-06-28T03:44:20-07:00", + "committer_email": "john@example.com", + "committer_name": "John Smith", + "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "short_id": "7b5c3cc", + "title": "add projects API", + "message": "add projects API", + "parent_ids": ["4ad91d3c1144c406e50c7b33bae684bd6837faf8"], + }, + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/someorg$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/namespaces/someorg$") def namespace_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', 
- }, - 'content': json.dumps({ - "id": 2, - "name": "someorg", - "path": "someorg", - "kind": "group", - "full_path": "someorg", - "parent_id": None, - "members_count_with_descendants": 2 - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": 2, + "name": "someorg", + "path": "someorg", + "kind": "group", + "full_path": "someorg", + "parent_id": None, + "members_count_with_descendants": 2, + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/knownuser$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/namespaces/knownuser$") def user_namespace_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": 1, - "name": "knownuser", - "path": "knownuser", - "kind": "user", - "full_path": "knownuser", - "parent_id": None, - "members_count_with_descendants": 2 - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": 1, + "name": "knownuser", + "path": "knownuser", + "kind": "user", + "full_path": "knownuser", + "parent_id": None, + "members_count_with_descendants": 2, + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces(/)?$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/namespaces(/)?$") def namespaces_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([{ - "id": 2, - "name": "someorg", - "path": "someorg", - "kind": "group", - "full_path": "someorg", - "parent_id": None, - "web_url": "http://gitlab.com/groups/someorg", - "members_count_with_descendants": 2 - }]), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + [ + { + "id": 2, + "name": "someorg", + "path": "someorg", + "kind": "group", + "full_path": "someorg", + "parent_id": None, + "web_url": "http://gitlab.com/groups/someorg", + "members_count_with_descendants": 2, + } + ] + ), + } def get_projects_handler(add_permissions_block): - @urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2/projects$') - def projects_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + @urlmatch(netloc=r"fakegitlab", path=r"/api/v4/groups/2/projects$") + def projects_handler(_, request): + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - permissions_block = { - "project_access": { - "access_level": 10, - "notification_level": 3 - }, - "group_access": { - "access_level": 20, - "notification_level": 3 - }, - } + permissions_block = { + "project_access": {"access_level": 10, "notification_level": 3}, + "group_access": {"access_level": 20, "notification_level": 3}, + } - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([{ - "id": 4, - "name": "Some project", - "description": None, - "default_branch": "master", - "visibility": "private", - "path": "someproject", - "path_with_namespace": 
"someorg/someproject", - "last_activity_at": "2013-09-30T13:46:02Z", - "web_url": "http://example.com/someorg/someproject", - "permissions": permissions_block if add_permissions_block else None, - }, - { - "id": 5, - "name": "Another project", - "description": None, - "default_branch": "master", - "visibility": "public", - "path": "anotherproject", - "path_with_namespace": "someorg/anotherproject", - "last_activity_at": "2013-09-30T13:46:02Z", - "web_url": "http://example.com/someorg/anotherproject", - }]), - } - return projects_handler + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + [ + { + "id": 4, + "name": "Some project", + "description": None, + "default_branch": "master", + "visibility": "private", + "path": "someproject", + "path_with_namespace": "someorg/someproject", + "last_activity_at": "2013-09-30T13:46:02Z", + "web_url": "http://example.com/someorg/someproject", + "permissions": permissions_block + if add_permissions_block + else None, + }, + { + "id": 5, + "name": "Another project", + "description": None, + "default_branch": "master", + "visibility": "public", + "path": "anotherproject", + "path_with_namespace": "someorg/anotherproject", + "last_activity_at": "2013-09-30T13:46:02Z", + "web_url": "http://example.com/someorg/anotherproject", + }, + ] + ), + } + + return projects_handler def get_group_handler(null_avatar): - @urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2$') - def group_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + @urlmatch(netloc=r"fakegitlab", path=r"/api/v4/groups/2$") + def group_handler(_, request): + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} + + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": 1, + "name": "SomeOrg Group", + "path": "someorg", + "description": "An interesting group", + "visibility": "public", + "lfs_enabled": True, + "avatar_url": "avatar_url" if not null_avatar else None, + "web_url": "http://gitlab.com/groups/someorg", + "request_access_enabled": False, + "full_name": "SomeOrg Group", + "full_path": "someorg", + "parent_id": None, + } + ), + } + + return group_handler + + +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/repository/files/Dockerfile$") +def dockerfile_handler(_, request): + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": 1, - "name": "SomeOrg Group", - "path": "someorg", - "description": "An interesting group", - "visibility": "public", - "lfs_enabled": True, - "avatar_url": 'avatar_url' if not null_avatar else None, - "web_url": "http://gitlab.com/groups/someorg", - "request_access_enabled": False, - "full_name": "SomeOrg Group", - "full_path": "someorg", - "parent_id": None, - }), + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "file_name": "Dockerfile", + "file_path": "Dockerfile", + "size": 10, + "encoding": "base64", + "content": base64.b64encode("hello world"), + "ref": "master", + "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83", + "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50", + "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d", + } + ), } - return group_handler -@urlmatch(netloc=r'fakegitlab', 
path=r'/api/v4/projects/4/repository/files/Dockerfile$') -def dockerfile_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} - - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "file_name": "Dockerfile", - "file_path": "Dockerfile", - "size": 10, - "encoding": "base64", - "content": base64.b64encode('hello world'), - "ref": "master", - "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83", - "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50", - "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d" - }), - } - - -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/somesubdir%2FDockerfile$') +@urlmatch( + netloc=r"fakegitlab", + path=r"/api/v4/projects/4/repository/files/somesubdir%2FDockerfile$", +) def sub_dockerfile_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "file_name": "Dockerfile", - "file_path": "somesubdir/Dockerfile", - "size": 10, - "encoding": "base64", - "content": base64.b64encode('hi universe'), - "ref": "master", - "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83", - "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50", - "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d" - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "file_name": "Dockerfile", + "file_path": "somesubdir/Dockerfile", + "size": 10, + "encoding": "base64", + "content": base64.b64encode("hi universe"), + "ref": "master", + "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83", + "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50", + "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d", + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags/sometag$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/repository/tags/sometag$") def tag_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "name": "sometag", - "message": "some cool message", - "target": "60a8ff033665e1207714d6670fcd7b65304ec02f", - "commit": { - "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", - "short_id": "60a8ff03", - "title": "Initial commit", - "created_at": "2017-07-26T11:08:53.000+02:00", - "parent_ids": [ - "f61c062ff8bcbdb00e0a1b3317a91aed6ceee06b" - ], - "message": "v5.0.0\n", - "author_name": "Arthur Verschaeve", - "author_email": "contact@arthurverschaeve.be", - "authored_date": "2015-02-01T21:56:31.000+01:00", - "committer_name": "Arthur Verschaeve", - "committer_email": "contact@arthurverschaeve.be", - "committed_date": "2015-02-01T21:56:31.000+01:00" - }, - "release": None, - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "name": "sometag", + "message": "some cool message", + "target": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "commit": { + "id": 
"60a8ff033665e1207714d6670fcd7b65304ec02f", + "short_id": "60a8ff03", + "title": "Initial commit", + "created_at": "2017-07-26T11:08:53.000+02:00", + "parent_ids": ["f61c062ff8bcbdb00e0a1b3317a91aed6ceee06b"], + "message": "v5.0.0\n", + "author_name": "Arthur Verschaeve", + "author_email": "contact@arthurverschaeve.be", + "authored_date": "2015-02-01T21:56:31.000+01:00", + "committer_name": "Arthur Verschaeve", + "committer_email": "contact@arthurverschaeve.be", + "committed_date": "2015-02-01T21:56:31.000+01:00", + }, + "release": None, + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar/repository/commits/60a8ff033665e1207714d6670fcd7b65304ec02f$') +@urlmatch( + netloc=r"fakegitlab", + path=r"/api/v4/projects/foo%2Fbar/repository/commits/60a8ff033665e1207714d6670fcd7b65304ec02f$", +) def commit_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", - "short_id": "60a8ff03366", - "title": "Sanitize for network graph", - "author_name": "someguy", - "author_email": "some.guy@gmail.com", - "committer_name": "Some Guy", - "committer_email": "some.guy@gmail.com", - "created_at": "2012-09-20T09:06:12+03:00", - "message": "Sanitize for network graph", - "committed_date": "2012-09-20T09:06:12+03:00", - "authored_date": "2012-09-20T09:06:12+03:00", - "parent_ids": [ - "ae1d9fb46aa2b07ee9836d49862ec4e2c46fbbba" - ], - "last_pipeline" : { - "id": 8, - "ref": "master", - "sha": "2dc6aa325a317eda67812f05600bdf0fcdc70ab0", - "status": "created", - }, - "stats": { - "additions": 15, - "deletions": 10, - "total": 25 - }, - "status": "running" - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "short_id": "60a8ff03366", + "title": "Sanitize for network graph", + "author_name": "someguy", + "author_email": "some.guy@gmail.com", + "committer_name": "Some Guy", + "committer_email": "some.guy@gmail.com", + "created_at": "2012-09-20T09:06:12+03:00", + "message": "Sanitize for network graph", + "committed_date": "2012-09-20T09:06:12+03:00", + "authored_date": "2012-09-20T09:06:12+03:00", + "parent_ids": ["ae1d9fb46aa2b07ee9836d49862ec4e2c46fbbba"], + "last_pipeline": { + "id": 8, + "ref": "master", + "sha": "2dc6aa325a317eda67812f05600bdf0fcdc70ab0", + "status": "created", + }, + "stats": {"additions": 15, "deletions": 10, "total": 25}, + "status": "running", + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys$', method='POST') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/deploy_keys$", method="POST") def create_deploykey_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": 1, - "title": "Public key", - "key": "ssh-rsa some stuff", - "created_at": "2013-10-02T10:12:29Z", - "can_push": False, - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + 
"id": 1, + "title": "Public key", + "key": "ssh-rsa some stuff", + "created_at": "2013-10-02T10:12:29Z", + "can_push": False, + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks$', method='POST') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/hooks$", method="POST") def create_hook_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({ - "id": 1, - "url": "http://example.com/hook", - "project_id": 4, - "push_events": True, - "issues_events": True, - "confidential_issues_events": True, - "merge_requests_events": True, - "tag_push_events": True, - "note_events": True, - "job_events": True, - "pipeline_events": True, - "wiki_page_events": True, - "enable_ssl_verification": True, - "created_at": "2012-10-12T17:04:47Z", - }), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + { + "id": 1, + "url": "http://example.com/hook", + "project_id": 4, + "push_events": True, + "issues_events": True, + "confidential_issues_events": True, + "merge_requests_events": True, + "tag_push_events": True, + "note_events": True, + "job_events": True, + "pipeline_events": True, + "wiki_page_events": True, + "enable_ssl_verification": True, + "created_at": "2012-10-12T17:04:47Z", + } + ), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks/1$', method='DELETE') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/projects/4/hooks/1$", method="DELETE") def delete_hook_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({}), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps({}), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys/1$', method='DELETE') +@urlmatch( + netloc=r"fakegitlab", path=r"/api/v4/projects/4/deploy_keys/1$", method="DELETE" +) def delete_deploykey_handker(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps({}), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps({}), + } -@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users/1/projects$') +@urlmatch(netloc=r"fakegitlab", path=r"/api/v4/users/1/projects$") def user_projects_list_handler(_, request): - if not request.headers.get('Authorization') == 'Bearer foobar': - return {'status_code': 401} + if not request.headers.get("Authorization") == "Bearer foobar": + return {"status_code": 401} - return { - 'status_code': 200, - 'headers': { - 'Content-Type': 'application/json', - }, - 'content': json.dumps([ - { - "id": 2, - "name": "Another project", - "description": None, - "default_branch": "master", - "visibility": "public", - "path": "anotherproject", - "path_with_namespace": 
"knownuser/anotherproject", - "last_activity_at": "2013-09-30T13:46:02Z", - "web_url": "http://example.com/knownuser/anotherproject", - } - ]), - } + return { + "status_code": 200, + "headers": {"Content-Type": "application/json"}, + "content": json.dumps( + [ + { + "id": 2, + "name": "Another project", + "description": None, + "default_branch": "master", + "visibility": "public", + "path": "anotherproject", + "path_with_namespace": "knownuser/anotherproject", + "last_activity_at": "2013-09-30T13:46:02Z", + "web_url": "http://example.com/knownuser/anotherproject", + } + ] + ), + } @contextmanager -def get_gitlab_trigger(dockerfile_path='', add_permissions=True, missing_avatar_url=False): - handlers = [user_handler, users_handler, project_branches_handler, project_tree_handler, - project_handler, get_projects_handler(add_permissions), tag_handler, - project_branch_handler, get_group_handler(missing_avatar_url), dockerfile_handler, - sub_dockerfile_handler, namespace_handler, user_namespace_handler, namespaces_handler, - commit_handler, create_deploykey_handler, delete_deploykey_handker, - create_hook_handler, delete_hook_handler, project_tags_handler, - user_projects_list_handler, catchall_handler] +def get_gitlab_trigger( + dockerfile_path="", add_permissions=True, missing_avatar_url=False +): + handlers = [ + user_handler, + users_handler, + project_branches_handler, + project_tree_handler, + project_handler, + get_projects_handler(add_permissions), + tag_handler, + project_branch_handler, + get_group_handler(missing_avatar_url), + dockerfile_handler, + sub_dockerfile_handler, + namespace_handler, + user_namespace_handler, + namespaces_handler, + commit_handler, + create_deploykey_handler, + delete_deploykey_handker, + create_hook_handler, + delete_hook_handler, + project_tags_handler, + user_projects_list_handler, + catchall_handler, + ] - with HTTMock(*handlers): - trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger')) - trigger = GitLabBuildTrigger(trigger_obj, { - 'build_source': 'foo/bar', - 'dockerfile_path': dockerfile_path, - 'username': 'knownuser' - }) + with HTTMock(*handlers): + trigger_obj = AttrDict(dict(auth_token="foobar", id="sometrigger")) + trigger = GitLabBuildTrigger( + trigger_obj, + { + "build_source": "foo/bar", + "dockerfile_path": dockerfile_path, + "username": "knownuser", + }, + ) - client = gitlab.Gitlab('http://fakegitlab', oauth_token='foobar', timeout=20, api_version=4) - client.auth() + client = gitlab.Gitlab( + "http://fakegitlab", oauth_token="foobar", timeout=20, api_version=4 + ) + client.auth() - trigger._get_authorized_client = lambda: client - yield trigger + trigger._get_authorized_client = lambda: client + yield trigger diff --git a/buildtrigger/test/test_basehandler.py b/buildtrigger/test/test_basehandler.py index 7162c2535..50bdb5022 100644 --- a/buildtrigger/test/test_basehandler.py +++ b/buildtrigger/test/test_basehandler.py @@ -3,53 +3,74 @@ import pytest from buildtrigger.basehandler import BuildTriggerHandler -@pytest.mark.parametrize('input,output', [ - ("Dockerfile", True), - ("server.Dockerfile", True), - (u"Dockerfile", True), - (u"server.Dockerfile", True), - ("bad file name", False), - (u"bad file name", False), -]) +@pytest.mark.parametrize( + "input,output", + [ + ("Dockerfile", True), + ("server.Dockerfile", True), + (u"Dockerfile", True), + (u"server.Dockerfile", True), + ("bad file name", False), + (u"bad file name", False), + ], +) def test_path_is_dockerfile(input, output): - assert 
BuildTriggerHandler.filename_is_dockerfile(input) == output + assert BuildTriggerHandler.filename_is_dockerfile(input) == output -@pytest.mark.parametrize('input,output', [ - ("", {}), - ("/a", {"/a": ["/"]}), - ("a", {"/a": ["/"]}), - ("/b/a", {"/b/a": ["/b", "/"]}), - ("b/a", {"/b/a": ["/b", "/"]}), - ("/c/b/a", {"/c/b/a": ["/c/b", "/c", "/"]}), - ("/a//b//c", {"/a/b/c": ["/", "/a", "/a/b"]}), - ("/a", {"/a": ["/"]}), -]) +@pytest.mark.parametrize( + "input,output", + [ + ("", {}), + ("/a", {"/a": ["/"]}), + ("a", {"/a": ["/"]}), + ("/b/a", {"/b/a": ["/b", "/"]}), + ("b/a", {"/b/a": ["/b", "/"]}), + ("/c/b/a", {"/c/b/a": ["/c/b", "/c", "/"]}), + ("/a//b//c", {"/a/b/c": ["/", "/a", "/a/b"]}), + ("/a", {"/a": ["/"]}), + ], +) def test_subdir_path_map_no_previous(input, output): - actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(input) - for key in actual_mapping: - value = actual_mapping[key] - actual_mapping[key] = value.sort() - for key in output: - value = output[key] - output[key] = value.sort() + actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(input) + for key in actual_mapping: + value = actual_mapping[key] + actual_mapping[key] = value.sort() + for key in output: + value = output[key] + output[key] = value.sort() - assert actual_mapping == output + assert actual_mapping == output -@pytest.mark.parametrize('new_path,original_dictionary,output', [ - ("/a", {}, {"/a": ["/"]}), - ("b", {"/a": ["some_path", "another_path"]}, {"/a": ["some_path", "another_path"], "/b": ["/"]}), - ("/a/b/c/d", {"/e": ["some_path", "another_path"]}, - {"/e": ["some_path", "another_path"], "/a/b/c/d": ["/", "/a", "/a/b", "/a/b/c"]}), -]) +@pytest.mark.parametrize( + "new_path,original_dictionary,output", + [ + ("/a", {}, {"/a": ["/"]}), + ( + "b", + {"/a": ["some_path", "another_path"]}, + {"/a": ["some_path", "another_path"], "/b": ["/"]}, + ), + ( + "/a/b/c/d", + {"/e": ["some_path", "another_path"]}, + { + "/e": ["some_path", "another_path"], + "/a/b/c/d": ["/", "/a", "/a/b", "/a/b/c"], + }, + ), + ], +) def test_subdir_path_map(new_path, original_dictionary, output): - actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(new_path, original_dictionary) - for key in actual_mapping: - value = actual_mapping[key] - actual_mapping[key] = value.sort() - for key in output: - value = output[key] - output[key] = value.sort() + actual_mapping = BuildTriggerHandler.get_parent_directory_mappings( + new_path, original_dictionary + ) + for key in actual_mapping: + value = actual_mapping[key] + actual_mapping[key] = value.sort() + for key in output: + value = output[key] + output[key] = value.sort() - assert actual_mapping == output + assert actual_mapping == output diff --git a/buildtrigger/test/test_bitbuckethandler.py b/buildtrigger/test/test_bitbuckethandler.py index dbb47521a..3b08917f7 100644 --- a/buildtrigger/test/test_bitbuckethandler.py +++ b/buildtrigger/test/test_bitbuckethandler.py @@ -2,35 +2,44 @@ import json import pytest from buildtrigger.test.bitbucketmock import get_bitbucket_trigger -from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException, - InvalidPayloadException) +from buildtrigger.triggerutil import ( + SkipRequestException, + ValidationRequestException, + InvalidPayloadException, +) from endpoints.building import PreparedBuild from util.morecollections import AttrDict + @pytest.fixture def bitbucket_trigger(): - return get_bitbucket_trigger() + return get_bitbucket_trigger() def 
test_list_build_subdirs(bitbucket_trigger): - assert bitbucket_trigger.list_build_subdirs() == ["/Dockerfile"] + assert bitbucket_trigger.list_build_subdirs() == ["/Dockerfile"] -@pytest.mark.parametrize('dockerfile_path, contents', [ - ('/Dockerfile', 'hello world'), - ('somesubdir/Dockerfile', 'hi universe'), - ('unknownpath', None), -]) +@pytest.mark.parametrize( + "dockerfile_path, contents", + [ + ("/Dockerfile", "hello world"), + ("somesubdir/Dockerfile", "hi universe"), + ("unknownpath", None), + ], +) def test_load_dockerfile_contents(dockerfile_path, contents): - trigger = get_bitbucket_trigger(dockerfile_path) - assert trigger.load_dockerfile_contents() == contents + trigger = get_bitbucket_trigger(dockerfile_path) + assert trigger.load_dockerfile_contents() == contents -@pytest.mark.parametrize('payload, expected_error, expected_message', [ - ('{}', InvalidPayloadException, "'push' is a required property"), - - # Valid payload: - ('''{ +@pytest.mark.parametrize( + "payload, expected_error, expected_message", + [ + ("{}", InvalidPayloadException, "'push' is a required property"), + # Valid payload: + ( + """{ "push": { "changes": [{ "new": { @@ -51,10 +60,13 @@ def test_load_dockerfile_contents(dockerfile_path, contents): "repository": { "full_name": "foo/bar" } - }''', None, None), - - # Skip message: - ('''{ + }""", + None, + None, + ), + # Skip message: + ( + """{ "push": { "changes": [{ "new": { @@ -75,17 +87,25 @@ def test_load_dockerfile_contents(dockerfile_path, contents): "repository": { "full_name": "foo/bar" } - }''', SkipRequestException, ''), -]) -def test_handle_trigger_request(bitbucket_trigger, payload, expected_error, expected_message): - def get_payload(): - return json.loads(payload) + }""", + SkipRequestException, + "", + ), + ], +) +def test_handle_trigger_request( + bitbucket_trigger, payload, expected_error, expected_message +): + def get_payload(): + return json.loads(payload) - request = AttrDict(dict(get_json=get_payload)) + request = AttrDict(dict(get_json=get_payload)) - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - bitbucket_trigger.handle_trigger_request(request) - assert str(ipe.value) == expected_message - else: - assert isinstance(bitbucket_trigger.handle_trigger_request(request), PreparedBuild) + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + bitbucket_trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance( + bitbucket_trigger.handle_trigger_request(request), PreparedBuild + ) diff --git a/buildtrigger/test/test_customhandler.py b/buildtrigger/test/test_customhandler.py index cbb5f484e..984eb27ce 100644 --- a/buildtrigger/test/test_customhandler.py +++ b/buildtrigger/test/test_customhandler.py @@ -1,20 +1,32 @@ import pytest from buildtrigger.customhandler import CustomBuildTrigger -from buildtrigger.triggerutil import (InvalidPayloadException, SkipRequestException, - TriggerStartException) +from buildtrigger.triggerutil import ( + InvalidPayloadException, + SkipRequestException, + TriggerStartException, +) from endpoints.building import PreparedBuild from util.morecollections import AttrDict -@pytest.mark.parametrize('payload, expected_error, expected_message', [ - ('', InvalidPayloadException, 'Missing expected payload'), - ('{}', InvalidPayloadException, "'commit' is a required property"), - ('{"commit": "foo", "ref": "refs/heads/something", "default_branch": "baz"}', - InvalidPayloadException, "u'foo' does not match 
'^([A-Fa-f0-9]{7,})$'"), - - ('{"commit": "11d6fbc", "ref": "refs/heads/something", "default_branch": "baz"}', None, None), - ('''{ +@pytest.mark.parametrize( + "payload, expected_error, expected_message", + [ + ("", InvalidPayloadException, "Missing expected payload"), + ("{}", InvalidPayloadException, "'commit' is a required property"), + ( + '{"commit": "foo", "ref": "refs/heads/something", "default_branch": "baz"}', + InvalidPayloadException, + "u'foo' does not match '^([A-Fa-f0-9]{7,})$'", + ), + ( + '{"commit": "11d6fbc", "ref": "refs/heads/something", "default_branch": "baz"}', + None, + None, + ), + ( + """{ "commit": "11d6fbc", "ref": "refs/heads/something", "default_branch": "baz", @@ -23,29 +35,41 @@ from util.morecollections import AttrDict "url": "http://foo.bar", "date": "NOW" } - }''', SkipRequestException, ''), -]) + }""", + SkipRequestException, + "", + ), + ], +) def test_handle_trigger_request(payload, expected_error, expected_message): - trigger = CustomBuildTrigger(None, {'build_source': 'foo'}) - request = AttrDict(dict(data=payload)) + trigger = CustomBuildTrigger(None, {"build_source": "foo"}) + request = AttrDict(dict(data=payload)) - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - trigger.handle_trigger_request(request) - assert str(ipe.value) == expected_message - else: - assert isinstance(trigger.handle_trigger_request(request), PreparedBuild) + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance(trigger.handle_trigger_request(request), PreparedBuild) -@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [ - ({}, TriggerStartException, 'missing required parameter'), - ({'commit_sha': 'foo'}, TriggerStartException, "'foo' does not match '^([A-Fa-f0-9]{7,})$'"), - ({'commit_sha': '11d6fbc'}, None, None), -]) + +@pytest.mark.parametrize( + "run_parameters, expected_error, expected_message", + [ + ({}, TriggerStartException, "missing required parameter"), + ( + {"commit_sha": "foo"}, + TriggerStartException, + "'foo' does not match '^([A-Fa-f0-9]{7,})$'", + ), + ({"commit_sha": "11d6fbc"}, None, None), + ], +) def test_manual_start(run_parameters, expected_error, expected_message): - trigger = CustomBuildTrigger(None, {'build_source': 'foo'}) - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - trigger.manual_start(run_parameters) - assert str(ipe.value) == expected_message - else: - assert isinstance(trigger.manual_start(run_parameters), PreparedBuild) + trigger = CustomBuildTrigger(None, {"build_source": "foo"}) + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + trigger.manual_start(run_parameters) + assert str(ipe.value) == expected_message + else: + assert isinstance(trigger.manual_start(run_parameters), PreparedBuild) diff --git a/buildtrigger/test/test_githosthandler.py b/buildtrigger/test/test_githosthandler.py index fadf8dce5..f0c43b458 100644 --- a/buildtrigger/test/test_githosthandler.py +++ b/buildtrigger/test/test_githosthandler.py @@ -9,113 +9,145 @@ from endpoints.building import PreparedBuild # in this fixture. Each trigger's mock is expected to return the same data for all of these calls. 
@pytest.fixture(params=[get_github_trigger(), get_bitbucket_trigger()]) def githost_trigger(request): - return request.param - -@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [ - # No branch or tag specified: use the commit of the default branch. - ({}, None, None), - - # Invalid branch. - ({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException, - 'Could not find branch in repository'), - - # Invalid tag. - ({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException, - 'Could not find tag in repository'), - - # Valid branch. - ({'refs': {'kind': 'branch', 'name': 'master'}}, None, None), - - # Valid tag. - ({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None), -]) -def test_manual_start(run_parameters, expected_error, expected_message, githost_trigger): - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - githost_trigger.manual_start(run_parameters) - assert str(ipe.value) == expected_message - else: - assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild) + return request.param -@pytest.mark.parametrize('name, expected', [ - ('refs', [ - {'kind': 'branch', 'name': 'master'}, - {'kind': 'branch', 'name': 'otherbranch'}, - {'kind': 'tag', 'name': 'sometag'}, - {'kind': 'tag', 'name': 'someothertag'}, - ]), - ('tag_name', set(['sometag', 'someothertag'])), - ('branch_name', set(['master', 'otherbranch'])), - ('invalid', None) -]) +@pytest.mark.parametrize( + "run_parameters, expected_error, expected_message", + [ + # No branch or tag specified: use the commit of the default branch. + ({}, None, None), + # Invalid branch. + ( + {"refs": {"kind": "branch", "name": "invalid"}}, + TriggerStartException, + "Could not find branch in repository", + ), + # Invalid tag. + ( + {"refs": {"kind": "tag", "name": "invalid"}}, + TriggerStartException, + "Could not find tag in repository", + ), + # Valid branch. + ({"refs": {"kind": "branch", "name": "master"}}, None, None), + # Valid tag. 
+ ({"refs": {"kind": "tag", "name": "sometag"}}, None, None), + ], +) +def test_manual_start( + run_parameters, expected_error, expected_message, githost_trigger +): + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + githost_trigger.manual_start(run_parameters) + assert str(ipe.value) == expected_message + else: + assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild) + + +@pytest.mark.parametrize( + "name, expected", + [ + ( + "refs", + [ + {"kind": "branch", "name": "master"}, + {"kind": "branch", "name": "otherbranch"}, + {"kind": "tag", "name": "sometag"}, + {"kind": "tag", "name": "someothertag"}, + ], + ), + ("tag_name", set(["sometag", "someothertag"])), + ("branch_name", set(["master", "otherbranch"])), + ("invalid", None), + ], +) def test_list_field_values(name, expected, githost_trigger): - if expected is None: - assert githost_trigger.list_field_values(name) is None - elif isinstance(expected, set): - assert set(githost_trigger.list_field_values(name)) == set(expected) - else: - assert githost_trigger.list_field_values(name) == expected + if expected is None: + assert githost_trigger.list_field_values(name) is None + elif isinstance(expected, set): + assert set(githost_trigger.list_field_values(name)) == set(expected) + else: + assert githost_trigger.list_field_values(name) == expected def test_list_build_source_namespaces(): - namespaces_expected = [ - { - 'personal': True, - 'score': 1, - 'avatar_url': 'avatarurl', - 'id': 'knownuser', - 'title': 'knownuser', - 'url': 'https://bitbucket.org/knownuser', - }, - { - 'score': 2, - 'title': 'someorg', - 'personal': False, - 'url': 'https://bitbucket.org/someorg', - 'avatar_url': 'avatarurl', - 'id': 'someorg' - } - ] + namespaces_expected = [ + { + "personal": True, + "score": 1, + "avatar_url": "avatarurl", + "id": "knownuser", + "title": "knownuser", + "url": "https://bitbucket.org/knownuser", + }, + { + "score": 2, + "title": "someorg", + "personal": False, + "url": "https://bitbucket.org/someorg", + "avatar_url": "avatarurl", + "id": "someorg", + }, + ] - found = get_bitbucket_trigger().list_build_source_namespaces() - found.sort() + found = get_bitbucket_trigger().list_build_source_namespaces() + found.sort() - namespaces_expected.sort() - assert found == namespaces_expected + namespaces_expected.sort() + assert found == namespaces_expected -@pytest.mark.parametrize('namespace, expected', [ - ('', []), - ('unknown', []), - - ('knownuser', [ - { - 'last_updated': 0, 'name': 'somerepo', - 'url': 'https://bitbucket.org/knownuser/somerepo', 'private': True, - 'full_name': 'knownuser/somerepo', 'has_admin_permissions': True, - 'description': 'some somerepo repo' - }]), - - ('someorg', [ - { - 'last_updated': 0, 'name': 'somerepo', - 'url': 'https://bitbucket.org/someorg/somerepo', 'private': True, - 'full_name': 'someorg/somerepo', 'has_admin_permissions': False, - 'description': 'some somerepo repo' - }, - { - 'last_updated': 0, 'name': 'anotherrepo', - 'url': 'https://bitbucket.org/someorg/anotherrepo', 'private': False, - 'full_name': 'someorg/anotherrepo', 'has_admin_permissions': False, - 'description': 'some anotherrepo repo' - }]), -]) +@pytest.mark.parametrize( + "namespace, expected", + [ + ("", []), + ("unknown", []), + ( + "knownuser", + [ + { + "last_updated": 0, + "name": "somerepo", + "url": "https://bitbucket.org/knownuser/somerepo", + "private": True, + "full_name": "knownuser/somerepo", + "has_admin_permissions": True, + "description": "some somerepo repo", + } 
+ ], + ), + ( + "someorg", + [ + { + "last_updated": 0, + "name": "somerepo", + "url": "https://bitbucket.org/someorg/somerepo", + "private": True, + "full_name": "someorg/somerepo", + "has_admin_permissions": False, + "description": "some somerepo repo", + }, + { + "last_updated": 0, + "name": "anotherrepo", + "url": "https://bitbucket.org/someorg/anotherrepo", + "private": False, + "full_name": "someorg/anotherrepo", + "has_admin_permissions": False, + "description": "some anotherrepo repo", + }, + ], + ), + ], +) def test_list_build_sources_for_namespace(namespace, expected, githost_trigger): - assert githost_trigger.list_build_sources_for_namespace(namespace) == expected + assert githost_trigger.list_build_sources_for_namespace(namespace) == expected def test_activate_and_deactivate(githost_trigger): - _, private_key = githost_trigger.activate('http://some/url') - assert 'private_key' in private_key - githost_trigger.deactivate() + _, private_key = githost_trigger.activate("http://some/url") + assert "private_key" in private_key + githost_trigger.deactivate() diff --git a/buildtrigger/test/test_githubhandler.py b/buildtrigger/test/test_githubhandler.py index f7012b0cf..7866359ce 100644 --- a/buildtrigger/test/test_githubhandler.py +++ b/buildtrigger/test/test_githubhandler.py @@ -2,24 +2,33 @@ import json import pytest from buildtrigger.test.githubmock import get_github_trigger -from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException, - InvalidPayloadException) +from buildtrigger.triggerutil import ( + SkipRequestException, + ValidationRequestException, + InvalidPayloadException, +) from endpoints.building import PreparedBuild from util.morecollections import AttrDict + @pytest.fixture def github_trigger(): - return get_github_trigger() + return get_github_trigger() -@pytest.mark.parametrize('payload, expected_error, expected_message', [ - ('{"zen": true}', SkipRequestException, ""), - - ('{}', InvalidPayloadException, "Missing 'repository' on request"), - ('{"repository": "foo"}', InvalidPayloadException, "Missing 'owner' on repository"), - - # Valid payload: - ('''{ +@pytest.mark.parametrize( + "payload, expected_error, expected_message", + [ + ('{"zen": true}', SkipRequestException, ""), + ("{}", InvalidPayloadException, "Missing 'repository' on request"), + ( + '{"repository": "foo"}', + InvalidPayloadException, + "Missing 'owner' on repository", + ), + # Valid payload: + ( + """{ "repository": { "owner": { "name": "someguy" @@ -34,10 +43,13 @@ def github_trigger(): "message": "some message", "timestamp": "NOW" } - }''', None, None), - - # Skip message: - ('''{ + }""", + None, + None, + ), + # Skip message: + ( + """{ "repository": { "owner": { "name": "someguy" @@ -52,66 +64,84 @@ def github_trigger(): "message": "[skip build]", "timestamp": "NOW" } - }''', SkipRequestException, ''), -]) -def test_handle_trigger_request(github_trigger, payload, expected_error, expected_message): - def get_payload(): - return json.loads(payload) + }""", + SkipRequestException, + "", + ), + ], +) +def test_handle_trigger_request( + github_trigger, payload, expected_error, expected_message +): + def get_payload(): + return json.loads(payload) - request = AttrDict(dict(get_json=get_payload)) + request = AttrDict(dict(get_json=get_payload)) - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - github_trigger.handle_trigger_request(request) - assert str(ipe.value) == expected_message - else: - assert 
isinstance(github_trigger.handle_trigger_request(request), PreparedBuild) + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + github_trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild) -@pytest.mark.parametrize('dockerfile_path, contents', [ - ('/Dockerfile', 'hello world'), - ('somesubdir/Dockerfile', 'hi universe'), - ('unknownpath', None), -]) +@pytest.mark.parametrize( + "dockerfile_path, contents", + [ + ("/Dockerfile", "hello world"), + ("somesubdir/Dockerfile", "hi universe"), + ("unknownpath", None), + ], +) def test_load_dockerfile_contents(dockerfile_path, contents): - trigger = get_github_trigger(dockerfile_path) - assert trigger.load_dockerfile_contents() == contents + trigger = get_github_trigger(dockerfile_path) + assert trigger.load_dockerfile_contents() == contents -@pytest.mark.parametrize('username, expected_response', [ - ('unknownuser', None), - ('knownuser', {'html_url': 'https://bitbucket.org/knownuser', 'avatar_url': 'avatarurl'}), -]) +@pytest.mark.parametrize( + "username, expected_response", + [ + ("unknownuser", None), + ( + "knownuser", + {"html_url": "https://bitbucket.org/knownuser", "avatar_url": "avatarurl"}, + ), + ], +) def test_lookup_user(username, expected_response, github_trigger): - assert github_trigger.lookup_user(username) == expected_response + assert github_trigger.lookup_user(username) == expected_response def test_list_build_subdirs(github_trigger): - assert github_trigger.list_build_subdirs() == ['Dockerfile', 'somesubdir/Dockerfile'] + assert github_trigger.list_build_subdirs() == [ + "Dockerfile", + "somesubdir/Dockerfile", + ] def test_list_build_source_namespaces(github_trigger): - namespaces_expected = [ - { - 'personal': True, - 'score': 1, - 'avatar_url': 'avatarurl', - 'id': 'knownuser', - 'title': 'knownuser', - 'url': 'https://bitbucket.org/knownuser', - }, - { - 'score': 0, - 'title': 'someorg', - 'personal': False, - 'url': '', - 'avatar_url': 'avatarurl', - 'id': 'someorg' - } - ] + namespaces_expected = [ + { + "personal": True, + "score": 1, + "avatar_url": "avatarurl", + "id": "knownuser", + "title": "knownuser", + "url": "https://bitbucket.org/knownuser", + }, + { + "score": 0, + "title": "someorg", + "personal": False, + "url": "", + "avatar_url": "avatarurl", + "id": "someorg", + }, + ] - found = github_trigger.list_build_source_namespaces() - found.sort() + found = github_trigger.list_build_source_namespaces() + found.sort() - namespaces_expected.sort() - assert found == namespaces_expected + namespaces_expected.sort() + assert found == namespaces_expected diff --git a/buildtrigger/test/test_gitlabhandler.py b/buildtrigger/test/test_gitlabhandler.py index b74095a8c..cb9b50581 100644 --- a/buildtrigger/test/test_gitlabhandler.py +++ b/buildtrigger/test/test_gitlabhandler.py @@ -4,91 +4,111 @@ import pytest from mock import Mock from buildtrigger.test.gitlabmock import get_gitlab_trigger -from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException, - InvalidPayloadException, TriggerStartException) +from buildtrigger.triggerutil import ( + SkipRequestException, + ValidationRequestException, + InvalidPayloadException, + TriggerStartException, +) from endpoints.building import PreparedBuild from util.morecollections import AttrDict + @pytest.fixture() def gitlab_trigger(): - with get_gitlab_trigger() as t: - yield t + with get_gitlab_trigger() as t: + 
yield t def test_list_build_subdirs(gitlab_trigger): - assert gitlab_trigger.list_build_subdirs() == ['Dockerfile'] + assert gitlab_trigger.list_build_subdirs() == ["Dockerfile"] -@pytest.mark.parametrize('dockerfile_path, contents', [ - ('/Dockerfile', 'hello world'), - ('somesubdir/Dockerfile', 'hi universe'), - ('unknownpath', None), -]) +@pytest.mark.parametrize( + "dockerfile_path, contents", + [ + ("/Dockerfile", "hello world"), + ("somesubdir/Dockerfile", "hi universe"), + ("unknownpath", None), + ], +) def test_load_dockerfile_contents(dockerfile_path, contents): - with get_gitlab_trigger(dockerfile_path=dockerfile_path) as trigger: - assert trigger.load_dockerfile_contents() == contents + with get_gitlab_trigger(dockerfile_path=dockerfile_path) as trigger: + assert trigger.load_dockerfile_contents() == contents -@pytest.mark.parametrize('email, expected_response', [ - ('unknown@email.com', None), - ('knownuser', {'username': 'knownuser', 'html_url': 'https://bitbucket.org/knownuser', - 'avatar_url': 'avatarurl'}), -]) +@pytest.mark.parametrize( + "email, expected_response", + [ + ("unknown@email.com", None), + ( + "knownuser", + { + "username": "knownuser", + "html_url": "https://bitbucket.org/knownuser", + "avatar_url": "avatarurl", + }, + ), + ], +) def test_lookup_user(email, expected_response, gitlab_trigger): - assert gitlab_trigger.lookup_user(email) == expected_response + assert gitlab_trigger.lookup_user(email) == expected_response def test_null_permissions(): - with get_gitlab_trigger(add_permissions=False) as trigger: - sources = trigger.list_build_sources_for_namespace('someorg') - source = sources[0] - assert source['has_admin_permissions'] + with get_gitlab_trigger(add_permissions=False) as trigger: + sources = trigger.list_build_sources_for_namespace("someorg") + source = sources[0] + assert source["has_admin_permissions"] def test_list_build_sources(): - with get_gitlab_trigger() as trigger: - sources = trigger.list_build_sources_for_namespace('someorg') - assert sources == [ - { - 'last_updated': 1380548762, - 'name': u'someproject', - 'url': u'http://example.com/someorg/someproject', - 'private': True, - 'full_name': u'someorg/someproject', - 'has_admin_permissions': False, - 'description': '' - }, - { - 'last_updated': 1380548762, - 'name': u'anotherproject', - 'url': u'http://example.com/someorg/anotherproject', - 'private': False, - 'full_name': u'someorg/anotherproject', - 'has_admin_permissions': True, - 'description': '', - }] + with get_gitlab_trigger() as trigger: + sources = trigger.list_build_sources_for_namespace("someorg") + assert sources == [ + { + "last_updated": 1380548762, + "name": u"someproject", + "url": u"http://example.com/someorg/someproject", + "private": True, + "full_name": u"someorg/someproject", + "has_admin_permissions": False, + "description": "", + }, + { + "last_updated": 1380548762, + "name": u"anotherproject", + "url": u"http://example.com/someorg/anotherproject", + "private": False, + "full_name": u"someorg/anotherproject", + "has_admin_permissions": True, + "description": "", + }, + ] def test_null_avatar(): - with get_gitlab_trigger(missing_avatar_url=True) as trigger: - namespace_data = trigger.list_build_source_namespaces() - expected = { - 'avatar_url': None, - 'personal': False, - 'title': u'someorg', - 'url': u'http://gitlab.com/groups/someorg', - 'score': 1, - 'id': '2', - } + with get_gitlab_trigger(missing_avatar_url=True) as trigger: + namespace_data = trigger.list_build_source_namespaces() + expected = { + 
"avatar_url": None, + "personal": False, + "title": u"someorg", + "url": u"http://gitlab.com/groups/someorg", + "score": 1, + "id": "2", + } - assert namespace_data == [expected] + assert namespace_data == [expected] -@pytest.mark.parametrize('payload, expected_error, expected_message', [ - ('{}', InvalidPayloadException, ''), - - # Valid payload: - ('''{ +@pytest.mark.parametrize( + "payload, expected_error, expected_message", + [ + ("{}", InvalidPayloadException, ""), + # Valid payload: + ( + """{ "object_kind": "push", "ref": "refs/heads/master", "checkout_sha": "aaaaaaa", @@ -103,10 +123,13 @@ def test_null_avatar(): "timestamp": "now" } ] - }''', None, None), - - # Skip message: - ('''{ + }""", + None, + None, + ), + # Skip message: + ( + """{ "object_kind": "push", "ref": "refs/heads/master", "checkout_sha": "aaaaaaa", @@ -121,111 +144,136 @@ def test_null_avatar(): "timestamp": "now" } ] - }''', SkipRequestException, ''), -]) -def test_handle_trigger_request(gitlab_trigger, payload, expected_error, expected_message): - def get_payload(): - return json.loads(payload) + }""", + SkipRequestException, + "", + ), + ], +) +def test_handle_trigger_request( + gitlab_trigger, payload, expected_error, expected_message +): + def get_payload(): + return json.loads(payload) - request = AttrDict(dict(get_json=get_payload)) + request = AttrDict(dict(get_json=get_payload)) - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - gitlab_trigger.handle_trigger_request(request) - assert str(ipe.value) == expected_message - else: - assert isinstance(gitlab_trigger.handle_trigger_request(request), PreparedBuild) + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + gitlab_trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance(gitlab_trigger.handle_trigger_request(request), PreparedBuild) -@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [ - # No branch or tag specified: use the commit of the default branch. - ({}, None, None), - - # Invalid branch. - ({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException, - 'Could not find branch in repository'), - - # Invalid tag. - ({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException, - 'Could not find tag in repository'), - - # Valid branch. - ({'refs': {'kind': 'branch', 'name': 'master'}}, None, None), - - # Valid tag. - ({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None), -]) +@pytest.mark.parametrize( + "run_parameters, expected_error, expected_message", + [ + # No branch or tag specified: use the commit of the default branch. + ({}, None, None), + # Invalid branch. + ( + {"refs": {"kind": "branch", "name": "invalid"}}, + TriggerStartException, + "Could not find branch in repository", + ), + # Invalid tag. + ( + {"refs": {"kind": "tag", "name": "invalid"}}, + TriggerStartException, + "Could not find tag in repository", + ), + # Valid branch. + ({"refs": {"kind": "branch", "name": "master"}}, None, None), + # Valid tag. 
+ ({"refs": {"kind": "tag", "name": "sometag"}}, None, None), + ], +) def test_manual_start(run_parameters, expected_error, expected_message, gitlab_trigger): - if expected_error is not None: - with pytest.raises(expected_error) as ipe: - gitlab_trigger.manual_start(run_parameters) - assert str(ipe.value) == expected_message - else: - assert isinstance(gitlab_trigger.manual_start(run_parameters), PreparedBuild) + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + gitlab_trigger.manual_start(run_parameters) + assert str(ipe.value) == expected_message + else: + assert isinstance(gitlab_trigger.manual_start(run_parameters), PreparedBuild) def test_activate_and_deactivate(gitlab_trigger): - _, private_key = gitlab_trigger.activate('http://some/url') - assert 'private_key' in private_key + _, private_key = gitlab_trigger.activate("http://some/url") + assert "private_key" in private_key - gitlab_trigger.deactivate() + gitlab_trigger.deactivate() -@pytest.mark.parametrize('name, expected', [ - ('refs', [ - {'kind': 'branch', 'name': 'master'}, - {'kind': 'branch', 'name': 'otherbranch'}, - {'kind': 'tag', 'name': 'sometag'}, - {'kind': 'tag', 'name': 'someothertag'}, - ]), - ('tag_name', set(['sometag', 'someothertag'])), - ('branch_name', set(['master', 'otherbranch'])), - ('invalid', None) -]) +@pytest.mark.parametrize( + "name, expected", + [ + ( + "refs", + [ + {"kind": "branch", "name": "master"}, + {"kind": "branch", "name": "otherbranch"}, + {"kind": "tag", "name": "sometag"}, + {"kind": "tag", "name": "someothertag"}, + ], + ), + ("tag_name", set(["sometag", "someothertag"])), + ("branch_name", set(["master", "otherbranch"])), + ("invalid", None), + ], +) def test_list_field_values(name, expected, gitlab_trigger): - if expected is None: - assert gitlab_trigger.list_field_values(name) is None - elif isinstance(expected, set): - assert set(gitlab_trigger.list_field_values(name)) == set(expected) - else: - assert gitlab_trigger.list_field_values(name) == expected + if expected is None: + assert gitlab_trigger.list_field_values(name) is None + elif isinstance(expected, set): + assert set(gitlab_trigger.list_field_values(name)) == set(expected) + else: + assert gitlab_trigger.list_field_values(name) == expected -@pytest.mark.parametrize('namespace, expected', [ - ('', []), - ('unknown', []), - - ('knownuser', [ - { - 'last_updated': 1380548762, - 'name': u'anotherproject', - 'url': u'http://example.com/knownuser/anotherproject', - 'private': False, - 'full_name': u'knownuser/anotherproject', - 'has_admin_permissions': True, - 'description': '' - }, - ]), - - ('someorg', [ - { - 'last_updated': 1380548762, - 'name': u'someproject', - 'url': u'http://example.com/someorg/someproject', - 'private': True, - 'full_name': u'someorg/someproject', - 'has_admin_permissions': False, - 'description': '' - }, - { - 'last_updated': 1380548762, - 'name': u'anotherproject', - 'url': u'http://example.com/someorg/anotherproject', - 'private': False, - 'full_name': u'someorg/anotherproject', - 'has_admin_permissions': True, - 'description': '', - }]), -]) +@pytest.mark.parametrize( + "namespace, expected", + [ + ("", []), + ("unknown", []), + ( + "knownuser", + [ + { + "last_updated": 1380548762, + "name": u"anotherproject", + "url": u"http://example.com/knownuser/anotherproject", + "private": False, + "full_name": u"knownuser/anotherproject", + "has_admin_permissions": True, + "description": "", + } + ], + ), + ( + "someorg", + [ + { + "last_updated": 1380548762, + "name": 
u"someproject", + "url": u"http://example.com/someorg/someproject", + "private": True, + "full_name": u"someorg/someproject", + "has_admin_permissions": False, + "description": "", + }, + { + "last_updated": 1380548762, + "name": u"anotherproject", + "url": u"http://example.com/someorg/anotherproject", + "private": False, + "full_name": u"someorg/anotherproject", + "has_admin_permissions": True, + "description": "", + }, + ], + ), + ], +) def test_list_build_sources_for_namespace(namespace, expected, gitlab_trigger): - assert gitlab_trigger.list_build_sources_for_namespace(namespace) == expected + assert gitlab_trigger.list_build_sources_for_namespace(namespace) == expected diff --git a/buildtrigger/test/test_prepare_trigger.py b/buildtrigger/test/test_prepare_trigger.py index e3aab6b48..839c0a91a 100644 --- a/buildtrigger/test/test_prepare_trigger.py +++ b/buildtrigger/test/test_prepare_trigger.py @@ -12,561 +12,577 @@ from buildtrigger.githubhandler import get_transformed_webhook_payload as gh_web from buildtrigger.gitlabhandler import get_transformed_webhook_payload as gl_webhook from buildtrigger.triggerutil import SkipRequestException + def assertSkipped(filename, processor, *args, **kwargs): - with open('buildtrigger/test/triggerjson/%s.json' % filename) as f: - payload = json.loads(f.read()) + with open("buildtrigger/test/triggerjson/%s.json" % filename) as f: + payload = json.loads(f.read()) - nargs = [payload] - nargs.extend(args) + nargs = [payload] + nargs.extend(args) - with pytest.raises(SkipRequestException): - processor(*nargs, **kwargs) + with pytest.raises(SkipRequestException): + processor(*nargs, **kwargs) def assertSchema(filename, expected, processor, *args, **kwargs): - with open('buildtrigger/test/triggerjson/%s.json' % filename) as f: - payload = json.loads(f.read()) + with open("buildtrigger/test/triggerjson/%s.json" % filename) as f: + payload = json.loads(f.read()) - nargs = [payload] - nargs.extend(args) + nargs = [payload] + nargs.extend(args) - created = processor(*nargs, **kwargs) - assert created == expected - validate(created, METADATA_SCHEMA) + created = processor(*nargs, **kwargs) + assert created == expected + validate(created, METADATA_SCHEMA) def test_custom_custom(): - expected = { - u'commit':u'1c002dd', - u'commit_info': { - u'url': u'gitsoftware.com/repository/commits/1234567', - u'date': u'timestamp', - u'message': u'initial commit', - u'committer': { - u'username': u'user', - u'url': u'gitsoftware.com/users/user', - u'avatar_url': u'gravatar.com/user.png' - }, - u'author': { - u'username': u'user', - u'url': u'gitsoftware.com/users/user', - u'avatar_url': u'gravatar.com/user.png' - } - }, - u'ref': u'refs/heads/master', - u'default_branch': u'master', - u'git_url': u'foobar', - } + expected = { + u"commit": u"1c002dd", + u"commit_info": { + u"url": u"gitsoftware.com/repository/commits/1234567", + u"date": u"timestamp", + u"message": u"initial commit", + u"committer": { + u"username": u"user", + u"url": u"gitsoftware.com/users/user", + u"avatar_url": u"gravatar.com/user.png", + }, + u"author": { + u"username": u"user", + u"url": u"gitsoftware.com/users/user", + u"avatar_url": u"gravatar.com/user.png", + }, + }, + u"ref": u"refs/heads/master", + u"default_branch": u"master", + u"git_url": u"foobar", + } - assertSchema('custom_webhook', expected, custom_trigger_payload, git_url='foobar') + assertSchema("custom_webhook", expected, custom_trigger_payload, git_url="foobar") def test_custom_gitlab(): - expected = { - 'commit': 
u'fb88379ee45de28a0a4590fddcbd8eff8b36026e', - 'ref': u'refs/heads/master', - 'git_url': u'git@gitlab.com:jsmith/somerepo.git', - 'commit_info': { - 'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e', - 'date': u'2015-08-13T19:33:18+00:00', - 'message': u'Fix link\n', - }, - } + expected = { + "commit": u"fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "ref": u"refs/heads/master", + "git_url": u"git@gitlab.com:jsmith/somerepo.git", + "commit_info": { + "url": u"https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "date": u"2015-08-13T19:33:18+00:00", + "message": u"Fix link\n", + }, + } - assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='git@gitlab.com:jsmith/somerepo.git') + assertSchema( + "gitlab_webhook", + expected, + custom_trigger_payload, + git_url="git@gitlab.com:jsmith/somerepo.git", + ) def test_custom_github(): - expected = { - 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'ref': u'refs/heads/master', - 'default_branch': u'master', - 'git_url': u'git@github.com:jsmith/anothertest.git', - 'commit_info': { - 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'date': u'2015-09-11T14:26:16-04:00', - 'message': u'Update Dockerfile', - 'committer': { - 'username': u'jsmith', - }, - 'author': { - 'username': u'jsmith', - }, - }, - } + expected = { + "commit": u"410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "ref": u"refs/heads/master", + "default_branch": u"master", + "git_url": u"git@github.com:jsmith/anothertest.git", + "commit_info": { + "url": u"https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "date": u"2015-09-11T14:26:16-04:00", + "message": u"Update Dockerfile", + "committer": {"username": u"jsmith"}, + "author": {"username": u"jsmith"}, + }, + } - assertSchema('github_webhook', expected, custom_trigger_payload, - git_url='git@github.com:jsmith/anothertest.git') + assertSchema( + "github_webhook", + expected, + custom_trigger_payload, + git_url="git@github.com:jsmith/anothertest.git", + ) def test_custom_bitbucket(): - expected = { - "commit": u"af64ae7188685f8424040b4735ad12941b980d75", - "ref": u"refs/heads/master", - "git_url": u"git@bitbucket.org:jsmith/another-repo.git", - "commit_info": { - "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75", - "date": u"2015-09-10T20:40:54+00:00", - "message": u"Dockerfile edited online with Bitbucket", - "author": { - "username": u"John Smith", - "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", - }, - "committer": { - "username": u"John Smith", - "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", - }, - }, - } + expected = { + "commit": u"af64ae7188685f8424040b4735ad12941b980d75", + "ref": u"refs/heads/master", + "git_url": u"git@bitbucket.org:jsmith/another-repo.git", + "commit_info": { + "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75", + "date": u"2015-09-10T20:40:54+00:00", + "message": u"Dockerfile edited online with Bitbucket", + "author": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + "committer": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + }, + } - assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='git@bitbucket.org:jsmith/another-repo.git') + assertSchema( + 
"bitbucket_webhook", + expected, + custom_trigger_payload, + git_url="git@bitbucket.org:jsmith/another-repo.git", + ) def test_bitbucket_customer_payload_noauthor(): - expected = { - "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc", - "ref": "refs/heads/master", - "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git", - "commit_info": { - "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc", - "date": "2015-09-25T00:55:08+00:00", - "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", - "committer": { - "username": "CodeShip Tagging", - "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/", - }, - }, - } + expected = { + "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc", + "ref": "refs/heads/master", + "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git", + "commit_info": { + "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc", + "date": "2015-09-25T00:55:08+00:00", + "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", + "committer": { + "username": "CodeShip Tagging", + "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/", + }, + }, + } - assertSchema('bitbucket_customer_example_noauthor', expected, bb_webhook) + assertSchema("bitbucket_customer_example_noauthor", expected, bb_webhook) def test_bitbucket_customer_payload_tag(): - expected = { - "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc", - "ref": "refs/tags/0.1.2", - "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git", - "commit_info": { - "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc", - "date": "2015-09-25T00:55:08+00:00", - "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", - "committer": { - "username": "CodeShip Tagging", - "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/", - }, - }, - } + expected = { + "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc", + "ref": "refs/tags/0.1.2", + "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git", + "commit_info": { + "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc", + "date": "2015-09-25T00:55:08+00:00", + "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", + "committer": { + "username": "CodeShip Tagging", + "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/", + }, + }, + } - assertSchema('bitbucket_customer_example_tag', expected, bb_webhook) + assertSchema("bitbucket_customer_example_tag", expected, bb_webhook) def test_bitbucket_commit(): - ref = 'refs/heads/somebranch' - default_branch = 'somebranch' - repository_name = 'foo/bar' + ref = "refs/heads/somebranch" + default_branch = "somebranch" + repository_name = "foo/bar" - def lookup_author(_): - return { - 'user': { - 'display_name': 'cooluser', - 'avatar': 'http://some/avatar/url' - } + def lookup_author(_): + return { + "user": {"display_name": "cooluser", "avatar": "http://some/avatar/url"} + } + + expected = { + "commit": u"abdeaf1b2b4a6b9ddf742c1e1754236380435a62", + "ref": u"refs/heads/somebranch", + "git_url": u"git@bitbucket.org:foo/bar.git", + "default_branch": u"somebranch", + "commit_info": { + "url": 
u"https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62", + "date": u"2012-07-24 00:26:36", + "message": u"making some changes\n", + "author": { + "avatar_url": u"http://some/avatar/url", + "username": u"cooluser", + }, + }, } - expected = { - "commit": u"abdeaf1b2b4a6b9ddf742c1e1754236380435a62", - "ref": u"refs/heads/somebranch", - "git_url": u"git@bitbucket.org:foo/bar.git", - "default_branch": u"somebranch", - "commit_info": { - "url": u"https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62", - "date": u"2012-07-24 00:26:36", - "message": u"making some changes\n", - "author": { - "avatar_url": u"http://some/avatar/url", - "username": u"cooluser", - } - } - } + assertSchema( + "bitbucket_commit", + expected, + bb_commit, + ref, + default_branch, + repository_name, + lookup_author, + ) - assertSchema('bitbucket_commit', expected, bb_commit, ref, default_branch, - repository_name, lookup_author) def test_bitbucket_webhook_payload(): - expected = { - "commit": u"af64ae7188685f8424040b4735ad12941b980d75", - "ref": u"refs/heads/master", - "git_url": u"git@bitbucket.org:jsmith/another-repo.git", - "commit_info": { - "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75", - "date": u"2015-09-10T20:40:54+00:00", - "message": u"Dockerfile edited online with Bitbucket", - "author": { - "username": u"John Smith", - "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", - }, - "committer": { - "username": u"John Smith", - "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", - }, - }, - } + expected = { + "commit": u"af64ae7188685f8424040b4735ad12941b980d75", + "ref": u"refs/heads/master", + "git_url": u"git@bitbucket.org:jsmith/another-repo.git", + "commit_info": { + "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75", + "date": u"2015-09-10T20:40:54+00:00", + "message": u"Dockerfile edited online with Bitbucket", + "author": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + "committer": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + }, + } - assertSchema('bitbucket_webhook', expected, bb_webhook) + assertSchema("bitbucket_webhook", expected, bb_webhook) def test_github_webhook_payload_slash_branch(): - expected = { - 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'ref': u'refs/heads/slash/branch', - 'default_branch': u'master', - 'git_url': u'git@github.com:jsmith/anothertest.git', - 'commit_info': { - 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'date': u'2015-09-11T14:26:16-04:00', - 'message': u'Update Dockerfile', - 'committer': { - 'username': u'jsmith', - }, - 'author': { - 'username': u'jsmith', - }, - }, - } + expected = { + "commit": u"410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "ref": u"refs/heads/slash/branch", + "default_branch": u"master", + "git_url": u"git@github.com:jsmith/anothertest.git", + "commit_info": { + "url": u"https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "date": u"2015-09-11T14:26:16-04:00", + "message": u"Update Dockerfile", + "committer": {"username": u"jsmith"}, + "author": {"username": u"jsmith"}, + }, + } - assertSchema('github_webhook_slash_branch', expected, gh_webhook) + assertSchema("github_webhook_slash_branch", expected, gh_webhook) def test_github_webhook_payload(): - 
expected = { - 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'ref': u'refs/heads/master', - 'default_branch': u'master', - 'git_url': u'git@github.com:jsmith/anothertest.git', - 'commit_info': { - 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'date': u'2015-09-11T14:26:16-04:00', - 'message': u'Update Dockerfile', - 'committer': { - 'username': u'jsmith', - }, - 'author': { - 'username': u'jsmith', - }, - }, - } + expected = { + "commit": u"410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "ref": u"refs/heads/master", + "default_branch": u"master", + "git_url": u"git@github.com:jsmith/anothertest.git", + "commit_info": { + "url": u"https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "date": u"2015-09-11T14:26:16-04:00", + "message": u"Update Dockerfile", + "committer": {"username": u"jsmith"}, + "author": {"username": u"jsmith"}, + }, + } - assertSchema('github_webhook', expected, gh_webhook) + assertSchema("github_webhook", expected, gh_webhook) def test_github_webhook_payload_with_lookup(): - expected = { - 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'ref': u'refs/heads/master', - 'default_branch': u'master', - 'git_url': u'git@github.com:jsmith/anothertest.git', - 'commit_info': { - 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'date': u'2015-09-11T14:26:16-04:00', - 'message': u'Update Dockerfile', - 'committer': { - 'username': u'jsmith', - 'url': u'http://github.com/jsmith', - 'avatar_url': u'http://some/avatar/url', - }, - 'author': { - 'username': u'jsmith', - 'url': u'http://github.com/jsmith', - 'avatar_url': u'http://some/avatar/url', - }, - }, - } - - def lookup_user(_): - return { - 'html_url': 'http://github.com/jsmith', - 'avatar_url': 'http://some/avatar/url' + expected = { + "commit": u"410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "ref": u"refs/heads/master", + "default_branch": u"master", + "git_url": u"git@github.com:jsmith/anothertest.git", + "commit_info": { + "url": u"https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "date": u"2015-09-11T14:26:16-04:00", + "message": u"Update Dockerfile", + "committer": { + "username": u"jsmith", + "url": u"http://github.com/jsmith", + "avatar_url": u"http://some/avatar/url", + }, + "author": { + "username": u"jsmith", + "url": u"http://github.com/jsmith", + "avatar_url": u"http://some/avatar/url", + }, + }, } - assertSchema('github_webhook', expected, gh_webhook, lookup_user=lookup_user) + def lookup_user(_): + return { + "html_url": "http://github.com/jsmith", + "avatar_url": "http://some/avatar/url", + } + + assertSchema("github_webhook", expected, gh_webhook, lookup_user=lookup_user) def test_github_webhook_payload_missing_fields_with_lookup(): - expected = { - 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'ref': u'refs/heads/master', - 'default_branch': u'master', - 'git_url': u'git@github.com:jsmith/anothertest.git', - 'commit_info': { - 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'date': u'2015-09-11T14:26:16-04:00', - 'message': u'Update Dockerfile' - }, - } - - def lookup_user(username): - if not username: - raise Exception('Fail!') - - return { - 'html_url': 'http://github.com/jsmith', - 'avatar_url': 'http://some/avatar/url' + expected = { + "commit": u"410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "ref": u"refs/heads/master", + "default_branch": u"master", + 
"git_url": u"git@github.com:jsmith/anothertest.git", + "commit_info": { + "url": u"https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "date": u"2015-09-11T14:26:16-04:00", + "message": u"Update Dockerfile", + }, } - assertSchema('github_webhook_missing', expected, gh_webhook, lookup_user=lookup_user) + def lookup_user(username): + if not username: + raise Exception("Fail!") + + return { + "html_url": "http://github.com/jsmith", + "avatar_url": "http://some/avatar/url", + } + + assertSchema( + "github_webhook_missing", expected, gh_webhook, lookup_user=lookup_user + ) def test_gitlab_webhook_payload(): - expected = { - 'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e', - 'ref': u'refs/heads/master', - 'git_url': u'git@gitlab.com:jsmith/somerepo.git', - 'commit_info': { - 'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e', - 'date': u'2015-08-13T19:33:18+00:00', - 'message': u'Fix link\n', - }, - } + expected = { + "commit": u"fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "ref": u"refs/heads/master", + "git_url": u"git@gitlab.com:jsmith/somerepo.git", + "commit_info": { + "url": u"https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "date": u"2015-08-13T19:33:18+00:00", + "message": u"Fix link\n", + }, + } - assertSchema('gitlab_webhook', expected, gl_webhook) + assertSchema("gitlab_webhook", expected, gl_webhook) def test_github_webhook_payload_known_issue(): - expected = { - "commit": "118b07121695d9f2e40a5ff264fdcc2917680870", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:jsmith/docker-test.git", - "commit_info": { - "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870", - "date": "2015-09-25T14:55:11-04:00", - "message": "Fail", - }, - } + expected = { + "commit": "118b07121695d9f2e40a5ff264fdcc2917680870", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:jsmith/docker-test.git", + "commit_info": { + "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870", + "date": "2015-09-25T14:55:11-04:00", + "message": "Fail", + }, + } - assertSchema('github_webhook_noname', expected, gh_webhook) + assertSchema("github_webhook_noname", expected, gh_webhook) def test_github_webhook_payload_missing_fields(): - expected = { - 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'ref': u'refs/heads/master', - 'default_branch': u'master', - 'git_url': u'git@github.com:jsmith/anothertest.git', - 'commit_info': { - 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', - 'date': u'2015-09-11T14:26:16-04:00', - 'message': u'Update Dockerfile' - }, - } + expected = { + "commit": u"410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "ref": u"refs/heads/master", + "default_branch": u"master", + "git_url": u"git@github.com:jsmith/anothertest.git", + "commit_info": { + "url": u"https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "date": u"2015-09-11T14:26:16-04:00", + "message": u"Update Dockerfile", + }, + } - assertSchema('github_webhook_missing', expected, gh_webhook) + assertSchema("github_webhook_missing", expected, gh_webhook) def test_gitlab_webhook_nocommit_payload(): - assertSkipped('gitlab_webhook_nocommit', gl_webhook) + assertSkipped("gitlab_webhook_nocommit", gl_webhook) def test_gitlab_webhook_multiple_commits(): - expected = { - 'commit': 
u'9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53', - 'ref': u'refs/heads/master', - 'git_url': u'git@gitlab.com:jsmith/some-test-project.git', - 'commit_info': { - 'url': u'https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53', - 'date': u'2016-09-29T15:02:41+00:00', - 'message': u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1", - 'author': { - 'username': 'jsmith', - 'url': 'http://gitlab.com/jsmith', - 'avatar_url': 'http://some/avatar/url' - }, - }, - } - - def lookup_user(_): - return { - 'username': 'jsmith', - 'html_url': 'http://gitlab.com/jsmith', - 'avatar_url': 'http://some/avatar/url', + expected = { + "commit": u"9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53", + "ref": u"refs/heads/master", + "git_url": u"git@gitlab.com:jsmith/some-test-project.git", + "commit_info": { + "url": u"https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53", + "date": u"2016-09-29T15:02:41+00:00", + "message": u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1", + "author": { + "username": "jsmith", + "url": "http://gitlab.com/jsmith", + "avatar_url": "http://some/avatar/url", + }, + }, } - assertSchema('gitlab_webhook_multicommit', expected, gl_webhook, lookup_user=lookup_user) + def lookup_user(_): + return { + "username": "jsmith", + "html_url": "http://gitlab.com/jsmith", + "avatar_url": "http://some/avatar/url", + } + + assertSchema( + "gitlab_webhook_multicommit", expected, gl_webhook, lookup_user=lookup_user + ) def test_gitlab_webhook_for_tag(): - expected = { - 'commit': u'82b3d5ae55f7080f1e6022629cdb57bfae7cccc7', - 'commit_info': { - 'author': { - 'avatar_url': 'http://some/avatar/url', - 'url': 'http://gitlab.com/jsmith', - 'username': 'jsmith' - }, - 'date': '2015-08-13T19:33:18+00:00', - 'message': 'Fix link\n', - 'url': 'https://some/url', - }, - 'git_url': u'git@example.com:jsmith/example.git', - 'ref': u'refs/tags/v1.0.0', - } - - def lookup_user(_): - return { - 'username': 'jsmith', - 'html_url': 'http://gitlab.com/jsmith', - 'avatar_url': 'http://some/avatar/url', + expected = { + "commit": u"82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "commit_info": { + "author": { + "avatar_url": "http://some/avatar/url", + "url": "http://gitlab.com/jsmith", + "username": "jsmith", + }, + "date": "2015-08-13T19:33:18+00:00", + "message": "Fix link\n", + "url": "https://some/url", + }, + "git_url": u"git@example.com:jsmith/example.git", + "ref": u"refs/tags/v1.0.0", } - def lookup_commit(repo_id, commit_sha): - if commit_sha == '82b3d5ae55f7080f1e6022629cdb57bfae7cccc7': - return { - "id": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", - "message": "Fix link\n", - "timestamp": "2015-08-13T19:33:18+00:00", - "url": "https://some/url", - "author_name": "Foo Guy", - "author_email": "foo@bar.com", - } + def lookup_user(_): + return { + "username": "jsmith", + "html_url": "http://gitlab.com/jsmith", + "avatar_url": "http://some/avatar/url", + } - return None + def lookup_commit(repo_id, commit_sha): + if commit_sha == "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7": + return { + "id": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "message": "Fix link\n", + "timestamp": "2015-08-13T19:33:18+00:00", + "url": "https://some/url", + "author_name": "Foo Guy", + "author_email": "foo@bar.com", + } - assertSchema('gitlab_webhook_tag', expected, gl_webhook, lookup_user=lookup_user, - lookup_commit=lookup_commit) + return None + + 
assertSchema( + "gitlab_webhook_tag", + expected, + gl_webhook, + lookup_user=lookup_user, + lookup_commit=lookup_commit, + ) def test_gitlab_webhook_for_tag_nocommit(): - assertSkipped('gitlab_webhook_tag', gl_webhook) + assertSkipped("gitlab_webhook_tag", gl_webhook) def test_gitlab_webhook_for_tag_commit_sha_null(): - assertSkipped('gitlab_webhook_tag_commit_sha_null', gl_webhook) + assertSkipped("gitlab_webhook_tag_commit_sha_null", gl_webhook) def test_gitlab_webhook_for_tag_known_issue(): - expected = { - 'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f', - 'ref': u'refs/tags/thirdtag', - 'git_url': u'git@gitlab.com:someuser/some-test-project.git', - 'commit_info': { - 'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f', - 'date': u'2019-10-17T18:07:48Z', - 'message': u'Update Dockerfile', - 'author': { - 'username': 'someuser', - 'url': 'http://gitlab.com/someuser', - 'avatar_url': 'http://some/avatar/url', - }, - }, - } - - def lookup_user(_): - return { - 'username': 'someuser', - 'html_url': 'http://gitlab.com/someuser', - 'avatar_url': 'http://some/avatar/url', + expected = { + "commit": u"770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "ref": u"refs/tags/thirdtag", + "git_url": u"git@gitlab.com:someuser/some-test-project.git", + "commit_info": { + "url": u"https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "date": u"2019-10-17T18:07:48Z", + "message": u"Update Dockerfile", + "author": { + "username": "someuser", + "url": "http://gitlab.com/someuser", + "avatar_url": "http://some/avatar/url", + }, + }, } - assertSchema('gitlab_webhook_tag_commit_issue', expected, gl_webhook, lookup_user=lookup_user) + def lookup_user(_): + return { + "username": "someuser", + "html_url": "http://gitlab.com/someuser", + "avatar_url": "http://some/avatar/url", + } + + assertSchema( + "gitlab_webhook_tag_commit_issue", expected, gl_webhook, lookup_user=lookup_user + ) def test_gitlab_webhook_payload_known_issue(): - expected = { - 'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f', - 'ref': u'refs/tags/fourthtag', - 'git_url': u'git@gitlab.com:someuser/some-test-project.git', - 'commit_info': { - 'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f', - 'date': u'2019-10-17T18:07:48Z', - 'message': u'Update Dockerfile', - }, - } + expected = { + "commit": u"770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "ref": u"refs/tags/fourthtag", + "git_url": u"git@gitlab.com:someuser/some-test-project.git", + "commit_info": { + "url": u"https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "date": u"2019-10-17T18:07:48Z", + "message": u"Update Dockerfile", + }, + } - def lookup_commit(repo_id, commit_sha): - if commit_sha == '770830e7ca132856991e6db4f7fc0f4dbe20bd5f': - return { - "added": [], - "author": { - "name": "Some User", - "email": "someuser@somedomain.com" - }, - "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f", - "message": "Update Dockerfile", - "removed": [], - "modified": [ - "Dockerfile" - ], - "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f" - } + def lookup_commit(repo_id, commit_sha): + if commit_sha == "770830e7ca132856991e6db4f7fc0f4dbe20bd5f": + return { + "added": [], + "author": {"name": "Some User", "email": "someuser@somedomain.com"}, + "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + 
"message": "Update Dockerfile", + "removed": [], + "modified": ["Dockerfile"], + "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + } - return None + return None - assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit) + assertSchema( + "gitlab_webhook_known_issue", expected, gl_webhook, lookup_commit=lookup_commit + ) def test_gitlab_webhook_for_other(): - assertSkipped('gitlab_webhook_other', gl_webhook) + assertSkipped("gitlab_webhook_other", gl_webhook) def test_gitlab_webhook_payload_with_lookup(): - expected = { - 'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e', - 'ref': u'refs/heads/master', - 'git_url': u'git@gitlab.com:jsmith/somerepo.git', - 'commit_info': { - 'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e', - 'date': u'2015-08-13T19:33:18+00:00', - 'message': u'Fix link\n', - 'author': { - 'username': 'jsmith', - 'url': 'http://gitlab.com/jsmith', - 'avatar_url': 'http://some/avatar/url', - }, - }, - } - - def lookup_user(_): - return { - 'username': 'jsmith', - 'html_url': 'http://gitlab.com/jsmith', - 'avatar_url': 'http://some/avatar/url', + expected = { + "commit": u"fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "ref": u"refs/heads/master", + "git_url": u"git@gitlab.com:jsmith/somerepo.git", + "commit_info": { + "url": u"https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "date": u"2015-08-13T19:33:18+00:00", + "message": u"Fix link\n", + "author": { + "username": "jsmith", + "url": "http://gitlab.com/jsmith", + "avatar_url": "http://some/avatar/url", + }, + }, } - assertSchema('gitlab_webhook', expected, gl_webhook, lookup_user=lookup_user) + def lookup_user(_): + return { + "username": "jsmith", + "html_url": "http://gitlab.com/jsmith", + "avatar_url": "http://some/avatar/url", + } + + assertSchema("gitlab_webhook", expected, gl_webhook, lookup_user=lookup_user) def test_github_webhook_payload_deleted_commit(): - expected = { - 'commit': u'456806b662cb903a0febbaed8344f3ed42f27bab', - 'commit_info': { - 'author': { - 'username': u'jsmith' - }, - 'committer': { - 'username': u'jsmith' - }, - 'date': u'2015-12-08T18:07:03-05:00', - 'message': (u'Merge pull request #1044 from jsmith/errerror\n\n' + - 'Assign the exception to a variable to log it'), - 'url': u'https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab' - }, - 'git_url': u'git@github.com:jsmith/somerepo.git', - 'ref': u'refs/heads/master', - 'default_branch': u'master', - } + expected = { + "commit": u"456806b662cb903a0febbaed8344f3ed42f27bab", + "commit_info": { + "author": {"username": u"jsmith"}, + "committer": {"username": u"jsmith"}, + "date": u"2015-12-08T18:07:03-05:00", + "message": ( + u"Merge pull request #1044 from jsmith/errerror\n\n" + + "Assign the exception to a variable to log it" + ), + "url": u"https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab", + }, + "git_url": u"git@github.com:jsmith/somerepo.git", + "ref": u"refs/heads/master", + "default_branch": u"master", + } - def lookup_user(_): - return None + def lookup_user(_): + return None - assertSchema('github_webhook_deletedcommit', expected, gh_webhook, lookup_user=lookup_user) + assertSchema( + "github_webhook_deletedcommit", expected, gh_webhook, lookup_user=lookup_user + ) def test_github_webhook_known_issue(): - def lookup_user(_): - return None + def lookup_user(_): + return None - assertSkipped('github_webhook_knownissue', gh_webhook, lookup_user=lookup_user) 
+ assertSkipped("github_webhook_knownissue", gh_webhook, lookup_user=lookup_user) def test_bitbucket_webhook_known_issue(): - assertSkipped('bitbucket_knownissue', bb_webhook) + assertSkipped("bitbucket_knownissue", bb_webhook) diff --git a/buildtrigger/test/test_triggerutil.py b/buildtrigger/test/test_triggerutil.py index 15f1bec10..6a1b6ce28 100644 --- a/buildtrigger/test/test_triggerutil.py +++ b/buildtrigger/test/test_triggerutil.py @@ -4,22 +4,43 @@ import pytest from buildtrigger.triggerutil import matches_ref -@pytest.mark.parametrize('ref, filt, matches', [ - ('ref/heads/master', '.+', True), - ('ref/heads/master', 'heads/.+', True), - ('ref/heads/master', 'heads/master', True), - ('ref/heads/slash/branch', 'heads/slash/branch', True), - ('ref/heads/slash/branch', 'heads/.+', True), - ('ref/heads/foobar', 'heads/master', False), - ('ref/heads/master', 'tags/master', False), - - ('ref/heads/master', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), - ('ref/heads/alpha', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), - ('ref/heads/beta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), - ('ref/heads/gamma', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), - - ('ref/heads/delta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', False), -]) +@pytest.mark.parametrize( + "ref, filt, matches", + [ + ("ref/heads/master", ".+", True), + ("ref/heads/master", "heads/.+", True), + ("ref/heads/master", "heads/master", True), + ("ref/heads/slash/branch", "heads/slash/branch", True), + ("ref/heads/slash/branch", "heads/.+", True), + ("ref/heads/foobar", "heads/master", False), + ("ref/heads/master", "tags/master", False), + ( + "ref/heads/master", + "(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)", + True, + ), + ( + "ref/heads/alpha", + "(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)", + True, + ), + ( + "ref/heads/beta", + "(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)", + True, + ), + ( + "ref/heads/gamma", + "(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)", + True, + ), + ( + "ref/heads/delta", + "(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)", + False, + ), + ], +) def test_matches_ref(ref, filt, matches): - assert matches_ref(ref, re.compile(filt)) == matches + assert matches_ref(ref, re.compile(filt)) == matches diff --git a/buildtrigger/triggerutil.py b/buildtrigger/triggerutil.py index 5c459e53e..c24effb3f 100644 --- a/buildtrigger/triggerutil.py +++ b/buildtrigger/triggerutil.py @@ -3,128 +3,146 @@ import io import logging import re + class TriggerException(Exception): - pass + pass + class TriggerAuthException(TriggerException): - pass + pass + class InvalidPayloadException(TriggerException): - pass + pass + class BuildArchiveException(TriggerException): - pass + pass + class InvalidServiceException(TriggerException): - pass + pass + class TriggerActivationException(TriggerException): - pass + pass + class TriggerDeactivationException(TriggerException): - pass + pass + class TriggerStartException(TriggerException): - pass + pass + class ValidationRequestException(TriggerException): - pass + pass + class SkipRequestException(TriggerException): - pass + pass + class EmptyRepositoryException(TriggerException): - pass + pass + class RepositoryReadException(TriggerException): - pass + pass + class TriggerProviderException(TriggerException): - pass + pass + logger = logging.getLogger(__name__) + def 
determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, default_branch): - run_parameters = run_parameters or {} + run_parameters = run_parameters or {} - kind = '' - value = '' + kind = "" + value = "" - if 'refs' in run_parameters and run_parameters['refs']: - kind = run_parameters['refs']['kind'] - value = run_parameters['refs']['name'] - elif 'branch_name' in run_parameters: - kind = 'branch' - value = run_parameters['branch_name'] + if "refs" in run_parameters and run_parameters["refs"]: + kind = run_parameters["refs"]["kind"] + value = run_parameters["refs"]["name"] + elif "branch_name" in run_parameters: + kind = "branch" + value = run_parameters["branch_name"] - kind = kind or 'branch' - value = value or default_branch or 'master' + kind = kind or "branch" + value = value or default_branch or "master" - ref = 'refs/tags/' + value if kind == 'tag' else 'refs/heads/' + value - commit_sha = get_tag_sha(value) if kind == 'tag' else get_branch_sha(value) - return (commit_sha, ref) + ref = "refs/tags/" + value if kind == "tag" else "refs/heads/" + value + commit_sha = get_tag_sha(value) if kind == "tag" else get_branch_sha(value) + return (commit_sha, ref) def find_matching_branches(config, branches): - if 'branchtag_regex' in config: - try: - regex = re.compile(config['branchtag_regex']) - return [branch for branch in branches - if matches_ref('refs/heads/' + branch, regex)] - except: - pass + if "branchtag_regex" in config: + try: + regex = re.compile(config["branchtag_regex"]) + return [ + branch + for branch in branches + if matches_ref("refs/heads/" + branch, regex) + ] + except: + pass - return branches + return branches def should_skip_commit(metadata): - if 'commit_info' in metadata: - message = metadata['commit_info']['message'] - return '[skip build]' in message or '[build skip]' in message - return False + if "commit_info" in metadata: + message = metadata["commit_info"]["message"] + return "[skip build]" in message or "[build skip]" in message + return False def raise_if_skipped_build(prepared_build, config): - """ Raises a SkipRequestException if the given build should be skipped. """ - # Check to ensure we have metadata. - if not prepared_build.metadata: - logger.debug('Skipping request due to missing metadata for prepared build') - raise SkipRequestException() + """ Raises a SkipRequestException if the given build should be skipped. """ + # Check to ensure we have metadata. + if not prepared_build.metadata: + logger.debug("Skipping request due to missing metadata for prepared build") + raise SkipRequestException() - # Check the branchtag regex. - if 'branchtag_regex' in config: - try: - regex = re.compile(config['branchtag_regex']) - except: - regex = re.compile('.*') + # Check the branchtag regex. + if "branchtag_regex" in config: + try: + regex = re.compile(config["branchtag_regex"]) + except: + regex = re.compile(".*") - if not matches_ref(prepared_build.metadata.get('ref'), regex): - raise SkipRequestException() + if not matches_ref(prepared_build.metadata.get("ref"), regex): + raise SkipRequestException() - # Check the commit message. - if should_skip_commit(prepared_build.metadata): - logger.debug('Skipping request due to commit message request') - raise SkipRequestException() + # Check the commit message. 
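(A minimal illustrative sketch, not part of this diff, showing how two of the helpers defined in buildtrigger/triggerutil.py behave; it relies only on the definitions visible in this file.)

    import re
    from buildtrigger.triggerutil import matches_ref, should_skip_commit

    # matches_ref() drops everything before the first "/" and requires the regex to
    # cover the whole remainder, so a bare prefix match is not enough.
    assert matches_ref("refs/heads/master", re.compile("heads/master"))
    assert not matches_ref("refs/heads/master-old", re.compile("heads/master"))

    # should_skip_commit() looks for the "[skip build]" / "[build skip]" markers
    # in the commit message of the prepared build's metadata.
    assert should_skip_commit({"commit_info": {"message": "typo fix [skip build]"}})
    assert not should_skip_commit({"commit_info": {"message": "regular commit"}})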
+ if should_skip_commit(prepared_build.metadata): + logger.debug("Skipping request due to commit message request") + raise SkipRequestException() def matches_ref(ref, regex): - match_string = ref.split('/', 1)[1] - if not regex: - return False + match_string = ref.split("/", 1)[1] + if not regex: + return False - m = regex.match(match_string) - if not m: - return False + m = regex.match(match_string) + if not m: + return False - return len(m.group(0)) == len(match_string) + return len(m.group(0)) == len(match_string) def raise_unsupported(): - raise io.UnsupportedOperation + raise io.UnsupportedOperation def get_trigger_config(trigger): - try: - return json.loads(trigger.config) - except ValueError: - return {} + try: + return json.loads(trigger.config) + except ValueError: + return {} diff --git a/conf/gunicorn_local.py b/conf/gunicorn_local.py index b33558ef2..ab9afc0ec 100644 --- a/conf/gunicorn_local.py +++ b/conf/gunicorn_local.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -10,18 +11,24 @@ from util.workers import get_worker_count logconfig = logfile_path(debug=True) -bind = '0.0.0.0:5000' -workers = get_worker_count('local', 2, minimum=2, maximum=8) -worker_class = 'gevent' +bind = "0.0.0.0:5000" +workers = get_worker_count("local", 2, minimum=2, maximum=8) +worker_class = "gevent" daemon = False -pythonpath = '.' +pythonpath = "." preload_app = True + def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) + logger = logging.getLogger(__name__) + logger.debug( + "Starting local gunicorn with %s workers and %s worker class", + workers, + worker_class, + ) diff --git a/conf/gunicorn_registry.py b/conf/gunicorn_registry.py index 23590ba45..c072c740f 100644 --- a/conf/gunicorn_registry.py +++ b/conf/gunicorn_registry.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -10,19 +11,23 @@ from util.workers import get_worker_count logconfig = logfile_path(debug=False) -bind = 'unix:/tmp/gunicorn_registry.sock' -workers = get_worker_count('registry', 4, minimum=8, maximum=64) -worker_class = 'gevent' -pythonpath = '.' +bind = "unix:/tmp/gunicorn_registry.sock" +workers = get_worker_count("registry", 4, minimum=8, maximum=64) +worker_class = "gevent" +pythonpath = "." preload_app = True def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. 
+ Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting registry gunicorn with %s workers and %s worker class', workers, - worker_class) + logger = logging.getLogger(__name__) + logger.debug( + "Starting registry gunicorn with %s workers and %s worker class", + workers, + worker_class, + ) diff --git a/conf/gunicorn_secscan.py b/conf/gunicorn_secscan.py index daea39c38..788d79808 100644 --- a/conf/gunicorn_secscan.py +++ b/conf/gunicorn_secscan.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -10,19 +11,23 @@ from util.workers import get_worker_count logconfig = logfile_path(debug=False) -bind = 'unix:/tmp/gunicorn_secscan.sock' -workers = get_worker_count('secscan', 2, minimum=2, maximum=4) -worker_class = 'gevent' -pythonpath = '.' +bind = "unix:/tmp/gunicorn_secscan.sock" +workers = get_worker_count("secscan", 2, minimum=2, maximum=4) +worker_class = "gevent" +pythonpath = "." preload_app = True def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting secscan gunicorn with %s workers and %s worker class', workers, - worker_class) + logger = logging.getLogger(__name__) + logger.debug( + "Starting secscan gunicorn with %s workers and %s worker class", + workers, + worker_class, + ) diff --git a/conf/gunicorn_verbs.py b/conf/gunicorn_verbs.py index 9502f7563..2e6482384 100644 --- a/conf/gunicorn_verbs.py +++ b/conf/gunicorn_verbs.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -10,18 +11,21 @@ from util.workers import get_worker_count logconfig = logfile_path(debug=False) -bind = 'unix:/tmp/gunicorn_verbs.sock' -workers = get_worker_count('verbs', 2, minimum=2, maximum=32) -pythonpath = '.' +bind = "unix:/tmp/gunicorn_verbs.sock" +workers = get_worker_count("verbs", 2, minimum=2, maximum=32) +pythonpath = "." preload_app = True timeout = 2000 # Because sync workers def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting verbs gunicorn with %s workers and sync worker class', workers) + logger = logging.getLogger(__name__) + logger.debug( + "Starting verbs gunicorn with %s workers and sync worker class", workers + ) diff --git a/conf/gunicorn_web.py b/conf/gunicorn_web.py index 8bd1abaa0..2461861c2 100644 --- a/conf/gunicorn_web.py +++ b/conf/gunicorn_web.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -11,18 +12,23 @@ from util.workers import get_worker_count logconfig = logfile_path(debug=False) -bind = 'unix:/tmp/gunicorn_web.sock' -workers = get_worker_count('web', 2, minimum=2, maximum=32) -worker_class = 'gevent' -pythonpath = '.' +bind = "unix:/tmp/gunicorn_web.sock" +workers = get_worker_count("web", 2, minimum=2, maximum=32) +worker_class = "gevent" +pythonpath = "." 
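(The gunicorn configs in this diff size their worker pools with util.workers.get_worker_count(kind, multiplier, minimum=..., maximum=...). Its implementation is not shown here; the snippet below is only a hypothetical stand-in with the same call shape, not the actual helper.)

    import multiprocessing

    def get_worker_count(kind, multiplier, minimum, maximum):
        # Hypothetical sketch: scale the worker count with the available CPUs,
        # then clamp it to the configured bounds. The real util.workers helper
        # may differ; "kind" is unused in this sketch.
        return max(minimum, min(maximum, multiprocessing.cpu_count() * multiplier))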
preload_app = True + def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting web gunicorn with %s workers and %s worker class', workers, - worker_class) + logger = logging.getLogger(__name__) + logger.debug( + "Starting web gunicorn with %s workers and %s worker class", + workers, + worker_class, + ) diff --git a/conf/init/nginx_conf_create.py b/conf/init/nginx_conf_create.py index 56a59a2d2..7b264a015 100644 --- a/conf/init/nginx_conf_create.py +++ b/conf/init/nginx_conf_create.py @@ -7,120 +7,130 @@ import jinja2 QUAYPATH = os.getenv("QUAYPATH", ".") QUAYDIR = os.getenv("QUAYDIR", "/") QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf")) -STATIC_DIR = os.path.join(QUAYDIR, 'static') +STATIC_DIR = os.path.join(QUAYDIR, "static") -SSL_PROTOCOL_DEFAULTS = ['TLSv1', 'TLSv1.1', 'TLSv1.2'] +SSL_PROTOCOL_DEFAULTS = ["TLSv1", "TLSv1.1", "TLSv1.2"] SSL_CIPHER_DEFAULTS = [ - 'ECDHE-RSA-AES128-GCM-SHA256', - 'ECDHE-ECDSA-AES128-GCM-SHA256', - 'ECDHE-RSA-AES256-GCM-SHA384', - 'ECDHE-ECDSA-AES256-GCM-SHA384', - 'DHE-RSA-AES128-GCM-SHA256', - 'DHE-DSS-AES128-GCM-SHA256', - 'kEDH+AESGCM', - 'ECDHE-RSA-AES128-SHA256', - 'ECDHE-ECDSA-AES128-SHA256', - 'ECDHE-RSA-AES128-SHA', - 'ECDHE-ECDSA-AES128-SHA', - 'ECDHE-RSA-AES256-SHA384', - 'ECDHE-ECDSA-AES256-SHA384', - 'ECDHE-RSA-AES256-SHA', - 'ECDHE-ECDSA-AES256-SHA', - 'DHE-RSA-AES128-SHA256', - 'DHE-RSA-AES128-SHA', - 'DHE-DSS-AES128-SHA256', - 'DHE-RSA-AES256-SHA256', - 'DHE-DSS-AES256-SHA', - 'DHE-RSA-AES256-SHA', - 'AES128-GCM-SHA256', - 'AES256-GCM-SHA384', - 'AES128-SHA256', - 'AES256-SHA256', - 'AES128-SHA', - 'AES256-SHA', - 'AES', - 'CAMELLIA', - '!3DES', - '!aNULL', - '!eNULL', - '!EXPORT', - '!DES', - '!RC4', - '!MD5', - '!PSK', - '!aECDH', - '!EDH-DSS-DES-CBC3-SHA', - '!EDH-RSA-DES-CBC3-SHA', - '!KRB5-DES-CBC3-SHA', + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-DSS-AES128-GCM-SHA256", + "kEDH+AESGCM", + "ECDHE-RSA-AES128-SHA256", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA", + "ECDHE-ECDSA-AES256-SHA", + "DHE-RSA-AES128-SHA256", + "DHE-RSA-AES128-SHA", + "DHE-DSS-AES128-SHA256", + "DHE-RSA-AES256-SHA256", + "DHE-DSS-AES256-SHA", + "DHE-RSA-AES256-SHA", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA256", + "AES256-SHA256", + "AES128-SHA", + "AES256-SHA", + "AES", + "CAMELLIA", + "!3DES", + "!aNULL", + "!eNULL", + "!EXPORT", + "!DES", + "!RC4", + "!MD5", + "!PSK", + "!aECDH", + "!EDH-DSS-DES-CBC3-SHA", + "!EDH-RSA-DES-CBC3-SHA", + "!KRB5-DES-CBC3-SHA", ] -def write_config(filename, **kwargs): - with open(filename + ".jnj") as f: - template = jinja2.Template(f.read()) - rendered = template.render(kwargs) - with open(filename, 'w') as f: - f.write(rendered) +def write_config(filename, **kwargs): + with open(filename + ".jnj") as f: + template = jinja2.Template(f.read()) + rendered = template.render(kwargs) + + with open(filename, "w") as f: + f.write(rendered) def generate_nginx_config(config): - """ + """ Generates nginx config from the app config 
""" - config = config or {} - use_https = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.key')) - use_old_certs = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.old.key')) - v1_only_domain = config.get('V1_ONLY_DOMAIN', None) - enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False) - ssl_protocols = config.get('SSL_PROTOCOLS', SSL_PROTOCOL_DEFAULTS) - ssl_ciphers = config.get('SSL_CIPHERS', SSL_CIPHER_DEFAULTS) + config = config or {} + use_https = os.path.exists(os.path.join(QUAYCONF_DIR, "stack/ssl.key")) + use_old_certs = os.path.exists(os.path.join(QUAYCONF_DIR, "stack/ssl.old.key")) + v1_only_domain = config.get("V1_ONLY_DOMAIN", None) + enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False) + ssl_protocols = config.get("SSL_PROTOCOLS", SSL_PROTOCOL_DEFAULTS) + ssl_ciphers = config.get("SSL_CIPHERS", SSL_CIPHER_DEFAULTS) - write_config(os.path.join(QUAYCONF_DIR, 'nginx/nginx.conf'), use_https=use_https, - use_old_certs=use_old_certs, - enable_rate_limits=enable_rate_limits, - v1_only_domain=v1_only_domain, - ssl_protocols=ssl_protocols, - ssl_ciphers=':'.join(ssl_ciphers)) + write_config( + os.path.join(QUAYCONF_DIR, "nginx/nginx.conf"), + use_https=use_https, + use_old_certs=use_old_certs, + enable_rate_limits=enable_rate_limits, + v1_only_domain=v1_only_domain, + ssl_protocols=ssl_protocols, + ssl_ciphers=":".join(ssl_ciphers), + ) def generate_server_config(config): - """ + """ Generates server config from the app config """ - config = config or {} - tuf_server = config.get('TUF_SERVER', None) - tuf_host = config.get('TUF_HOST', None) - signing_enabled = config.get('FEATURE_SIGNING', False) - maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G') - enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False) + config = config or {} + tuf_server = config.get("TUF_SERVER", None) + tuf_host = config.get("TUF_HOST", None) + signing_enabled = config.get("FEATURE_SIGNING", False) + maximum_layer_size = config.get("MAXIMUM_LAYER_SIZE", "20G") + enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False) - write_config( - os.path.join(QUAYCONF_DIR, 'nginx/server-base.conf'), tuf_server=tuf_server, tuf_host=tuf_host, - signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size, - enable_rate_limits=enable_rate_limits, - static_dir=STATIC_DIR) + write_config( + os.path.join(QUAYCONF_DIR, "nginx/server-base.conf"), + tuf_server=tuf_server, + tuf_host=tuf_host, + signing_enabled=signing_enabled, + maximum_layer_size=maximum_layer_size, + enable_rate_limits=enable_rate_limits, + static_dir=STATIC_DIR, + ) def generate_rate_limiting_config(config): - """ + """ Generates rate limiting config from the app config """ - config = config or {} - non_rate_limited_namespaces = config.get('NON_RATE_LIMITED_NAMESPACES') or set() - enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False) - write_config( - os.path.join(QUAYCONF_DIR, 'nginx/rate-limiting.conf'), - non_rate_limited_namespaces=non_rate_limited_namespaces, - enable_rate_limits=enable_rate_limits, - static_dir=STATIC_DIR) + config = config or {} + non_rate_limited_namespaces = config.get("NON_RATE_LIMITED_NAMESPACES") or set() + enable_rate_limits = config.get("FEATURE_RATE_LIMITS", False) + write_config( + os.path.join(QUAYCONF_DIR, "nginx/rate-limiting.conf"), + non_rate_limited_namespaces=non_rate_limited_namespaces, + enable_rate_limits=enable_rate_limits, + static_dir=STATIC_DIR, + ) + if __name__ == "__main__": - if os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/config.yaml')): - with 
open(os.path.join(QUAYCONF_DIR, 'stack/config.yaml'), 'r') as f: - config = yaml.load(f) - else: - config = None + if os.path.exists(os.path.join(QUAYCONF_DIR, "stack/config.yaml")): + with open(os.path.join(QUAYCONF_DIR, "stack/config.yaml"), "r") as f: + config = yaml.load(f) + else: + config = None - generate_rate_limiting_config(config) - generate_server_config(config) - generate_nginx_config(config) + generate_rate_limiting_config(config) + generate_server_config(config) + generate_nginx_config(config) diff --git a/conf/init/supervisord_conf_create.py b/conf/init/supervisord_conf_create.py index 50f5cabbf..0463d6dfd 100644 --- a/conf/init/supervisord_conf_create.py +++ b/conf/init/supervisord_conf_create.py @@ -12,136 +12,74 @@ QUAY_OVERRIDE_SERVICES = os.getenv("QUAY_OVERRIDE_SERVICES", []) def default_services(): - return { - "blobuploadcleanupworker": { - "autostart": "true" - }, - "buildlogsarchiver": { - "autostart": "true" - }, - "builder": { - "autostart": "true" - }, - "chunkcleanupworker": { - "autostart": "true" - }, - "expiredappspecifictokenworker": { - "autostart": "true" - }, - "exportactionlogsworker": { - "autostart": "true" - }, - "gcworker": { - "autostart": "true" - }, - "globalpromstats": { - "autostart": "true" - }, - "labelbackfillworker": { - "autostart": "true" - }, - "logrotateworker": { - "autostart": "true" - }, - "namespacegcworker": { - "autostart": "true" - }, - "notificationworker": { - "autostart": "true" - }, - "queuecleanupworker": { - "autostart": "true" - }, - "repositoryactioncounter": { - "autostart": "true" - }, - "security_notification_worker": { - "autostart": "true" - }, - "securityworker": { - "autostart": "true" - }, - "storagereplication": { - "autostart": "true" - }, - "tagbackfillworker": { - "autostart": "true" - }, - "teamsyncworker": { - "autostart": "true" - }, - "dnsmasq": { - "autostart": "true" - }, - "gunicorn-registry": { - "autostart": "true" - }, - "gunicorn-secscan": { - "autostart": "true" - }, - "gunicorn-verbs": { - "autostart": "true" - }, - "gunicorn-web": { - "autostart": "true" - }, - "ip-resolver-update-worker": { - "autostart": "true" - }, - "jwtproxy": { - "autostart": "true" - }, - "memcache": { - "autostart": "true" - }, - "nginx": { - "autostart": "true" - }, - "prometheus-aggregator": { - "autostart": "true" - }, - "servicekey": { - "autostart": "true" - }, - "repomirrorworker": { - "autostart": "false" + return { + "blobuploadcleanupworker": {"autostart": "true"}, + "buildlogsarchiver": {"autostart": "true"}, + "builder": {"autostart": "true"}, + "chunkcleanupworker": {"autostart": "true"}, + "expiredappspecifictokenworker": {"autostart": "true"}, + "exportactionlogsworker": {"autostart": "true"}, + "gcworker": {"autostart": "true"}, + "globalpromstats": {"autostart": "true"}, + "labelbackfillworker": {"autostart": "true"}, + "logrotateworker": {"autostart": "true"}, + "namespacegcworker": {"autostart": "true"}, + "notificationworker": {"autostart": "true"}, + "queuecleanupworker": {"autostart": "true"}, + "repositoryactioncounter": {"autostart": "true"}, + "security_notification_worker": {"autostart": "true"}, + "securityworker": {"autostart": "true"}, + "storagereplication": {"autostart": "true"}, + "tagbackfillworker": {"autostart": "true"}, + "teamsyncworker": {"autostart": "true"}, + "dnsmasq": {"autostart": "true"}, + "gunicorn-registry": {"autostart": "true"}, + "gunicorn-secscan": {"autostart": "true"}, + "gunicorn-verbs": {"autostart": "true"}, + "gunicorn-web": {"autostart": "true"}, + 
"ip-resolver-update-worker": {"autostart": "true"}, + "jwtproxy": {"autostart": "true"}, + "memcache": {"autostart": "true"}, + "nginx": {"autostart": "true"}, + "prometheus-aggregator": {"autostart": "true"}, + "servicekey": {"autostart": "true"}, + "repomirrorworker": {"autostart": "false"}, } -} def generate_supervisord_config(filename, config): - with open(filename + ".jnj") as f: - template = jinja2.Template(f.read()) - rendered = template.render(config=config) + with open(filename + ".jnj") as f: + template = jinja2.Template(f.read()) + rendered = template.render(config=config) - with open(filename, 'w') as f: - f.write(rendered) + with open(filename, "w") as f: + f.write(rendered) def limit_services(config, enabled_services): - if enabled_services == []: - return + if enabled_services == []: + return - for service in config.keys(): - if service in enabled_services: - config[service]["autostart"] = "true" - else: - config[service]["autostart"] = "false" + for service in config.keys(): + if service in enabled_services: + config[service]["autostart"] = "true" + else: + config[service]["autostart"] = "false" def override_services(config, override_services): - if override_services == []: - return + if override_services == []: + return - for service in config.keys(): - if service + "=true" in override_services: - config[service]["autostart"] = "true" - elif service + "=false" in override_services: - config[service]["autostart"] = "false" + for service in config.keys(): + if service + "=true" in override_services: + config[service]["autostart"] = "true" + elif service + "=false" in override_services: + config[service]["autostart"] = "false" if __name__ == "__main__": - config = default_services() - limit_services(config, QUAY_SERVICES) - override_services(config, QUAY_OVERRIDE_SERVICES) - generate_supervisord_config(os.path.join(QUAYCONF_DIR, 'supervisord.conf'), config) + config = default_services() + limit_services(config, QUAY_SERVICES) + override_services(config, QUAY_OVERRIDE_SERVICES) + generate_supervisord_config(os.path.join(QUAYCONF_DIR, "supervisord.conf"), config) diff --git a/conf/init/test/test_supervisord_conf_create.py b/conf/init/test/test_supervisord_conf_create.py index 8972b2e39..75c7313d4 100644 --- a/conf/init/test/test_supervisord_conf_create.py +++ b/conf/init/test/test_supervisord_conf_create.py @@ -6,17 +6,23 @@ import jinja2 from ..supervisord_conf_create import QUAYCONF_DIR, default_services, limit_services + def render_supervisord_conf(config): - with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../supervisord.conf.jnj")) as f: - template = jinja2.Template(f.read()) - return template.render(config=config) + with open( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), "../../supervisord.conf.jnj" + ) + ) as f: + template = jinja2.Template(f.read()) + return template.render(config=config) + def test_supervisord_conf_create_defaults(): - config = default_services() - limit_services(config, []) - rendered = render_supervisord_conf(config) + config = default_services() + limit_services(config, []) + rendered = render_supervisord_conf(config) - expected = """[supervisord] + expected = """[supervisord] nodaemon=true [unix_http_server] @@ -392,14 +398,15 @@ stderr_logfile_maxbytes=0 stdout_events_enabled = true stderr_events_enabled = true # EOF NO NEWLINE""" - assert rendered == expected + assert rendered == expected + def test_supervisord_conf_create_all_overrides(): - config = default_services() - limit_services(config, 
"servicekey,prometheus-aggregator") - rendered = render_supervisord_conf(config) + config = default_services() + limit_services(config, "servicekey,prometheus-aggregator") + rendered = render_supervisord_conf(config) - expected = """[supervisord] + expected = """[supervisord] nodaemon=true [unix_http_server] @@ -775,4 +782,4 @@ stderr_logfile_maxbytes=0 stdout_events_enabled = true stderr_events_enabled = true # EOF NO NEWLINE""" - assert rendered == expected + assert rendered == expected diff --git a/config.py b/config.py index ae742ece8..df693df58 100644 --- a/config.py +++ b/config.py @@ -7,603 +7,682 @@ from _init import ROOT_DIR, CONF_DIR def build_requests_session(): - sess = requests.Session() - adapter = requests.adapters.HTTPAdapter(pool_connections=100, - pool_maxsize=100) - sess.mount('http://', adapter) - sess.mount('https://', adapter) - return sess + sess = requests.Session() + adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100) + sess.mount("http://", adapter) + sess.mount("https://", adapter) + return sess # The set of configuration key names that will be accessible in the client. Since these # values are sent to the frontend, DO NOT PLACE ANY SECRETS OR KEYS in this list. -CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'MIXPANEL_KEY', - 'STRIPE_PUBLISHABLE_KEY', 'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN', - 'AUTHENTICATION_TYPE', 'REGISTRY_TITLE', 'REGISTRY_TITLE_SHORT', - 'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', - 'SETUP_COMPLETE', 'DEBUG', 'MARKETO_MUNCHKIN_ID', - 'STATIC_SITE_BUCKET', 'RECAPTCHA_SITE_KEY', 'CHANNEL_COLORS', - 'TAG_EXPIRATION_OPTIONS', 'INTERNAL_OIDC_SERVICE_ID', - 'SEARCH_RESULTS_PER_PAGE', 'SEARCH_MAX_RESULT_PAGE_COUNT', 'BRANDING'] +CLIENT_WHITELIST = [ + "SERVER_HOSTNAME", + "PREFERRED_URL_SCHEME", + "MIXPANEL_KEY", + "STRIPE_PUBLISHABLE_KEY", + "ENTERPRISE_LOGO_URL", + "SENTRY_PUBLIC_DSN", + "AUTHENTICATION_TYPE", + "REGISTRY_TITLE", + "REGISTRY_TITLE_SHORT", + "CONTACT_INFO", + "AVATAR_KIND", + "LOCAL_OAUTH_HANDLER", + "SETUP_COMPLETE", + "DEBUG", + "MARKETO_MUNCHKIN_ID", + "STATIC_SITE_BUCKET", + "RECAPTCHA_SITE_KEY", + "CHANNEL_COLORS", + "TAG_EXPIRATION_OPTIONS", + "INTERNAL_OIDC_SERVICE_ID", + "SEARCH_RESULTS_PER_PAGE", + "SEARCH_MAX_RESULT_PAGE_COUNT", + "BRANDING", +] def frontend_visible_config(config_dict): - visible_dict = {} - for name in CLIENT_WHITELIST: - if name.lower().find('secret') >= 0: - raise Exception('Cannot whitelist secrets: %s' % name) + visible_dict = {} + for name in CLIENT_WHITELIST: + if name.lower().find("secret") >= 0: + raise Exception("Cannot whitelist secrets: %s" % name) - if name in config_dict: - visible_dict[name] = config_dict.get(name, None) - if 'ENTERPRISE_LOGO_URL' in config_dict: - visible_dict['BRANDING'] = visible_dict.get('BRANDING', {}) - visible_dict['BRANDING']['logo'] = config_dict['ENTERPRISE_LOGO_URL'] + if name in config_dict: + visible_dict[name] = config_dict.get(name, None) + if "ENTERPRISE_LOGO_URL" in config_dict: + visible_dict["BRANDING"] = visible_dict.get("BRANDING", {}) + visible_dict["BRANDING"]["logo"] = config_dict["ENTERPRISE_LOGO_URL"] - return visible_dict + return visible_dict # Configuration that should not be changed by end users class ImmutableConfig(object): - # Requests based HTTP client with a large request pool - HTTPCLIENT = build_requests_session() + # Requests based HTTP client with a large request pool + HTTPCLIENT = build_requests_session() - # Status tag config - STATUS_TAGS = {} - for tag_name in ['building', 
'failed', 'none', 'ready', 'cancelled']: - tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg') - with open(tag_path) as tag_svg: - STATUS_TAGS[tag_name] = tag_svg.read() + # Status tag config + STATUS_TAGS = {} + for tag_name in ["building", "failed", "none", "ready", "cancelled"]: + tag_path = os.path.join(ROOT_DIR, "buildstatus", tag_name + ".svg") + with open(tag_path) as tag_svg: + STATUS_TAGS[tag_name] = tag_svg.read() - # Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable - # to be set via the API. - DEFAULT_LABEL_KEY_RESERVED_PREFIXES = ['com.docker.', 'io.docker.', 'org.dockerproject.', - 'org.opencontainers.', 'io.cncf.', - 'io.kubernetes.', 'io.k8s.', - 'io.quay', 'com.coreos', 'com.tectonic', - 'internal', 'quay'] + # Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable + # to be set via the API. + DEFAULT_LABEL_KEY_RESERVED_PREFIXES = [ + "com.docker.", + "io.docker.", + "org.dockerproject.", + "org.opencontainers.", + "io.cncf.", + "io.kubernetes.", + "io.k8s.", + "io.quay", + "com.coreos", + "com.tectonic", + "internal", + "quay", + ] - # Colors for local avatars. - AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', - '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', - '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', - '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', - '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] + # Colors for local avatars. + AVATAR_COLORS = [ + "#969696", + "#aec7e8", + "#ff7f0e", + "#ffbb78", + "#2ca02c", + "#98df8a", + "#d62728", + "#ff9896", + "#9467bd", + "#c5b0d5", + "#8c564b", + "#c49c94", + "#e377c2", + "#f7b6d2", + "#7f7f7f", + "#c7c7c7", + "#bcbd22", + "#1f77b4", + "#17becf", + "#9edae5", + "#393b79", + "#5254a3", + "#6b6ecf", + "#9c9ede", + "#9ecae1", + "#31a354", + "#b5cf6b", + "#a1d99b", + "#8c6d31", + "#ad494a", + "#e7ba52", + "#a55194", + ] - # Colors for channels. - CHANNEL_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', - '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', - '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', - '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', - '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] + # Colors for channels. 
+ CHANNEL_COLORS = [ + "#969696", + "#aec7e8", + "#ff7f0e", + "#ffbb78", + "#2ca02c", + "#98df8a", + "#d62728", + "#ff9896", + "#9467bd", + "#c5b0d5", + "#8c564b", + "#c49c94", + "#e377c2", + "#f7b6d2", + "#7f7f7f", + "#c7c7c7", + "#bcbd22", + "#1f77b4", + "#17becf", + "#9edae5", + "#393b79", + "#5254a3", + "#6b6ecf", + "#9c9ede", + "#9ecae1", + "#31a354", + "#b5cf6b", + "#a1d99b", + "#8c6d31", + "#ad494a", + "#e7ba52", + "#a55194", + ] - PROPAGATE_EXCEPTIONS = True + PROPAGATE_EXCEPTIONS = True class DefaultConfig(ImmutableConfig): - # Flask config - JSONIFY_PRETTYPRINT_REGULAR = False - SESSION_COOKIE_SECURE = False - - SESSION_COOKIE_HTTPONLY = True - SESSION_COOKIE_SAMESITE = 'Lax' + # Flask config + JSONIFY_PRETTYPRINT_REGULAR = False + SESSION_COOKIE_SECURE = False - LOGGING_LEVEL = 'DEBUG' - SEND_FILE_MAX_AGE_DEFAULT = 0 - PREFERRED_URL_SCHEME = 'http' - SERVER_HOSTNAME = 'localhost:5000' + SESSION_COOKIE_HTTPONLY = True + SESSION_COOKIE_SAMESITE = "Lax" - REGISTRY_TITLE = 'Project Quay' - REGISTRY_TITLE_SHORT = 'Project Quay' + LOGGING_LEVEL = "DEBUG" + SEND_FILE_MAX_AGE_DEFAULT = 0 + PREFERRED_URL_SCHEME = "http" + SERVER_HOSTNAME = "localhost:5000" - CONTACT_INFO = [] + REGISTRY_TITLE = "Project Quay" + REGISTRY_TITLE_SHORT = "Project Quay" - # Mail config - MAIL_SERVER = '' - MAIL_USE_TLS = True - MAIL_PORT = 587 - MAIL_USERNAME = None - MAIL_PASSWORD = None - MAIL_DEFAULT_SENDER = 'example@projectquay.io' - MAIL_FAIL_SILENTLY = False - TESTING = True + CONTACT_INFO = [] - # DB config - DB_URI = 'sqlite:///test/data/test.db' - DB_CONNECTION_ARGS = { - 'threadlocals': True, - 'autorollback': True, - } + # Mail config + MAIL_SERVER = "" + MAIL_USE_TLS = True + MAIL_PORT = 587 + MAIL_USERNAME = None + MAIL_PASSWORD = None + MAIL_DEFAULT_SENDER = "example@projectquay.io" + MAIL_FAIL_SILENTLY = False + TESTING = True - @staticmethod - def create_transaction(db): - return db.transaction() + # DB config + DB_URI = "sqlite:///test/data/test.db" + DB_CONNECTION_ARGS = {"threadlocals": True, "autorollback": True} - DB_TRANSACTION_FACTORY = create_transaction + @staticmethod + def create_transaction(db): + return db.transaction() - # If set to 'readonly', the entire registry is placed into read only mode and no write operations - # may be performed against it. - REGISTRY_STATE = 'normal' + DB_TRANSACTION_FACTORY = create_transaction - # If set to true, TLS is used, but is terminated by an external service (such as a load balancer). - # Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined - # behavior. - EXTERNAL_TLS_TERMINATION = False + # If set to 'readonly', the entire registry is placed into read only mode and no write operations + # may be performed against it. + REGISTRY_STATE = "normal" - # If true, CDN URLs will be used for our external dependencies, rather than the local - # copies. - USE_CDN = False + # If set to true, TLS is used, but is terminated by an external service (such as a load balancer). + # Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined + # behavior. + EXTERNAL_TLS_TERMINATION = False - # Authentication - AUTHENTICATION_TYPE = 'Database' + # If true, CDN URLs will be used for our external dependencies, rather than the local + # copies. 
+ USE_CDN = False - # Build logs - BUILDLOGS_REDIS = {'host': 'localhost'} - BUILDLOGS_OPTIONS = [] + # Authentication + AUTHENTICATION_TYPE = "Database" - # Real-time user events - USER_EVENTS_REDIS = {'host': 'localhost'} + # Build logs + BUILDLOGS_REDIS = {"host": "localhost"} + BUILDLOGS_OPTIONS = [] - # Stripe config - BILLING_TYPE = 'FakeStripe' + # Real-time user events + USER_EVENTS_REDIS = {"host": "localhost"} - # Analytics - ANALYTICS_TYPE = 'FakeAnalytics' + # Stripe config + BILLING_TYPE = "FakeStripe" - # Build Queue Metrics - QUEUE_METRICS_TYPE = 'Null' - QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300 + # Analytics + ANALYTICS_TYPE = "FakeAnalytics" - # Exception logging - EXCEPTION_LOG_TYPE = 'FakeSentry' - SENTRY_DSN = None - SENTRY_PUBLIC_DSN = None + # Build Queue Metrics + QUEUE_METRICS_TYPE = "Null" + QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300 - # Github Config - GITHUB_LOGIN_CONFIG = None - GITHUB_TRIGGER_CONFIG = None + # Exception logging + EXCEPTION_LOG_TYPE = "FakeSentry" + SENTRY_DSN = None + SENTRY_PUBLIC_DSN = None - # Google Config. - GOOGLE_LOGIN_CONFIG = None + # Github Config + GITHUB_LOGIN_CONFIG = None + GITHUB_TRIGGER_CONFIG = None - # Bitbucket Config. - BITBUCKET_TRIGGER_CONFIG = None + # Google Config. + GOOGLE_LOGIN_CONFIG = None - # Gitlab Config. - GITLAB_TRIGGER_CONFIG = None + # Bitbucket Config. + BITBUCKET_TRIGGER_CONFIG = None - NOTIFICATION_QUEUE_NAME = 'notification' - DOCKERFILE_BUILD_QUEUE_NAME = 'dockerfilebuild' - REPLICATION_QUEUE_NAME = 'imagestoragereplication' - SECSCAN_NOTIFICATION_QUEUE_NAME = 'security_notification' - CHUNK_CLEANUP_QUEUE_NAME = 'chunk_cleanup' - NAMESPACE_GC_QUEUE_NAME = 'namespacegc' - EXPORT_ACTION_LOGS_QUEUE_NAME = 'exportactionlogs' + # Gitlab Config. + GITLAB_TRIGGER_CONFIG = None - # Super user config. Note: This MUST BE an empty list for the default config. - SUPER_USERS = [] + NOTIFICATION_QUEUE_NAME = "notification" + DOCKERFILE_BUILD_QUEUE_NAME = "dockerfilebuild" + REPLICATION_QUEUE_NAME = "imagestoragereplication" + SECSCAN_NOTIFICATION_QUEUE_NAME = "security_notification" + CHUNK_CLEANUP_QUEUE_NAME = "chunk_cleanup" + NAMESPACE_GC_QUEUE_NAME = "namespacegc" + EXPORT_ACTION_LOGS_QUEUE_NAME = "exportactionlogs" - # Feature Flag: Whether sessions are permanent. - FEATURE_PERMANENT_SESSIONS = True + # Super user config. Note: This MUST BE an empty list for the default config. + SUPER_USERS = [] - # Feature Flag: Whether super users are supported. - FEATURE_SUPER_USERS = True + # Feature Flag: Whether sessions are permanent. + FEATURE_PERMANENT_SESSIONS = True - # Feature Flag: Whether to allow anonymous users to browse and pull public repositories. - FEATURE_ANONYMOUS_ACCESS = True + # Feature Flag: Whether super users are supported. + FEATURE_SUPER_USERS = True - # Feature Flag: Whether billing is required. - FEATURE_BILLING = False + # Feature Flag: Whether to allow anonymous users to browse and pull public repositories. + FEATURE_ANONYMOUS_ACCESS = True - # Feature Flag: Whether user accounts automatically have usage log access. - FEATURE_USER_LOG_ACCESS = False + # Feature Flag: Whether billing is required. + FEATURE_BILLING = False - # Feature Flag: Whether GitHub login is supported. - FEATURE_GITHUB_LOGIN = False + # Feature Flag: Whether user accounts automatically have usage log access. + FEATURE_USER_LOG_ACCESS = False - # Feature Flag: Whether Google login is supported. - FEATURE_GOOGLE_LOGIN = False + # Feature Flag: Whether GitHub login is supported. 
+ FEATURE_GITHUB_LOGIN = False - # Feature Flag: Whether to support GitHub build triggers. - FEATURE_GITHUB_BUILD = False + # Feature Flag: Whether Google login is supported. + FEATURE_GOOGLE_LOGIN = False - # Feature Flag: Whether to support Bitbucket build triggers. - FEATURE_BITBUCKET_BUILD = False + # Feature Flag: Whether to support GitHub build triggers. + FEATURE_GITHUB_BUILD = False - # Feature Flag: Whether to support GitLab build triggers. - FEATURE_GITLAB_BUILD = False + # Feature Flag: Whether to support Bitbucket build triggers. + FEATURE_BITBUCKET_BUILD = False - # Feature Flag: Dockerfile build support. - FEATURE_BUILD_SUPPORT = True + # Feature Flag: Whether to support GitLab build triggers. + FEATURE_GITLAB_BUILD = False - # Feature Flag: Whether emails are enabled. - FEATURE_MAILING = True + # Feature Flag: Dockerfile build support. + FEATURE_BUILD_SUPPORT = True - # Feature Flag: Whether users can be created (by non-super users). - FEATURE_USER_CREATION = True + # Feature Flag: Whether emails are enabled. + FEATURE_MAILING = True - # Feature Flag: Whether users being created must be invited by another user. - # If FEATURE_USER_CREATION is off, this flag has no effect. - FEATURE_INVITE_ONLY_USER_CREATION = False + # Feature Flag: Whether users can be created (by non-super users). + FEATURE_USER_CREATION = True - # Feature Flag: Whether users can be renamed - FEATURE_USER_RENAME = False + # Feature Flag: Whether users being created must be invited by another user. + # If FEATURE_USER_CREATION is off, this flag has no effect. + FEATURE_INVITE_ONLY_USER_CREATION = False - # Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for - # basic auth. - FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False + # Feature Flag: Whether users can be renamed + FEATURE_USER_RENAME = False - # Feature Flag: Whether to automatically replicate between storage engines. - FEATURE_STORAGE_REPLICATION = False + # Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for + # basic auth. + FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False - # Feature Flag: Whether users can directly login to the UI. - FEATURE_DIRECT_LOGIN = True + # Feature Flag: Whether to automatically replicate between storage engines. + FEATURE_STORAGE_REPLICATION = False - # Feature Flag: Whether the v2/ endpoint is visible - FEATURE_ADVERTISE_V2 = True + # Feature Flag: Whether users can directly login to the UI. + FEATURE_DIRECT_LOGIN = True - # Semver spec for which Docker versions we will blacklist - # Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec - BLACKLIST_V2_SPEC = '<1.6.0' + # Feature Flag: Whether the v2/ endpoint is visible + FEATURE_ADVERTISE_V2 = True - # Feature Flag: Whether to restrict V1 pushes to the whitelist. - FEATURE_RESTRICTED_V1_PUSH = False - V1_PUSH_WHITELIST = [] + # Semver spec for which Docker versions we will blacklist + # Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec + BLACKLIST_V2_SPEC = "<1.6.0" - # Feature Flag: Whether or not to rotate old action logs to storage. - FEATURE_ACTION_LOG_ROTATION = False + # Feature Flag: Whether to restrict V1 pushes to the whitelist. + FEATURE_RESTRICTED_V1_PUSH = False + V1_PUSH_WHITELIST = [] - # Feature Flag: Whether to enable conversion to ACIs. - FEATURE_ACI_CONVERSION = False + # Feature Flag: Whether or not to rotate old action logs to storage. 
+ FEATURE_ACTION_LOG_ROTATION = False - # Feature Flag: Whether to allow for "namespace-less" repositories when pulling and pushing from - # Docker. - FEATURE_LIBRARY_SUPPORT = True + # Feature Flag: Whether to enable conversion to ACIs. + FEATURE_ACI_CONVERSION = False - # Feature Flag: Whether to require invitations when adding a user to a team. - FEATURE_REQUIRE_TEAM_INVITE = True + # Feature Flag: Whether to allow for "namespace-less" repositories when pulling and pushing from + # Docker. + FEATURE_LIBRARY_SUPPORT = True - # Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx. - FEATURE_PROXY_STORAGE = False + # Feature Flag: Whether to require invitations when adding a user to a team. + FEATURE_REQUIRE_TEAM_INVITE = True - # Feature Flag: Whether to collect and support user metadata. - FEATURE_USER_METADATA = False + # Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx. + FEATURE_PROXY_STORAGE = False - # Feature Flag: Whether to support signing - FEATURE_SIGNING = False + # Feature Flag: Whether to collect and support user metadata. + FEATURE_USER_METADATA = False - # Feature Flag: Whether to enable support for App repositories. - FEATURE_APP_REGISTRY = False + # Feature Flag: Whether to support signing + FEATURE_SIGNING = False - # Feature Flag: Whether app registry is in a read-only mode. - FEATURE_READONLY_APP_REGISTRY = False + # Feature Flag: Whether to enable support for App repositories. + FEATURE_APP_REGISTRY = False - # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise, - # only private repositories can be returned. - FEATURE_PUBLIC_CATALOG = False + # Feature Flag: Whether app registry is in a read-only mode. + FEATURE_READONLY_APP_REGISTRY = False - # Feature Flag: If set to true, build logs may be read by those with read access to the repo, - # rather than only write access or admin access. - FEATURE_READER_BUILD_LOGS = False + # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise, + # only private repositories can be returned. + FEATURE_PUBLIC_CATALOG = False - # Feature Flag: If set to true, autocompletion will apply to partial usernames. - FEATURE_PARTIAL_USER_AUTOCOMPLETE = True + # Feature Flag: If set to true, build logs may be read by those with read access to the repo, + # rather than only write access or admin access. + FEATURE_READER_BUILD_LOGS = False - # Feature Flag: If set to true, users can confirm (and modify) their initial usernames when - # logging in via OIDC or a non-database internal auth provider. - FEATURE_USERNAME_CONFIRMATION = True + # Feature Flag: If set to true, autocompletion will apply to partial usernames. + FEATURE_PARTIAL_USER_AUTOCOMPLETE = True - # If a namespace is defined in the public namespace list, then it will appear on *all* - # user's repository list pages, regardless of whether that user is a member of the namespace. - # Typically, this is used by an enterprise customer in configuring a set of "well-known" - # namespaces. - PUBLIC_NAMESPACES = [] + # Feature Flag: If set to true, users can confirm (and modify) their initial usernames when + # logging in via OIDC or a non-database internal auth provider. + FEATURE_USERNAME_CONFIRMATION = True - # The namespace to use for library repositories. - # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries. 
- # See: https://github.com/docker/docker/blob/master/registry/session.go#L320 - LIBRARY_NAMESPACE = 'library' + # If a namespace is defined in the public namespace list, then it will appear on *all* + # user's repository list pages, regardless of whether that user is a member of the namespace. + # Typically, this is used by an enterprise customer in configuring a set of "well-known" + # namespaces. + PUBLIC_NAMESPACES = [] - BUILD_MANAGER = ('enterprise', {}) + # The namespace to use for library repositories. + # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries. + # See: https://github.com/docker/docker/blob/master/registry/session.go#L320 + LIBRARY_NAMESPACE = "library" - DISTRIBUTED_STORAGE_CONFIG = { - 'local_eu': ['LocalStorage', {'storage_path': 'test/data/registry/eu'}], - 'local_us': ['LocalStorage', {'storage_path': 'test/data/registry/us'}], - } + BUILD_MANAGER = ("enterprise", {}) - DISTRIBUTED_STORAGE_PREFERENCE = ['local_us'] - DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ['local_us'] + DISTRIBUTED_STORAGE_CONFIG = { + "local_eu": ["LocalStorage", {"storage_path": "test/data/registry/eu"}], + "local_us": ["LocalStorage", {"storage_path": "test/data/registry/us"}], + } - # Health checker. - HEALTH_CHECKER = ('LocalHealthCheck', {}) + DISTRIBUTED_STORAGE_PREFERENCE = ["local_us"] + DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ["local_us"] - # Userfiles - USERFILES_LOCATION = 'local_us' - USERFILES_PATH = 'userfiles/' + # Health checker. + HEALTH_CHECKER = ("LocalHealthCheck", {}) - # Build logs archive - LOG_ARCHIVE_LOCATION = 'local_us' - LOG_ARCHIVE_PATH = 'logarchive/' + # Userfiles + USERFILES_LOCATION = "local_us" + USERFILES_PATH = "userfiles/" - # Action logs archive - ACTION_LOG_ARCHIVE_LOCATION = 'local_us' - ACTION_LOG_ARCHIVE_PATH = 'actionlogarchive/' - ACTION_LOG_ROTATION_THRESHOLD = '30d' + # Build logs archive + LOG_ARCHIVE_LOCATION = "local_us" + LOG_ARCHIVE_PATH = "logarchive/" - # Allow registry pulls when unable to write to the audit log - ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False + # Action logs archive + ACTION_LOG_ARCHIVE_LOCATION = "local_us" + ACTION_LOG_ARCHIVE_PATH = "actionlogarchive/" + ACTION_LOG_ROTATION_THRESHOLD = "30d" - # Temporary tag expiration in seconds, this may actually be longer based on GC policy - PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60 # One hour per layer + # Allow registry pulls when unable to write to the audit log + ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False - # Signed registry grant token expiration in seconds - SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24 # One day to complete a push/pull + # Temporary tag expiration in seconds, this may actually be longer based on GC policy + PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60 # One hour per layer - # Registry v2 JWT Auth config - REGISTRY_JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60 # At most signed one hour, accounting for clock skew + # Signed registry grant token expiration in seconds + SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24 # One day to complete a push/pull - # The URL endpoint to which we redirect OAuth when generating a token locally. - LOCAL_OAUTH_HANDLER = '/oauth/localapp' + # Registry v2 JWT Auth config + REGISTRY_JWT_AUTH_MAX_FRESH_S = ( + 60 * 60 + 60 + ) # At most signed one hour, accounting for clock skew - # The various avatar background colors. - AVATAR_KIND = 'local' + # The URL endpoint to which we redirect OAuth when generating a token locally. 
+ LOCAL_OAUTH_HANDLER = "/oauth/localapp" - # Custom branding - BRANDING = { - 'logo': '/static/img/quay-horizontal-color.svg', - 'footer_img': None, - 'footer_url': None, - } + # The various avatar background colors. + AVATAR_KIND = "local" - # How often the Garbage Collection worker runs. - GARBAGE_COLLECTION_FREQUENCY = 30 # seconds + # Custom branding + BRANDING = { + "logo": "/static/img/quay-horizontal-color.svg", + "footer_img": None, + "footer_url": None, + } - # How long notifications will try to send before timing out. - NOTIFICATION_SEND_TIMEOUT = 10 + # How often the Garbage Collection worker runs. + GARBAGE_COLLECTION_FREQUENCY = 30 # seconds - # Security scanner - FEATURE_SECURITY_SCANNER = False - FEATURE_SECURITY_NOTIFICATIONS = False + # How long notifications will try to send before timing out. + NOTIFICATION_SEND_TIMEOUT = 10 - # The endpoint for the security scanner. - SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060' + # Security scanner + FEATURE_SECURITY_SCANNER = False + FEATURE_SECURITY_NOTIFICATIONS = False - # The number of seconds between indexing intervals in the security scanner - SECURITY_SCANNER_INDEXING_INTERVAL = 30 + # The endpoint for the security scanner. + SECURITY_SCANNER_ENDPOINT = "http://192.168.99.101:6060" - # If specified, the security scanner will only index images newer than the provided ID. - SECURITY_SCANNER_INDEXING_MIN_ID = None + # The number of seconds between indexing intervals in the security scanner + SECURITY_SCANNER_INDEXING_INTERVAL = 30 - # If specified, the endpoint to be used for all POST calls to the security scanner. - SECURITY_SCANNER_ENDPOINT_BATCH = None + # If specified, the security scanner will only index images newer than the provided ID. + SECURITY_SCANNER_INDEXING_MIN_ID = None - # If specified, GET requests that return non-200 will be retried at the following instances. - SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = [] + # If specified, the endpoint to be used for all POST calls to the security scanner. + SECURITY_SCANNER_ENDPOINT_BATCH = None - # The indexing engine version running inside the security scanner. - SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3 + # If specified, GET requests that return non-200 will be retried at the following instances. + SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = [] - # The version of the API to use for the security scanner. - SECURITY_SCANNER_API_VERSION = 'v1' + # The indexing engine version running inside the security scanner. + SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3 - # API call timeout for the security scanner. - SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10 + # The version of the API to use for the security scanner. + SECURITY_SCANNER_API_VERSION = "v1" - # POST call timeout for the security scanner. - SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS = 480 + # API call timeout for the security scanner. + SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10 - # The issuer name for the security scanner. - SECURITY_SCANNER_ISSUER_NAME = 'security_scanner' + # POST call timeout for the security scanner. + SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS = 480 - # Repository mirror - FEATURE_REPO_MIRROR = False + # The issuer name for the security scanner. + SECURITY_SCANNER_ISSUER_NAME = "security_scanner" - # The number of seconds between indexing intervals in the repository mirror - REPO_MIRROR_INTERVAL = 30 + # Repository mirror + FEATURE_REPO_MIRROR = False - # Require HTTPS and verify certificates of Quay registry during mirror. 
- REPO_MIRROR_TLS_VERIFY = True + # The number of seconds between indexing intervals in the repository mirror + REPO_MIRROR_INTERVAL = 30 - # Replaces the SERVER_HOSTNAME as the destination for mirroring. - REPO_MIRROR_SERVER_HOSTNAME = None + # Require HTTPS and verify certificates of Quay registry during mirror. + REPO_MIRROR_TLS_VERIFY = True - # JWTProxy Settings - # The address (sans schema) to proxy outgoing requests through the jwtproxy - # to be signed - JWTPROXY_SIGNER = 'localhost:8081' + # Replaces the SERVER_HOSTNAME as the destination for mirroring. + REPO_MIRROR_SERVER_HOSTNAME = None - # The audience that jwtproxy should verify on incoming requests - # If None, will be calculated off of the SERVER_HOSTNAME (default) - JWTPROXY_AUDIENCE = None + # JWTProxy Settings + # The address (sans schema) to proxy outgoing requests through the jwtproxy + # to be signed + JWTPROXY_SIGNER = "localhost:8081" - # Torrent management flags - FEATURE_BITTORRENT = False - BITTORRENT_PIECE_SIZE = 512 * 1024 - BITTORRENT_ANNOUNCE_URL = 'https://localhost:6881/announce' - BITTORRENT_FILENAME_PEPPER = str(uuid4()) - BITTORRENT_WEBSEED_LIFETIME = 3600 + # The audience that jwtproxy should verify on incoming requests + # If None, will be calculated off of the SERVER_HOSTNAME (default) + JWTPROXY_AUDIENCE = None - # "Secret" key for generating encrypted paging tokens. Only needed to be secret to - # hide the ID range for production (in which this value is overridden). Should *not* - # be relied upon for secure encryption otherwise. - # This value is a Fernet key and should be 32bytes URL-safe base64 encoded. - PAGE_TOKEN_KEY = '0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58=' + # Torrent management flags + FEATURE_BITTORRENT = False + BITTORRENT_PIECE_SIZE = 512 * 1024 + BITTORRENT_ANNOUNCE_URL = "https://localhost:6881/announce" + BITTORRENT_FILENAME_PEPPER = str(uuid4()) + BITTORRENT_WEBSEED_LIFETIME = 3600 - # The timeout for service key approval. - UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 # One day + # "Secret" key for generating encrypted paging tokens. Only needed to be secret to + # hide the ID range for production (in which this value is overridden). Should *not* + # be relied upon for secure encryption otherwise. + # This value is a Fernet key and should be 32bytes URL-safe base64 encoded. + PAGE_TOKEN_KEY = "0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58=" - # How long to wait before GCing an expired service key. - EXPIRED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 * 7 # One week + # The timeout for service key approval. + UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 # One day - # The ID of the user account in the database to be used for service audit logs. If none, the - # lowest user in the database will be used. - SERVICE_LOG_ACCOUNT_ID = None + # How long to wait before GCing an expired service key. + EXPIRED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 * 7 # One week - # The service key ID for the instance service. - # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. - INSTANCE_SERVICE_KEY_SERVICE = 'quay' + # The ID of the user account in the database to be used for service audit logs. If none, the + # lowest user in the database will be used. + SERVICE_LOG_ACCOUNT_ID = None - # The location of the key ID file generated for this instance. - INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid') + # The service key ID for the instance service. + # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. 
+ INSTANCE_SERVICE_KEY_SERVICE = "quay" - # The location of the private key generated for this instance. - # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. - INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem') + # The location of the key ID file generated for this instance. + INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, "quay.kid") - # This instance's service key expiration in minutes. - INSTANCE_SERVICE_KEY_EXPIRATION = 120 + # The location of the private key generated for this instance. + # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. + INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, "quay.pem") - # Number of minutes between expiration refresh in minutes. Should be the expiration / 2 minus - # some additional window time. - INSTANCE_SERVICE_KEY_REFRESH = 55 + # This instance's service key expiration in minutes. + INSTANCE_SERVICE_KEY_EXPIRATION = 120 - # The whitelist of client IDs for OAuth applications that allow for direct login. - DIRECT_OAUTH_CLIENTID_WHITELIST = [] + # Number of minutes between expiration refresh in minutes. Should be the expiration / 2 minus + # some additional window time. + INSTANCE_SERVICE_KEY_REFRESH = 55 - # URL that specifies the location of the prometheus stats aggregator. - PROMETHEUS_AGGREGATOR_URL = 'http://localhost:9092' + # The whitelist of client IDs for OAuth applications that allow for direct login. + DIRECT_OAUTH_CLIENTID_WHITELIST = [] - # Namespace prefix for all prometheus metrics. - PROMETHEUS_NAMESPACE = 'quay' + # URL that specifies the location of the prometheus stats aggregator. + PROMETHEUS_AGGREGATOR_URL = "http://localhost:9092" - # Overridable list of reverse DNS prefixes that are reserved for internal use on labels. - LABEL_KEY_RESERVED_PREFIXES = [] + # Namespace prefix for all prometheus metrics. + PROMETHEUS_NAMESPACE = "quay" - # Delays workers from starting until a random point in time between 0 and their regular interval. - STAGGER_WORKERS = True + # Overridable list of reverse DNS prefixes that are reserved for internal use on labels. + LABEL_KEY_RESERVED_PREFIXES = [] - # Location of the static marketing site. - STATIC_SITE_BUCKET = None + # Delays workers from starting until a random point in time between 0 and their regular interval. + STAGGER_WORKERS = True - # Site key and secret key for using recaptcha. - FEATURE_RECAPTCHA = False - RECAPTCHA_SITE_KEY = None - RECAPTCHA_SECRET_KEY = None + # Location of the static marketing site. + STATIC_SITE_BUCKET = None - # Server where TUF metadata can be found - TUF_SERVER = None + # Site key and secret key for using recaptcha. + FEATURE_RECAPTCHA = False + RECAPTCHA_SITE_KEY = None + RECAPTCHA_SECRET_KEY = None - # Prefix to add to metadata e.g. // - TUF_GUN_PREFIX = None + # Server where TUF metadata can be found + TUF_SERVER = None - # Maximum size allowed for layers in the registry. - MAXIMUM_LAYER_SIZE = '20G' + # Prefix to add to metadata e.g. // + TUF_GUN_PREFIX = None - # Feature Flag: Whether team syncing from the backing auth is enabled. - FEATURE_TEAM_SYNCING = False - TEAM_RESYNC_STALE_TIME = '30m' - TEAM_SYNC_WORKER_FREQUENCY = 60 # seconds + # Maximum size allowed for layers in the registry. + MAXIMUM_LAYER_SIZE = "20G" - # Feature Flag: If enabled, non-superusers can setup team syncing. - FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False + # Feature Flag: Whether team syncing from the backing auth is enabled. 
+ FEATURE_TEAM_SYNCING = False + TEAM_RESYNC_STALE_TIME = "30m" + TEAM_SYNC_WORKER_FREQUENCY = 60 # seconds - # The default configurable tag expiration time for time machine. - DEFAULT_TAG_EXPIRATION = '2w' + # Feature Flag: If enabled, non-superusers can setup team syncing. + FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False - # The options to present in namespace settings for the tag expiration. If empty, no option - # will be given and the default will be displayed read-only. - TAG_EXPIRATION_OPTIONS = ['0s', '1d', '1w', '2w', '4w'] + # The default configurable tag expiration time for time machine. + DEFAULT_TAG_EXPIRATION = "2w" - # Feature Flag: Whether users can view and change their tag expiration. - FEATURE_CHANGE_TAG_EXPIRATION = True + # The options to present in namespace settings for the tag expiration. If empty, no option + # will be given and the default will be displayed read-only. + TAG_EXPIRATION_OPTIONS = ["0s", "1d", "1w", "2w", "4w"] - # Defines a secret for enabling the health-check endpoint's debug information. - ENABLE_HEALTH_DEBUG_SECRET = None + # Feature Flag: Whether users can view and change their tag expiration. + FEATURE_CHANGE_TAG_EXPIRATION = True - # The lifetime for a user recovery token before it becomes invalid. - USER_RECOVERY_TOKEN_LIFETIME = '30m' + # Defines a secret for enabling the health-check endpoint's debug information. + ENABLE_HEALTH_DEBUG_SECRET = None - # If specified, when app specific passwords expire by default. - APP_SPECIFIC_TOKEN_EXPIRATION = None + # The lifetime for a user recovery token before it becomes invalid. + USER_RECOVERY_TOKEN_LIFETIME = "30m" - # Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI. - FEATURE_APP_SPECIFIC_TOKENS = True + # If specified, when app specific passwords expire by default. + APP_SPECIFIC_TOKEN_EXPIRATION = None - # How long expired app specific tokens should remain visible to users before being automatically - # deleted. Set to None to turn off garbage collection. - EXPIRED_APP_SPECIFIC_TOKEN_GC = '1d' + # Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI. + FEATURE_APP_SPECIFIC_TOKENS = True - # The size of pages returned by the Docker V2 API. - V2_PAGINATION_SIZE = 50 + # How long expired app specific tokens should remain visible to users before being automatically + # deleted. Set to None to turn off garbage collection. + EXPIRED_APP_SPECIFIC_TOKEN_GC = "1d" - # If enabled, ensures that API calls are made with the X-Requested-With header - # when called from a browser. - BROWSER_API_CALLS_XHR_ONLY = True + # The size of pages returned by the Docker V2 API. + V2_PAGINATION_SIZE = 50 - # If set to a non-None integer value, the default number of maximum builds for a namespace. - DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + # If enabled, ensures that API calls are made with the X-Requested-With header + # when called from a browser. + BROWSER_API_CALLS_XHR_ONLY = True - # If set to a non-None integer value, the default number of maximum builds for a namespace whose - # creator IP is deemed a threat. - THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + # If set to a non-None integer value, the default number of maximum builds for a namespace. + DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None - # The API Key to use when requesting IP information. - IP_DATA_API_KEY = None + # If set to a non-None integer value, the default number of maximum builds for a namespace whose + # creator IP is deemed a threat. 
+ THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None - # For Billing Support Only: The number of allowed builds on a namespace that has been billed - # successfully. - BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None + # The API Key to use when requesting IP information. + IP_DATA_API_KEY = None - # Configuration for the data model cache. - DATA_MODEL_CACHE_CONFIG = { - 'engine': 'memcached', - 'endpoint': ('127.0.0.1', 18080), - } + # For Billing Support Only: The number of allowed builds on a namespace that has been billed + # successfully. + BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None - # Defines the number of successive failures of a build trigger's build before the trigger is - # automatically disabled. - SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100 + # Configuration for the data model cache. + DATA_MODEL_CACHE_CONFIG = {"engine": "memcached", "endpoint": ("127.0.0.1", 18080)} - # Defines the number of successive internal errors of a build trigger's build before the - # trigger is automatically disabled. - SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5 + # Defines the number of successive failures of a build trigger's build before the trigger is + # automatically disabled. + SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100 - # Defines the delay required (in seconds) before the last_accessed field of a user/robot or access - # token will be updated after the previous update. - LAST_ACCESSED_UPDATE_THRESHOLD_S = 60 + # Defines the number of successive internal errors of a build trigger's build before the + # trigger is automatically disabled. + SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5 - # Defines the number of results per page used to show search results - SEARCH_RESULTS_PER_PAGE = 10 + # Defines the delay required (in seconds) before the last_accessed field of a user/robot or access + # token will be updated after the previous update. + LAST_ACCESSED_UPDATE_THRESHOLD_S = 60 - # Defines the maximum number of pages the user can paginate before they are limited - SEARCH_MAX_RESULT_PAGE_COUNT = 10 + # Defines the number of results per page used to show search results + SEARCH_RESULTS_PER_PAGE = 10 - # Feature Flag: Whether to record when users were last accessed. - FEATURE_USER_LAST_ACCESSED = True + # Defines the maximum number of pages the user can paginate before they are limited + SEARCH_MAX_RESULT_PAGE_COUNT = 10 - # Feature Flag: Whether to allow users to retrieve aggregated log counts. - FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL = True + # Feature Flag: Whether to record when users were last accessed. + FEATURE_USER_LAST_ACCESSED = True - # Feature Flag: Whether rate limiting is enabled. - FEATURE_RATE_LIMITS = False + # Feature Flag: Whether to allow users to retrieve aggregated log counts. + FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL = True - # Feature Flag: Whether to support log exporting. - FEATURE_LOG_EXPORT = True + # Feature Flag: Whether rate limiting is enabled. + FEATURE_RATE_LIMITS = False - # Maximum number of action logs pages that can be returned via the API. - ACTION_LOG_MAX_PAGE = None + # Feature Flag: Whether to support log exporting. + FEATURE_LOG_EXPORT = True - # Log model - LOGS_MODEL = 'database' - LOGS_MODEL_CONFIG = {} + # Maximum number of action logs pages that can be returned via the API. + ACTION_LOG_MAX_PAGE = None - # Namespace in which all audit logging is disabled. - DISABLED_FOR_AUDIT_LOGS = [] + # Log model + LOGS_MODEL = "database" + LOGS_MODEL_CONFIG = {} - # Namespace in which pull audit logging is disabled. 
- DISABLED_FOR_PULL_LOGS = [] + # Namespace in which all audit logging is disabled. + DISABLED_FOR_AUDIT_LOGS = [] - # Feature Flag: Whether pull logs are disabled for free namespace. - FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES = False + # Namespace in which pull audit logging is disabled. + DISABLED_FOR_PULL_LOGS = [] - # Feature Flag: If set to true, no account using blacklisted email addresses will be allowed - # to be created. - FEATURE_BLACKLISTED_EMAILS = False + # Feature Flag: Whether pull logs are disabled for free namespace. + FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES = False - # The list of domains, including subdomains, for which any *new* User with a matching - # email address will be denied creation. This option is only used if - # FEATURE_BLACKLISTED_EMAILS is enabled. - BLACKLISTED_EMAIL_DOMAINS = [] + # Feature Flag: If set to true, no account using blacklisted email addresses will be allowed + # to be created. + FEATURE_BLACKLISTED_EMAILS = False - # Feature Flag: Whether garbage collection is enabled. - FEATURE_GARBAGE_COLLECTION = True + # The list of domains, including subdomains, for which any *new* User with a matching + # email address will be denied creation. This option is only used if + # FEATURE_BLACKLISTED_EMAILS is enabled. + BLACKLISTED_EMAIL_DOMAINS = [] + + # Feature Flag: Whether garbage collection is enabled. + FEATURE_GARBAGE_COLLECTION = True diff --git a/config_app/_init_config.py b/config_app/_init_config.py index 8b0533570..bd2eb5826 100644 --- a/config_app/_init_config.py +++ b/config_app/_init_config.py @@ -7,31 +7,31 @@ import subprocess ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) -STATIC_DIR = os.path.join(ROOT_DIR, 'static/') -STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') -STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/') -TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/') -IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ +STATIC_DIR = os.path.join(ROOT_DIR, "static/") +STATIC_LDN_DIR = os.path.join(STATIC_DIR, "ldn/") +STATIC_FONTS_DIR = os.path.join(STATIC_DIR, "fonts/") +TEMPLATE_DIR = os.path.join(ROOT_DIR, "templates/") +IS_KUBERNETES = "KUBERNETES_SERVICE_HOST" in os.environ def _get_version_number_changelog(): - try: - with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f: - return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0) - except IOError: - return '' + try: + with open(os.path.join(ROOT_DIR, "CHANGELOG.md")) as f: + return re.search(r"(v[0-9]+\.[0-9]+\.[0-9]+)", f.readline()).group(0) + except IOError: + return "" def _get_git_sha(): - if os.path.exists("GIT_HEAD"): - with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f: - return f.read() - else: - try: - return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8] - except (OSError, subprocess.CalledProcessError): - pass - return "unknown" + if os.path.exists("GIT_HEAD"): + with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f: + return f.read() + else: + try: + return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8] + except (OSError, subprocess.CalledProcessError): + pass + return "unknown" __version__ = _get_version_number_changelog() diff --git a/config_app/c_app.py b/config_app/c_app.py index 0df198dd1..38847d6c9 100644 --- a/config_app/c_app.py +++ b/config_app/c_app.py @@ -15,28 +15,29 @@ app = Flask(__name__) logger = logging.getLogger(__name__) -OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 
'config_app/conf/stack') -INIT_SCRIPTS_LOCATION = '/conf/init/' +OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, "config_app/conf/stack") +INIT_SCRIPTS_LOCATION = "/conf/init/" -is_testing = 'TEST' in os.environ +is_testing = "TEST" in os.environ is_kubernetes = IS_KUBERNETES -logger.debug('Configuration is on a kubernetes deployment: %s' % IS_KUBERNETES) +logger.debug("Configuration is on a kubernetes deployment: %s" % IS_KUBERNETES) -config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', - testing=is_testing) +config_provider = get_config_provider( + OVERRIDE_CONFIG_DIRECTORY, "config.yaml", "config.py", testing=is_testing +) if is_testing: - from test.testconfig import TestConfig + from test.testconfig import TestConfig - logger.debug('Loading test config.') - app.config.from_object(TestConfig()) + logger.debug("Loading test config.") + app.config.from_object(TestConfig()) else: - from config import DefaultConfig + from config import DefaultConfig - logger.debug('Loading default config.') - app.config.from_object(DefaultConfig()) - app.teardown_request(database.close_db_filter) + logger.debug("Loading default config.") + app.config.from_object(DefaultConfig()) + app.teardown_request(database.close_db_filter) # Load the override config via the provider. config_provider.update_app_config(app.config) diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py index d0ea0a758..add20b457 100644 --- a/config_app/conf/gunicorn_local.py +++ b/config_app/conf/gunicorn_local.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -9,18 +10,24 @@ from config_app.config_util.log import logfile_path logconfig = logfile_path(debug=True) -bind = '0.0.0.0:5000' +bind = "0.0.0.0:5000" workers = 1 -worker_class = 'gevent' +worker_class = "gevent" daemon = False -pythonpath = '.' +pythonpath = "." preload_app = True + def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) + logger = logging.getLogger(__name__) + logger.debug( + "Starting local gunicorn with %s workers and %s worker class", + workers, + worker_class, + ) diff --git a/config_app/conf/gunicorn_web.py b/config_app/conf/gunicorn_web.py index 14225fe72..107d8c395 100644 --- a/config_app/conf/gunicorn_web.py +++ b/config_app/conf/gunicorn_web.py @@ -1,5 +1,6 @@ import sys import os + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging @@ -10,17 +11,23 @@ from config_app.config_util.log import logfile_path logconfig = logfile_path(debug=True) -bind = 'unix:/tmp/gunicorn_web.sock' +bind = "unix:/tmp/gunicorn_web.sock" workers = 1 -worker_class = 'gevent' -pythonpath = '.' +worker_class = "gevent" +pythonpath = "." preload_app = True + def post_fork(server, worker): - # Reset the Random library to ensure it won't raise the "PID check failed." error after - # gunicorn forks. - Random.atfork() + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. 
+ Random.atfork() + def when_ready(server): - logger = logging.getLogger(__name__) - logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) + logger = logging.getLogger(__name__) + logger.debug( + "Starting local gunicorn with %s workers and %s worker class", + workers, + worker_class, + ) diff --git a/config_app/config_application.py b/config_app/config_application.py index 43676e354..a6e5d9fa3 100644 --- a/config_app/config_application.py +++ b/config_app/config_application.py @@ -3,6 +3,6 @@ from config_app.c_app import app as application # Bind all of the blueprints import config_web -if __name__ == '__main__': - logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) - application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') +if __name__ == "__main__": + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) + application.run(port=5000, debug=True, threaded=True, host="0.0.0.0") diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py index c80fc1c9c..0620fed63 100644 --- a/config_app/config_endpoints/api/__init__.py +++ b/config_app/config_endpoints/api/__init__.py @@ -13,141 +13,153 @@ from config_app.c_app import app, IS_KUBERNETES from config_app.config_endpoints.exception import InvalidResponse, InvalidRequest logger = logging.getLogger(__name__) -api_bp = Blueprint('api', __name__) +api_bp = Blueprint("api", __name__) -CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With'] +CROSS_DOMAIN_HEADERS = ["Authorization", "Content-Type", "X-Requested-With"] class ApiExceptionHandlingApi(Api): - pass + pass - @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS) - def handle_error(self, error): - return super(ApiExceptionHandlingApi, self).handle_error(error) + @crossdomain(origin="*", headers=CROSS_DOMAIN_HEADERS) + def handle_error(self, error): + return super(ApiExceptionHandlingApi, self).handle_error(error) api = ApiExceptionHandlingApi() api.init_app(api_bp) + def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None): - if not metadata: - metadata = {} + if not metadata: + metadata = {} - if repo: - repo_name = repo.name + if repo: + repo_name = repo.name + + model.log.log_action( + kind, user_or_orgname, repo_name, user_or_orgname, request.remote_addr, metadata + ) - model.log.log_action(kind, user_or_orgname, repo_name, user_or_orgname, request.remote_addr, metadata) def format_date(date): - """ Output an RFC822 date format. """ - if date is None: - return None - return formatdate(timegm(date.utctimetuple())) - + """ Output an RFC822 date format. 
""" + if date is None: + return None + return formatdate(timegm(date.utctimetuple())) def resource(*urls, **kwargs): - def wrapper(api_resource): - if not api_resource: - return None + def wrapper(api_resource): + if not api_resource: + return None - api_resource.registered = True - api.add_resource(api_resource, *urls, **kwargs) - return api_resource + api_resource.registered = True + api.add_resource(api_resource, *urls, **kwargs) + return api_resource - return wrapper + return wrapper class ApiResource(Resource): - registered = False - method_decorators = [] + registered = False + method_decorators = [] - def options(self): - return None, 200 + def options(self): + return None, 200 def add_method_metadata(name, value): - def modifier(func): - if func is None: - return None + def modifier(func): + if func is None: + return None - if '__api_metadata' not in dir(func): - func.__api_metadata = {} - func.__api_metadata[name] = value - return func + if "__api_metadata" not in dir(func): + func.__api_metadata = {} + func.__api_metadata[name] = value + return func - return modifier + return modifier def method_metadata(func, name): - if func is None: - return None + if func is None: + return None - if '__api_metadata' in dir(func): - return func.__api_metadata.get(name, None) - return None + if "__api_metadata" in dir(func): + return func.__api_metadata.get(name, None) + return None def no_cache(f): - @wraps(f) - def add_no_cache(*args, **kwargs): - response = f(*args, **kwargs) - if response is not None: - response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate' - return response - return add_no_cache + @wraps(f) + def add_no_cache(*args, **kwargs): + response = f(*args, **kwargs) + if response is not None: + response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" + return response + + return add_no_cache def define_json_response(schema_name): - def wrapper(func): - @add_method_metadata('response_schema', schema_name) - @wraps(func) - def wrapped(self, *args, **kwargs): - schema = self.schemas[schema_name] - resp = func(self, *args, **kwargs) + def wrapper(func): + @add_method_metadata("response_schema", schema_name) + @wraps(func) + def wrapped(self, *args, **kwargs): + schema = self.schemas[schema_name] + resp = func(self, *args, **kwargs) - if app.config['TESTING']: - try: - validate(resp, schema) - except ValidationError as ex: - raise InvalidResponse(ex.message) + if app.config["TESTING"]: + try: + validate(resp, schema) + except ValidationError as ex: + raise InvalidResponse(ex.message) - return resp - return wrapped - return wrapper + return resp + + return wrapped + + return wrapper def validate_json_request(schema_name, optional=False): - def wrapper(func): - @add_method_metadata('request_schema', schema_name) - @wraps(func) - def wrapped(self, *args, **kwargs): - schema = self.schemas[schema_name] - try: - json_data = request.get_json() - if json_data is None: - if not optional: - raise InvalidRequest('Missing JSON body') - else: - validate(json_data, schema) - return func(self, *args, **kwargs) - except ValidationError as ex: - raise InvalidRequest(ex.message) - return wrapped - return wrapper + def wrapper(func): + @add_method_metadata("request_schema", schema_name) + @wraps(func) + def wrapped(self, *args, **kwargs): + schema = self.schemas[schema_name] + try: + json_data = request.get_json() + if json_data is None: + if not optional: + raise InvalidRequest("Missing JSON body") + else: + validate(json_data, schema) + return func(self, *args, 
**kwargs) + except ValidationError as ex: + raise InvalidRequest(ex.message) + + return wrapped + + return wrapper + def kubernetes_only(f): - """ Aborts the request with a 400 if the app is not running on kubernetes """ - @wraps(f) - def abort_if_not_kube(*args, **kwargs): - if not IS_KUBERNETES: - abort(400) + """ Aborts the request with a 400 if the app is not running on kubernetes """ - return f(*args, **kwargs) - return abort_if_not_kube + @wraps(f) + def abort_if_not_kube(*args, **kwargs): + if not IS_KUBERNETES: + abort(400) -nickname = partial(add_method_metadata, 'nickname') + return f(*args, **kwargs) + + return abort_if_not_kube + + +nickname = partial(add_method_metadata, "nickname") import config_app.config_endpoints.api.discovery diff --git a/config_app/config_endpoints/api/discovery.py b/config_app/config_endpoints/api/discovery.py index 183963ea3..eef5536e5 100644 --- a/config_app/config_endpoints/api/discovery.py +++ b/config_app/config_endpoints/api/discovery.py @@ -5,250 +5,253 @@ from collections import OrderedDict from config_app.c_app import app from config_app.config_endpoints.api import method_metadata -from config_app.config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER +from config_app.config_endpoints.common import ( + fully_qualified_name, + PARAM_REGEX, + TYPE_CONVERTER, +) logger = logging.getLogger(__name__) def generate_route_data(): - include_internal = True - compact = True + include_internal = True + compact = True - def swagger_parameter(name, description, kind='path', param_type='string', required=True, - enum=None, schema=None): - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject - parameter_info = { - 'name': name, - 'in': kind, - 'required': required - } + def swagger_parameter( + name, + description, + kind="path", + param_type="string", + required=True, + enum=None, + schema=None, + ): + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject + parameter_info = {"name": name, "in": kind, "required": required} - if schema: - parameter_info['schema'] = { - '$ref': '#/definitions/%s' % schema - } - else: - parameter_info['type'] = param_type - - if enum is not None and len(list(enum)) > 0: - parameter_info['enum'] = list(enum) - - return parameter_info - - paths = {} - models = {} - tags = [] - tags_added = set() - operation_ids = set() - - for rule in app.url_map.iter_rules(): - endpoint_method = app.view_functions[rule.endpoint] - - # Verify that we have a view class for this API method. - if not 'view_class' in dir(endpoint_method): - continue - - view_class = endpoint_method.view_class - - # Hide the class if it is internal. - internal = method_metadata(view_class, 'internal') - if not include_internal and internal: - continue - - # Build the tag. - parts = fully_qualified_name(view_class).split('.') - tag_name = parts[-2] - if not tag_name in tags_added: - tags_added.add(tag_name) - tags.append({ - 'name': tag_name, - 'description': (sys.modules[view_class.__module__].__doc__ or '').strip() - }) - - # Build the Swagger data for the path. 
- swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule) - full_name = fully_qualified_name(view_class) - path_swagger = { - 'x-name': full_name, - 'x-path': swagger_path, - 'x-tag': tag_name - } - - related_user_res = method_metadata(view_class, 'related_user_resource') - if related_user_res is not None: - path_swagger['x-user-related'] = fully_qualified_name(related_user_res) - - paths[swagger_path] = path_swagger - - # Add any global path parameters. - param_data_map = view_class.__api_path_params if '__api_path_params' in dir( - view_class) else {} - if param_data_map: - path_parameters_swagger = [] - for path_parameter in param_data_map: - description = param_data_map[path_parameter].get('description') - path_parameters_swagger.append(swagger_parameter(path_parameter, description)) - - path_swagger['parameters'] = path_parameters_swagger - - # Add the individual HTTP operations. - method_names = list(rule.methods.difference(['HEAD', 'OPTIONS'])) - for method_name in method_names: - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object - method = getattr(view_class, method_name.lower(), None) - if method is None: - logger.debug('Unable to find method for %s in class %s', method_name, view_class) - continue - - operationId = method_metadata(method, 'nickname') - operation_swagger = { - 'operationId': operationId, - 'parameters': [], - } - - if operationId is None: - continue - - if operationId in operation_ids: - raise Exception('Duplicate operation Id: %s' % operationId) - - operation_ids.add(operationId) - - # Mark the method as internal. - internal = method_metadata(method, 'internal') - if internal is not None: - operation_swagger['x-internal'] = True - - if include_internal: - requires_fresh_login = method_metadata(method, 'requires_fresh_login') - if requires_fresh_login is not None: - operation_swagger['x-requires-fresh-login'] = True - - # Add the path parameters. - if rule.arguments: - for path_parameter in rule.arguments: - description = param_data_map.get(path_parameter, {}).get('description') - operation_swagger['parameters'].append( - swagger_parameter(path_parameter, description)) - - # Add the query parameters. - if '__api_query_params' in dir(method): - for query_parameter_info in method.__api_query_params: - name = query_parameter_info['name'] - description = query_parameter_info['help'] - param_type = TYPE_CONVERTER[query_parameter_info['type']] - required = query_parameter_info['required'] - - operation_swagger['parameters'].append( - swagger_parameter(name, description, kind='query', - param_type=param_type, - required=required, - enum=query_parameter_info['choices'])) - - # Add the OAuth security block. - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject - scope = method_metadata(method, 'oauth2_scope') - if scope and not compact: - operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}] - - # Add the responses block. - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject - response_schema_name = method_metadata(method, 'response_schema') - if not compact: - if response_schema_name: - models[response_schema_name] = view_class.schemas[response_schema_name] - - models['ApiError'] = { - 'type': 'object', - 'properties': { - 'status': { - 'type': 'integer', - 'description': 'Status code of the response.' - }, - 'type': { - 'type': 'string', - 'description': 'Reference to the type of the error.' 
- }, - 'detail': { - 'type': 'string', - 'description': 'Details about the specific instance of the error.' - }, - 'title': { - 'type': 'string', - 'description': 'Unique error code to identify the type of error.' - }, - 'error_message': { - 'type': 'string', - 'description': 'Deprecated; alias for detail' - }, - 'error_type': { - 'type': 'string', - 'description': 'Deprecated; alias for detail' - } - }, - 'required': [ - 'status', - 'type', - 'title', - ] - } - - responses = { - '400': { - 'description': 'Bad Request', - }, - - '401': { - 'description': 'Session required', - }, - - '403': { - 'description': 'Unauthorized access', - }, - - '404': { - 'description': 'Not found', - }, - } - - for _, body in responses.items(): - body['schema'] = {'$ref': '#/definitions/ApiError'} - - if method_name == 'DELETE': - responses['204'] = { - 'description': 'Deleted' - } - elif method_name == 'POST': - responses['201'] = { - 'description': 'Successful creation' - } + if schema: + parameter_info["schema"] = {"$ref": "#/definitions/%s" % schema} else: - responses['200'] = { - 'description': 'Successful invocation' - } + parameter_info["type"] = param_type - if response_schema_name: - responses['200']['schema'] = { - '$ref': '#/definitions/%s' % response_schema_name - } + if enum is not None and len(list(enum)) > 0: + parameter_info["enum"] = list(enum) - operation_swagger['responses'] = responses + return parameter_info - # Add the request block. - request_schema_name = method_metadata(method, 'request_schema') - if request_schema_name and not compact: - models[request_schema_name] = view_class.schemas[request_schema_name] + paths = {} + models = {} + tags = [] + tags_added = set() + operation_ids = set() - operation_swagger['parameters'].append( - swagger_parameter('body', 'Request body contents.', kind='body', - schema=request_schema_name)) + for rule in app.url_map.iter_rules(): + endpoint_method = app.view_functions[rule.endpoint] - # Add the operation to the parent path. - if not internal or (internal and include_internal): - path_swagger[method_name.lower()] = operation_swagger + # Verify that we have a view class for this API method. + if not "view_class" in dir(endpoint_method): + continue - tags.sort(key=lambda t: t['name']) - paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag'])) + view_class = endpoint_method.view_class - if compact: - return {'paths': paths} + # Hide the class if it is internal. + internal = method_metadata(view_class, "internal") + if not include_internal and internal: + continue + + # Build the tag. + parts = fully_qualified_name(view_class).split(".") + tag_name = parts[-2] + if not tag_name in tags_added: + tags_added.add(tag_name) + tags.append( + { + "name": tag_name, + "description": ( + sys.modules[view_class.__module__].__doc__ or "" + ).strip(), + } + ) + + # Build the Swagger data for the path. + swagger_path = PARAM_REGEX.sub(r"{\2}", rule.rule) + full_name = fully_qualified_name(view_class) + path_swagger = {"x-name": full_name, "x-path": swagger_path, "x-tag": tag_name} + + related_user_res = method_metadata(view_class, "related_user_resource") + if related_user_res is not None: + path_swagger["x-user-related"] = fully_qualified_name(related_user_res) + + paths[swagger_path] = path_swagger + + # Add any global path parameters. 
+ param_data_map = ( + view_class.__api_path_params + if "__api_path_params" in dir(view_class) + else {} + ) + if param_data_map: + path_parameters_swagger = [] + for path_parameter in param_data_map: + description = param_data_map[path_parameter].get("description") + path_parameters_swagger.append( + swagger_parameter(path_parameter, description) + ) + + path_swagger["parameters"] = path_parameters_swagger + + # Add the individual HTTP operations. + method_names = list(rule.methods.difference(["HEAD", "OPTIONS"])) + for method_name in method_names: + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object + method = getattr(view_class, method_name.lower(), None) + if method is None: + logger.debug( + "Unable to find method for %s in class %s", method_name, view_class + ) + continue + + operationId = method_metadata(method, "nickname") + operation_swagger = {"operationId": operationId, "parameters": []} + + if operationId is None: + continue + + if operationId in operation_ids: + raise Exception("Duplicate operation Id: %s" % operationId) + + operation_ids.add(operationId) + + # Mark the method as internal. + internal = method_metadata(method, "internal") + if internal is not None: + operation_swagger["x-internal"] = True + + if include_internal: + requires_fresh_login = method_metadata(method, "requires_fresh_login") + if requires_fresh_login is not None: + operation_swagger["x-requires-fresh-login"] = True + + # Add the path parameters. + if rule.arguments: + for path_parameter in rule.arguments: + description = param_data_map.get(path_parameter, {}).get( + "description" + ) + operation_swagger["parameters"].append( + swagger_parameter(path_parameter, description) + ) + + # Add the query parameters. + if "__api_query_params" in dir(method): + for query_parameter_info in method.__api_query_params: + name = query_parameter_info["name"] + description = query_parameter_info["help"] + param_type = TYPE_CONVERTER[query_parameter_info["type"]] + required = query_parameter_info["required"] + + operation_swagger["parameters"].append( + swagger_parameter( + name, + description, + kind="query", + param_type=param_type, + required=required, + enum=query_parameter_info["choices"], + ) + ) + + # Add the OAuth security block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject + scope = method_metadata(method, "oauth2_scope") + if scope and not compact: + operation_swagger["security"] = [{"oauth2_implicit": [scope.scope]}] + + # Add the responses block. 
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject + response_schema_name = method_metadata(method, "response_schema") + if not compact: + if response_schema_name: + models[response_schema_name] = view_class.schemas[ + response_schema_name + ] + + models["ApiError"] = { + "type": "object", + "properties": { + "status": { + "type": "integer", + "description": "Status code of the response.", + }, + "type": { + "type": "string", + "description": "Reference to the type of the error.", + }, + "detail": { + "type": "string", + "description": "Details about the specific instance of the error.", + }, + "title": { + "type": "string", + "description": "Unique error code to identify the type of error.", + }, + "error_message": { + "type": "string", + "description": "Deprecated; alias for detail", + }, + "error_type": { + "type": "string", + "description": "Deprecated; alias for detail", + }, + }, + "required": ["status", "type", "title"], + } + + responses = { + "400": {"description": "Bad Request"}, + "401": {"description": "Session required"}, + "403": {"description": "Unauthorized access"}, + "404": {"description": "Not found"}, + } + + for _, body in responses.items(): + body["schema"] = {"$ref": "#/definitions/ApiError"} + + if method_name == "DELETE": + responses["204"] = {"description": "Deleted"} + elif method_name == "POST": + responses["201"] = {"description": "Successful creation"} + else: + responses["200"] = {"description": "Successful invocation"} + + if response_schema_name: + responses["200"]["schema"] = { + "$ref": "#/definitions/%s" % response_schema_name + } + + operation_swagger["responses"] = responses + + # Add the request block. + request_schema_name = method_metadata(method, "request_schema") + if request_schema_name and not compact: + models[request_schema_name] = view_class.schemas[request_schema_name] + + operation_swagger["parameters"].append( + swagger_parameter( + "body", + "Request body contents.", + kind="body", + schema=request_schema_name, + ) + ) + + # Add the operation to the parent path. 
+ if not internal or (internal and include_internal): + path_swagger[method_name.lower()] = operation_swagger + + tags.sort(key=lambda t: t["name"]) + paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]["x-tag"])) + + if compact: + return {"paths": paths} diff --git a/config_app/config_endpoints/api/kube_endpoints.py b/config_app/config_endpoints/api/kube_endpoints.py index a7143412d..b71d2fe61 100644 --- a/config_app/config_endpoints/api/kube_endpoints.py +++ b/config_app/config_endpoints/api/kube_endpoints.py @@ -6,138 +6,152 @@ from config_app.config_util.config import get_config_as_kube_secret from data.database import configure from config_app.c_app import app, config_provider -from config_app.config_endpoints.api import resource, ApiResource, nickname, kubernetes_only, validate_json_request -from config_app.config_util.k8saccessor import KubernetesAccessorSingleton, K8sApiException +from config_app.config_endpoints.api import ( + resource, + ApiResource, + nickname, + kubernetes_only, + validate_json_request, +) +from config_app.config_util.k8saccessor import ( + KubernetesAccessorSingleton, + K8sApiException, +) logger = logging.getLogger(__name__) -@resource('/v1/kubernetes/deployments/') + +@resource("/v1/kubernetes/deployments/") class SuperUserKubernetesDeployment(ApiResource): - """ Resource for the getting the status of Red Hat Quay deployments and cycling them """ - schemas = { - 'ValidateDeploymentNames': { - 'type': 'object', - 'description': 'Validates deployment names for cycling', - 'required': [ - 'deploymentNames' - ], - 'properties': { - 'deploymentNames': { - 'type': 'array', - 'description': 'The names of the deployments to cycle' - }, - }, + """ Resource for the getting the status of Red Hat Quay deployments and cycling them """ + + schemas = { + "ValidateDeploymentNames": { + "type": "object", + "description": "Validates deployment names for cycling", + "required": ["deploymentNames"], + "properties": { + "deploymentNames": { + "type": "array", + "description": "The names of the deployments to cycle", + } + }, + } } - } - @kubernetes_only - @nickname('scGetNumDeployments') - def get(self): - return KubernetesAccessorSingleton.get_instance().get_qe_deployments() + @kubernetes_only + @nickname("scGetNumDeployments") + def get(self): + return KubernetesAccessorSingleton.get_instance().get_qe_deployments() - @kubernetes_only - @validate_json_request('ValidateDeploymentNames') - @nickname('scCycleQEDeployments') - def put(self): - deployment_names = request.get_json()['deploymentNames'] - return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments(deployment_names) + @kubernetes_only + @validate_json_request("ValidateDeploymentNames") + @nickname("scCycleQEDeployments") + def put(self): + deployment_names = request.get_json()["deploymentNames"] + return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments( + deployment_names + ) -@resource('/v1/kubernetes/deployment//status') +@resource("/v1/kubernetes/deployment//status") class QEDeploymentRolloutStatus(ApiResource): - @kubernetes_only - @nickname('scGetDeploymentRolloutStatus') - def get(self, deployment): - deployment_rollout_status = KubernetesAccessorSingleton.get_instance().get_deployment_rollout_status(deployment) - return { - 'status': deployment_rollout_status.status, - 'message': deployment_rollout_status.message, - } + @kubernetes_only + @nickname("scGetDeploymentRolloutStatus") + def get(self, deployment): + deployment_rollout_status = 
KubernetesAccessorSingleton.get_instance().get_deployment_rollout_status( + deployment + ) + return { + "status": deployment_rollout_status.status, + "message": deployment_rollout_status.message, + } -@resource('/v1/kubernetes/deployments/rollback') +@resource("/v1/kubernetes/deployments/rollback") class QEDeploymentRollback(ApiResource): - """ Resource for rolling back deployments """ - schemas = { - 'ValidateDeploymentNames': { - 'type': 'object', - 'description': 'Validates deployment names for rolling back', - 'required': [ - 'deploymentNames' - ], - 'properties': { - 'deploymentNames': { - 'type': 'array', - 'description': 'The names of the deployments to rollback' - }, - }, - } - } + """ Resource for rolling back deployments """ - @kubernetes_only - @nickname('scRollbackDeployments') - @validate_json_request('ValidateDeploymentNames') - def post(self): - """ + schemas = { + "ValidateDeploymentNames": { + "type": "object", + "description": "Validates deployment names for rolling back", + "required": ["deploymentNames"], + "properties": { + "deploymentNames": { + "type": "array", + "description": "The names of the deployments to rollback", + } + }, + } + } + + @kubernetes_only + @nickname("scRollbackDeployments") + @validate_json_request("ValidateDeploymentNames") + def post(self): + """ Returns the config to its original state and rolls back deployments :return: """ - deployment_names = request.get_json()['deploymentNames'] + deployment_names = request.get_json()["deploymentNames"] - # To roll back a deployment, we must do 2 things: - # 1. Roll back the config secret to its old value (discarding changes we made in this session) - # 2. Trigger a rollback to the previous revision, so that the pods will be restarted with - # the old config - old_secret = get_config_as_kube_secret(config_provider.get_old_config_dir()) - kube_accessor = KubernetesAccessorSingleton.get_instance() - kube_accessor.replace_qe_secret(old_secret) + # To roll back a deployment, we must do 2 things: + # 1. Roll back the config secret to its old value (discarding changes we made in this session) + # 2. Trigger a rollback to the previous revision, so that the pods will be restarted with + # the old config + old_secret = get_config_as_kube_secret(config_provider.get_old_config_dir()) + kube_accessor = KubernetesAccessorSingleton.get_instance() + kube_accessor.replace_qe_secret(old_secret) - try: - for name in deployment_names: - kube_accessor.rollback_deployment(name) - except K8sApiException as e: - logger.exception('Failed to rollback deployment.') - return make_response(e.message, 503) + try: + for name in deployment_names: + kube_accessor.rollback_deployment(name) + except K8sApiException as e: + logger.exception("Failed to rollback deployment.") + return make_response(e.message, 503) - return make_response('Ok', 204) + return make_response("Ok", 204) -@resource('/v1/kubernetes/config') +@resource("/v1/kubernetes/config") class SuperUserKubernetesConfiguration(ApiResource): - """ Resource for saving the config files to kubernetes secrets. """ + """ Resource for saving the config files to kubernetes secrets. 
""" - @kubernetes_only - @nickname('scDeployConfiguration') - def post(self): - try: - new_secret = get_config_as_kube_secret(config_provider.get_config_dir_path()) - KubernetesAccessorSingleton.get_instance().replace_qe_secret(new_secret) - except K8sApiException as e: - logger.exception('Failed to deploy qe config secret to kubernetes.') - return make_response(e.message, 503) + @kubernetes_only + @nickname("scDeployConfiguration") + def post(self): + try: + new_secret = get_config_as_kube_secret( + config_provider.get_config_dir_path() + ) + KubernetesAccessorSingleton.get_instance().replace_qe_secret(new_secret) + except K8sApiException as e: + logger.exception("Failed to deploy qe config secret to kubernetes.") + return make_response(e.message, 503) - return make_response('Ok', 201) + return make_response("Ok", 201) -@resource('/v1/kubernetes/config/populate') +@resource("/v1/kubernetes/config/populate") class KubernetesConfigurationPopulator(ApiResource): - """ Resource for populating the local configuration from the cluster's kubernetes secrets. """ + """ Resource for populating the local configuration from the cluster's kubernetes secrets. """ - @kubernetes_only - @nickname('scKubePopulateConfig') - def post(self): - # Get a clean transient directory to write the config into - config_provider.new_config_dir() + @kubernetes_only + @nickname("scKubePopulateConfig") + def post(self): + # Get a clean transient directory to write the config into + config_provider.new_config_dir() - kube_accessor = KubernetesAccessorSingleton.get_instance() - kube_accessor.save_secret_to_directory(config_provider.get_config_dir_path()) - config_provider.create_copy_of_config_dir() + kube_accessor = KubernetesAccessorSingleton.get_instance() + kube_accessor.save_secret_to_directory(config_provider.get_config_dir_path()) + config_provider.create_copy_of_config_dir() - # We update the db configuration to connect to their specified one - # (Note, even if this DB isn't valid, it won't affect much in the config app, since we'll report an error, - # and all of the options create a new clean dir, so we'll never pollute configs) - combined = dict(**app.config) - combined.update(config_provider.get_config()) - configure(combined) + # We update the db configuration to connect to their specified one + # (Note, even if this DB isn't valid, it won't affect much in the config app, since we'll report an error, + # and all of the options create a new clean dir, so we'll never pollute configs) + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) - return 200 + return 200 diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index 810d4a229..29c3545b7 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -2,301 +2,283 @@ import logging from flask import abort, request -from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model -from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request -from config_app.c_app import (app, config_provider, superusers, ip_resolver, - instance_keys, INIT_SCRIPTS_LOCATION) +from config_app.config_endpoints.api.suconfig_models_pre_oci import ( + pre_oci_model as model, +) +from config_app.config_endpoints.api import ( + resource, + ApiResource, + nickname, + validate_json_request, +) +from config_app.c_app import ( + app, + config_provider, + superusers, + ip_resolver, + 
instance_keys, + INIT_SCRIPTS_LOCATION, +) from data.database import configure from data.runmigration import run_alembic_migration from util.config.configutil import add_enterprise_config_defaults -from util.config.validator import validate_service_for_config, ValidatorContext, \ - is_valid_config_upload_filename +from util.config.validator import ( + validate_service_for_config, + ValidatorContext, + is_valid_config_upload_filename, +) logger = logging.getLogger(__name__) def database_is_valid(): - """ Returns whether the database, as configured, is valid. """ - return model.is_valid() + """ Returns whether the database, as configured, is valid. """ + return model.is_valid() def database_has_users(): - """ Returns whether the database has any users defined. """ - return model.has_users() + """ Returns whether the database has any users defined. """ + return model.has_users() -@resource('/v1/superuser/config') +@resource("/v1/superuser/config") class SuperUserConfig(ApiResource): - """ Resource for fetching and updating the current configuration, if any. """ - schemas = { - 'UpdateConfig': { - 'type': 'object', - 'description': 'Updates the YAML config file', - 'required': [ - 'config', - ], - 'properties': { - 'config': { - 'type': 'object' - }, - 'password': { - 'type': 'string' - }, - }, - }, - } + """ Resource for fetching and updating the current configuration, if any. """ - @nickname('scGetConfig') - def get(self): - """ Returns the currently defined configuration, if any. """ - config_object = config_provider.get_config() - return { - 'config': config_object + schemas = { + "UpdateConfig": { + "type": "object", + "description": "Updates the YAML config file", + "required": ["config"], + "properties": { + "config": {"type": "object"}, + "password": {"type": "string"}, + }, + } } - @nickname('scUpdateConfig') - @validate_json_request('UpdateConfig') - def put(self): - """ Updates the config override file. """ - # Note: This method is called to set the database configuration before super users exists, - # so we also allow it to be called if there is no valid registry configuration setup. - config_object = request.get_json()['config'] + @nickname("scGetConfig") + def get(self): + """ Returns the currently defined configuration, if any. """ + config_object = config_provider.get_config() + return {"config": config_object} - # Add any enterprise defaults missing from the config. - add_enterprise_config_defaults(config_object, app.config['SECRET_KEY']) + @nickname("scUpdateConfig") + @validate_json_request("UpdateConfig") + def put(self): + """ Updates the config override file. """ + # Note: This method is called to set the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. + config_object = request.get_json()["config"] - # Write the configuration changes to the config override file. - config_provider.save_config(config_object) + # Add any enterprise defaults missing from the config. + add_enterprise_config_defaults(config_object, app.config["SECRET_KEY"]) - # now try to connect to the db provided in their config to validate it works - combined = dict(**app.config) - combined.update(config_provider.get_config()) - configure(combined, testing=app.config['TESTING']) + # Write the configuration changes to the config override file. 
+ config_provider.save_config(config_object) - return { - 'exists': True, - 'config': config_object - } + # now try to connect to the db provided in their config to validate it works + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined, testing=app.config["TESTING"]) + + return {"exists": True, "config": config_object} -@resource('/v1/superuser/registrystatus') +@resource("/v1/superuser/registrystatus") class SuperUserRegistryStatus(ApiResource): - """ Resource for determining the status of the registry, such as if config exists, + """ Resource for determining the status of the registry, such as if config exists, if a database is configured, and if it has any defined users. """ - @nickname('scRegistryStatus') - def get(self): - """ Returns the status of the registry. """ - # If there is no config file, we need to setup the database. - if not config_provider.config_exists(): - return { - 'status': 'config-db' - } + @nickname("scRegistryStatus") + def get(self): + """ Returns the status of the registry. """ + # If there is no config file, we need to setup the database. + if not config_provider.config_exists(): + return {"status": "config-db"} - # If the database isn't yet valid, then we need to set it up. - if not database_is_valid(): - return { - 'status': 'setup-db' - } + # If the database isn't yet valid, then we need to set it up. + if not database_is_valid(): + return {"status": "setup-db"} - config = config_provider.get_config() - if config and config.get('SETUP_COMPLETE'): - return { - 'status': 'config' - } + config = config_provider.get_config() + if config and config.get("SETUP_COMPLETE"): + return {"status": "config"} - return { - 'status': 'create-superuser' if not database_has_users() else 'config' - } + return {"status": "create-superuser" if not database_has_users() else "config"} class _AlembicLogHandler(logging.Handler): - def __init__(self): - super(_AlembicLogHandler, self).__init__() - self.records = [] + def __init__(self): + super(_AlembicLogHandler, self).__init__() + self.records = [] - def emit(self, record): - self.records.append({ - 'level': record.levelname, - 'message': record.getMessage() - }) + def emit(self, record): + self.records.append({"level": record.levelname, "message": record.getMessage()}) def _reload_config(): - combined = dict(**app.config) - combined.update(config_provider.get_config()) - configure(combined) - return combined + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) + return combined -@resource('/v1/superuser/setupdb') +@resource("/v1/superuser/setupdb") class SuperUserSetupDatabase(ApiResource): - """ Resource for invoking alembic to setup the database. """ + """ Resource for invoking alembic to setup the database. """ - @nickname('scSetupDatabase') - def get(self): - """ Invokes the alembic upgrade process. """ - # Note: This method is called after the database configured is saved, but before the - # database has any tables. Therefore, we only allow it to be run in that unique case. - if config_provider.config_exists() and not database_is_valid(): - combined = _reload_config() + @nickname("scSetupDatabase") + def get(self): + """ Invokes the alembic upgrade process. """ + # Note: This method is called after the database configured is saved, but before the + # database has any tables. Therefore, we only allow it to be run in that unique case. 
+ if config_provider.config_exists() and not database_is_valid(): + combined = _reload_config() - app.config['DB_URI'] = combined['DB_URI'] - db_uri = app.config['DB_URI'] - escaped_db_uri = db_uri.replace('%', '%%') + app.config["DB_URI"] = combined["DB_URI"] + db_uri = app.config["DB_URI"] + escaped_db_uri = db_uri.replace("%", "%%") - log_handler = _AlembicLogHandler() + log_handler = _AlembicLogHandler() - try: - run_alembic_migration(escaped_db_uri, log_handler, setup_app=False) - except Exception as ex: - return { - 'error': str(ex) - } + try: + run_alembic_migration(escaped_db_uri, log_handler, setup_app=False) + except Exception as ex: + return {"error": str(ex)} - return { - 'logs': log_handler.records - } + return {"logs": log_handler.records} - abort(403) + abort(403) -@resource('/v1/superuser/config/createsuperuser') +@resource("/v1/superuser/config/createsuperuser") class SuperUserCreateInitialSuperUser(ApiResource): - """ Resource for creating the initial super user. """ - schemas = { - 'CreateSuperUser': { - 'type': 'object', - 'description': 'Information for creating the initial super user', - 'required': [ - 'username', - 'password', - 'email' - ], - 'properties': { - 'username': { - 'type': 'string', - 'description': 'The username for the superuser' - }, - 'password': { - 'type': 'string', - 'description': 'The password for the superuser' - }, - 'email': { - 'type': 'string', - 'description': 'The e-mail address for the superuser' - }, - }, - }, - } + """ Resource for creating the initial super user. """ - @nickname('scCreateInitialSuperuser') - @validate_json_request('CreateSuperUser') - def post(self): - """ Creates the initial super user, updates the underlying configuration and + schemas = { + "CreateSuperUser": { + "type": "object", + "description": "Information for creating the initial super user", + "required": ["username", "password", "email"], + "properties": { + "username": { + "type": "string", + "description": "The username for the superuser", + }, + "password": { + "type": "string", + "description": "The password for the superuser", + }, + "email": { + "type": "string", + "description": "The e-mail address for the superuser", + }, + }, + } + } + + @nickname("scCreateInitialSuperuser") + @validate_json_request("CreateSuperUser") + def post(self): + """ Creates the initial super user, updates the underlying configuration and sets the current session to have that super user. """ - _reload_config() + _reload_config() - # Special security check: This method is only accessible when: - # - There is a valid config YAML file. - # - There are currently no users in the database (clean install) - # - # We do this special security check because at the point this method is called, the database - # is clean but does not (yet) have any super users for our permissions code to check against. - if config_provider.config_exists() and not database_has_users(): - data = request.get_json() - username = data['username'] - password = data['password'] - email = data['email'] + # Special security check: This method is only accessible when: + # - There is a valid config YAML file. + # - There are currently no users in the database (clean install) + # + # We do this special security check because at the point this method is called, the database + # is clean but does not (yet) have any super users for our permissions code to check against. 
+ if config_provider.config_exists() and not database_has_users(): + data = request.get_json() + username = data["username"] + password = data["password"] + email = data["email"] - # Create the user in the database. - superuser_uuid = model.create_superuser(username, password, email) + # Create the user in the database. + superuser_uuid = model.create_superuser(username, password, email) - # Add the user to the config. - config_object = config_provider.get_config() - config_object['SUPER_USERS'] = [username] - config_provider.save_config(config_object) + # Add the user to the config. + config_object = config_provider.get_config() + config_object["SUPER_USERS"] = [username] + config_provider.save_config(config_object) - # Update the in-memory config for the new superuser. - superusers.register_superuser(username) + # Update the in-memory config for the new superuser. + superusers.register_superuser(username) - return { - 'status': True - } + return {"status": True} - abort(403) + abort(403) -@resource('/v1/superuser/config/validate/') +@resource("/v1/superuser/config/validate/") class SuperUserConfigValidate(ApiResource): - """ Resource for validating a block of configuration against an external service. """ - schemas = { - 'ValidateConfig': { - 'type': 'object', - 'description': 'Validates configuration', - 'required': [ - 'config' - ], - 'properties': { - 'config': { - 'type': 'object' - }, - 'password': { - 'type': 'string', - 'description': 'The users password, used for auth validation' + """ Resource for validating a block of configuration against an external service. """ + + schemas = { + "ValidateConfig": { + "type": "object", + "description": "Validates configuration", + "required": ["config"], + "properties": { + "config": {"type": "object"}, + "password": { + "type": "string", + "description": "The users password, used for auth validation", + }, + }, } - }, - }, - } + } - @nickname('scValidateConfig') - @validate_json_request('ValidateConfig') - def post(self, service): - """ Validates the given config for the given service. """ - # Note: This method is called to validate the database configuration before super users exists, - # so we also allow it to be called if there is no valid registry configuration setup. Note that - # this is also safe since this method does not access any information not given in the request. - config = request.get_json()['config'] - validator_context = ValidatorContext.from_app(app, config, - request.get_json().get('password', ''), - instance_keys=instance_keys, - ip_resolver=ip_resolver, - config_provider=config_provider, - init_scripts_location=INIT_SCRIPTS_LOCATION) + @nickname("scValidateConfig") + @validate_json_request("ValidateConfig") + def post(self, service): + """ Validates the given config for the given service. """ + # Note: This method is called to validate the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. Note that + # this is also safe since this method does not access any information not given in the request. 
+ config = request.get_json()["config"] + validator_context = ValidatorContext.from_app( + app, + config, + request.get_json().get("password", ""), + instance_keys=instance_keys, + ip_resolver=ip_resolver, + config_provider=config_provider, + init_scripts_location=INIT_SCRIPTS_LOCATION, + ) - return validate_service_for_config(service, validator_context) + return validate_service_for_config(service, validator_context) -@resource('/v1/superuser/config/file/') +@resource("/v1/superuser/config/file/") class SuperUserConfigFile(ApiResource): - """ Resource for fetching the status of config files and overriding them. """ + """ Resource for fetching the status of config files and overriding them. """ - @nickname('scConfigFileExists') - def get(self, filename): - """ Returns whether the configuration file with the given name exists. """ - if not is_valid_config_upload_filename(filename): - abort(404) + @nickname("scConfigFileExists") + def get(self, filename): + """ Returns whether the configuration file with the given name exists. """ + if not is_valid_config_upload_filename(filename): + abort(404) - return { - 'exists': config_provider.volume_file_exists(filename) - } + return {"exists": config_provider.volume_file_exists(filename)} - @nickname('scUpdateConfigFile') - def post(self, filename): - """ Updates the configuration file with the given name. """ - if not is_valid_config_upload_filename(filename): - abort(404) + @nickname("scUpdateConfigFile") + def post(self, filename): + """ Updates the configuration file with the given name. """ + if not is_valid_config_upload_filename(filename): + abort(404) - # Note: This method can be called before the configuration exists - # to upload the database SSL cert. - uploaded_file = request.files['file'] - if not uploaded_file: - abort(400) + # Note: This method can be called before the configuration exists + # to upload the database SSL cert. + uploaded_file = request.files["file"] + if not uploaded_file: + abort(400) - config_provider.save_volume_file(filename, uploaded_file) - return { - 'status': True - } + config_provider.save_volume_file(filename, uploaded_file) + return {"status": True} diff --git a/config_app/config_endpoints/api/suconfig_models_interface.py b/config_app/config_endpoints/api/suconfig_models_interface.py index 9f8cbd0cb..d41a97d11 100644 --- a/config_app/config_endpoints/api/suconfig_models_interface.py +++ b/config_app/config_endpoints/api/suconfig_models_interface.py @@ -4,36 +4,36 @@ from six import add_metaclass @add_metaclass(ABCMeta) class SuperuserConfigDataInterface(object): - """ + """ Interface that represents all data store interactions required by the superuser config API. """ - @abstractmethod - def is_valid(self): - """ + @abstractmethod + def is_valid(self): + """ Returns true if the configured database is valid. """ - @abstractmethod - def has_users(self): - """ + @abstractmethod + def has_users(self): + """ Returns true if there are any users defined. """ - @abstractmethod - def create_superuser(self, username, password, email): - """ + @abstractmethod + def create_superuser(self, username, password, email): + """ Creates a new superuser with the given username, password and email. Returns the user's UUID. """ - @abstractmethod - def has_federated_login(self, username, service_name): - """ + @abstractmethod + def has_federated_login(self, username, service_name): + """ Returns true if the matching user has a federated login under the matching service. 
""" - @abstractmethod - def attach_federated_login(self, username, service_name, federated_username): - """ + @abstractmethod + def attach_federated_login(self, username, service_name, federated_username): + """ Attaches a federatated login to the matching user, under the given service. """ diff --git a/config_app/config_endpoints/api/suconfig_models_pre_oci.py b/config_app/config_endpoints/api/suconfig_models_pre_oci.py index fbc238078..9e512e88a 100644 --- a/config_app/config_endpoints/api/suconfig_models_pre_oci.py +++ b/config_app/config_endpoints/api/suconfig_models_pre_oci.py @@ -1,37 +1,39 @@ from data import model from data.database import User -from config_app.config_endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface +from config_app.config_endpoints.api.suconfig_models_interface import ( + SuperuserConfigDataInterface, +) class PreOCIModel(SuperuserConfigDataInterface): - # Note: this method is different than has_users: the user select will throw if the user - # table does not exist, whereas has_users assumes the table is valid - def is_valid(self): - try: - list(User.select().limit(1)) - return True - except: - return False + # Note: this method is different than has_users: the user select will throw if the user + # table does not exist, whereas has_users assumes the table is valid + def is_valid(self): + try: + list(User.select().limit(1)) + return True + except: + return False - def has_users(self): - return bool(list(User.select().limit(1))) + def has_users(self): + return bool(list(User.select().limit(1))) - def create_superuser(self, username, password, email): - return model.user.create_user(username, password, email, auto_verify=True).uuid + def create_superuser(self, username, password, email): + return model.user.create_user(username, password, email, auto_verify=True).uuid - def has_federated_login(self, username, service_name): - user = model.user.get_user(username) - if user is None: - return False + def has_federated_login(self, username, service_name): + user = model.user.get_user(username) + if user is None: + return False - return bool(model.user.lookup_federated_login(user, service_name)) + return bool(model.user.lookup_federated_login(user, service_name)) - def attach_federated_login(self, username, service_name, federated_username): - user = model.user.get_user(username) - if user is None: - return False + def attach_federated_login(self, username, service_name, federated_username): + user = model.user.get_user(username) + if user is None: + return False - model.user.attach_federated_login(user, service_name, federated_username) + model.user.attach_federated_login(user, service_name, federated_username) pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py index 7e5adccb5..db5d1d81c 100644 --- a/config_app/config_endpoints/api/superuser.py +++ b/config_app/config_endpoints/api/superuser.py @@ -12,7 +12,13 @@ from data.model import ServiceKeyDoesNotExist from util.config.validator import EXTRA_CA_DIRECTORY from config_app.config_endpoints.exception import InvalidRequest -from config_app.config_endpoints.api import resource, ApiResource, nickname, log_action, validate_json_request +from config_app.config_endpoints.api import ( + resource, + ApiResource, + nickname, + log_action, + validate_json_request, +) from config_app.config_endpoints.api.superuser_models_pre_oci import pre_oci_model from config_app.config_util.ssl import load_certificate, 
CertInvalidException from config_app.c_app import app, config_provider, INIT_SCRIPTS_LOCATION @@ -21,228 +27,233 @@ from config_app.c_app import app, config_provider, INIT_SCRIPTS_LOCATION logger = logging.getLogger(__name__) -@resource('/v1/superuser/customcerts/') +@resource("/v1/superuser/customcerts/") class SuperUserCustomCertificate(ApiResource): - """ Resource for managing a custom certificate. """ + """ Resource for managing a custom certificate. """ - @nickname('uploadCustomCertificate') - def post(self, certpath): - uploaded_file = request.files['file'] - if not uploaded_file: - raise InvalidRequest('Missing certificate file') + @nickname("uploadCustomCertificate") + def post(self, certpath): + uploaded_file = request.files["file"] + if not uploaded_file: + raise InvalidRequest("Missing certificate file") - # Save the certificate. - certpath = pathvalidate.sanitize_filename(certpath) - if not certpath.endswith('.crt'): - raise InvalidRequest('Invalid certificate file: must have suffix `.crt`') + # Save the certificate. + certpath = pathvalidate.sanitize_filename(certpath) + if not certpath.endswith(".crt"): + raise InvalidRequest("Invalid certificate file: must have suffix `.crt`") - logger.debug('Saving custom certificate %s', certpath) - cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) - config_provider.save_volume_file(cert_full_path, uploaded_file) - logger.debug('Saved custom certificate %s', certpath) + logger.debug("Saving custom certificate %s", certpath) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.save_volume_file(cert_full_path, uploaded_file) + logger.debug("Saved custom certificate %s", certpath) - # Validate the certificate. - try: - logger.debug('Loading custom certificate %s', certpath) - with config_provider.get_volume_file(cert_full_path) as f: - load_certificate(f.read()) - except CertInvalidException: - logger.exception('Got certificate invalid error for cert %s', certpath) - return '', 204 - except IOError: - logger.exception('Got IO error for cert %s', certpath) - return '', 204 + # Validate the certificate. + try: + logger.debug("Loading custom certificate %s", certpath) + with config_provider.get_volume_file(cert_full_path) as f: + load_certificate(f.read()) + except CertInvalidException: + logger.exception("Got certificate invalid error for cert %s", certpath) + return "", 204 + except IOError: + logger.exception("Got IO error for cert %s", certpath) + return "", 204 - # Call the update script with config dir location to install the certificate immediately. - if not app.config['TESTING']: - cert_dir = os.path.join(config_provider.get_config_dir_path(), EXTRA_CA_DIRECTORY) - if subprocess.call([os.path.join(INIT_SCRIPTS_LOCATION, 'certs_install.sh')], env={ 'CERTDIR': cert_dir }) != 0: - raise Exception('Could not install certificates') + # Call the update script with config dir location to install the certificate immediately. 
+ if not app.config["TESTING"]: + cert_dir = os.path.join( + config_provider.get_config_dir_path(), EXTRA_CA_DIRECTORY + ) + if ( + subprocess.call( + [os.path.join(INIT_SCRIPTS_LOCATION, "certs_install.sh")], + env={"CERTDIR": cert_dir}, + ) + != 0 + ): + raise Exception("Could not install certificates") - return '', 204 + return "", 204 - @nickname('deleteCustomCertificate') - def delete(self, certpath): - cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) - config_provider.remove_volume_file(cert_full_path) - return '', 204 + @nickname("deleteCustomCertificate") + def delete(self, certpath): + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.remove_volume_file(cert_full_path) + return "", 204 -@resource('/v1/superuser/customcerts') +@resource("/v1/superuser/customcerts") class SuperUserCustomCertificates(ApiResource): - """ Resource for managing custom certificates. """ + """ Resource for managing custom certificates. """ - @nickname('getCustomCertificates') - def get(self): - has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY) - extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY) - if extra_certs_found is None: - return { - 'status': 'file' if has_extra_certs_path else 'none', - } + @nickname("getCustomCertificates") + def get(self): + has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY) + extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY) + if extra_certs_found is None: + return {"status": "file" if has_extra_certs_path else "none"} - cert_views = [] - for extra_cert_path in extra_certs_found: - try: - cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path) - with config_provider.get_volume_file(cert_full_path) as f: - certificate = load_certificate(f.read()) - cert_views.append({ - 'path': extra_cert_path, - 'names': list(certificate.names), - 'expired': certificate.expired, - }) - except CertInvalidException as cie: - cert_views.append({ - 'path': extra_cert_path, - 'error': cie.message, - }) - except IOError as ioe: - cert_views.append({ - 'path': extra_cert_path, - 'error': ioe.message, - }) + cert_views = [] + for extra_cert_path in extra_certs_found: + try: + cert_full_path = config_provider.get_volume_path( + EXTRA_CA_DIRECTORY, extra_cert_path + ) + with config_provider.get_volume_file(cert_full_path) as f: + certificate = load_certificate(f.read()) + cert_views.append( + { + "path": extra_cert_path, + "names": list(certificate.names), + "expired": certificate.expired, + } + ) + except CertInvalidException as cie: + cert_views.append({"path": extra_cert_path, "error": cie.message}) + except IOError as ioe: + cert_views.append({"path": extra_cert_path, "error": ioe.message}) - return { - 'status': 'directory', - 'certs': cert_views, - } + return {"status": "directory", "certs": cert_views} -@resource('/v1/superuser/keys') +@resource("/v1/superuser/keys") class SuperUserServiceKeyManagement(ApiResource): - """ Resource for managing service keys.""" - schemas = { - 'CreateServiceKey': { - 'id': 'CreateServiceKey', - 'type': 'object', - 'description': 'Description of creation of a service key', - 'required': ['service', 'expiration'], - 'properties': { - 'service': { - 'type': 'string', - 'description': 'The service authenticating with this key', - }, - 'name': { - 'type': 'string', - 'description': 'The friendly name of a service key', - }, - 'metadata': { - 'type': 
'object', - 'description': 'The key/value pairs of this key\'s metadata', - }, - 'notes': { - 'type': 'string', - 'description': 'If specified, the extra notes for the key', - }, - 'expiration': { - 'description': 'The expiration date as a unix timestamp', - 'anyOf': [{'type': 'number'}, {'type': 'null'}], - }, - }, - }, - } + """ Resource for managing service keys.""" - @nickname('listServiceKeys') - def get(self): - keys = pre_oci_model.list_all_service_keys() - - return jsonify({ - 'keys': [key.to_dict() for key in keys], - }) - - @nickname('createServiceKey') - @validate_json_request('CreateServiceKey') - def post(self): - body = request.get_json() - - # Ensure we have a valid expiration date if specified. - expiration_date = body.get('expiration', None) - if expiration_date is not None: - try: - expiration_date = datetime.utcfromtimestamp(float(expiration_date)) - except ValueError as ve: - raise InvalidRequest('Invalid expiration date: %s' % ve) - - if expiration_date <= datetime.now(): - raise InvalidRequest('Expiration date cannot be in the past') - - # Create the metadata for the key. - metadata = body.get('metadata', {}) - metadata.update({ - 'created_by': 'Quay Superuser Panel', - 'ip': request.remote_addr, - }) - - # Generate a key with a private key that we *never save*. - (private_key, key_id) = pre_oci_model.generate_service_key(body['service'], expiration_date, - metadata=metadata, - name=body.get('name', '')) - # Auto-approve the service key. - pre_oci_model.approve_service_key(key_id, ServiceKeyApprovalType.SUPERUSER, - notes=body.get('notes', '')) - - # Log the creation and auto-approval of the service key. - key_log_metadata = { - 'kid': key_id, - 'preshared': True, - 'service': body['service'], - 'name': body.get('name', ''), - 'expiration_date': expiration_date, - 'auto_approved': True, + schemas = { + "CreateServiceKey": { + "id": "CreateServiceKey", + "type": "object", + "description": "Description of creation of a service key", + "required": ["service", "expiration"], + "properties": { + "service": { + "type": "string", + "description": "The service authenticating with this key", + }, + "name": { + "type": "string", + "description": "The friendly name of a service key", + }, + "metadata": { + "type": "object", + "description": "The key/value pairs of this key's metadata", + }, + "notes": { + "type": "string", + "description": "If specified, the extra notes for the key", + }, + "expiration": { + "description": "The expiration date as a unix timestamp", + "anyOf": [{"type": "number"}, {"type": "null"}], + }, + }, + } } - log_action('service_key_create', None, key_log_metadata) - log_action('service_key_approve', None, key_log_metadata) + @nickname("listServiceKeys") + def get(self): + keys = pre_oci_model.list_all_service_keys() - return jsonify({ - 'kid': key_id, - 'name': body.get('name', ''), - 'service': body['service'], - 'public_key': private_key.publickey().exportKey('PEM'), - 'private_key': private_key.exportKey('PEM'), - }) + return jsonify({"keys": [key.to_dict() for key in keys]}) -@resource('/v1/superuser/approvedkeys/') + @nickname("createServiceKey") + @validate_json_request("CreateServiceKey") + def post(self): + body = request.get_json() + + # Ensure we have a valid expiration date if specified. 
+ expiration_date = body.get("expiration", None) + if expiration_date is not None: + try: + expiration_date = datetime.utcfromtimestamp(float(expiration_date)) + except ValueError as ve: + raise InvalidRequest("Invalid expiration date: %s" % ve) + + if expiration_date <= datetime.now(): + raise InvalidRequest("Expiration date cannot be in the past") + + # Create the metadata for the key. + metadata = body.get("metadata", {}) + metadata.update( + {"created_by": "Quay Superuser Panel", "ip": request.remote_addr} + ) + + # Generate a key with a private key that we *never save*. + (private_key, key_id) = pre_oci_model.generate_service_key( + body["service"], + expiration_date, + metadata=metadata, + name=body.get("name", ""), + ) + # Auto-approve the service key. + pre_oci_model.approve_service_key( + key_id, ServiceKeyApprovalType.SUPERUSER, notes=body.get("notes", "") + ) + + # Log the creation and auto-approval of the service key. + key_log_metadata = { + "kid": key_id, + "preshared": True, + "service": body["service"], + "name": body.get("name", ""), + "expiration_date": expiration_date, + "auto_approved": True, + } + + log_action("service_key_create", None, key_log_metadata) + log_action("service_key_approve", None, key_log_metadata) + + return jsonify( + { + "kid": key_id, + "name": body.get("name", ""), + "service": body["service"], + "public_key": private_key.publickey().exportKey("PEM"), + "private_key": private_key.exportKey("PEM"), + } + ) + + +@resource("/v1/superuser/approvedkeys/") class SuperUserServiceKeyApproval(ApiResource): - """ Resource for approving service keys. """ + """ Resource for approving service keys. """ - schemas = { - 'ApproveServiceKey': { - 'id': 'ApproveServiceKey', - 'type': 'object', - 'description': 'Information for approving service keys', - 'properties': { - 'notes': { - 'type': 'string', - 'description': 'Optional approval notes', - }, - }, - }, - } + schemas = { + "ApproveServiceKey": { + "id": "ApproveServiceKey", + "type": "object", + "description": "Information for approving service keys", + "properties": { + "notes": {"type": "string", "description": "Optional approval notes"} + }, + } + } - @nickname('approveServiceKey') - @validate_json_request('ApproveServiceKey') - def post(self, kid): - notes = request.get_json().get('notes', '') - try: - key = pre_oci_model.approve_service_key(kid, ServiceKeyApprovalType.SUPERUSER, notes=notes) + @nickname("approveServiceKey") + @validate_json_request("ApproveServiceKey") + def post(self, kid): + notes = request.get_json().get("notes", "") + try: + key = pre_oci_model.approve_service_key( + kid, ServiceKeyApprovalType.SUPERUSER, notes=notes + ) - # Log the approval of the service key. - key_log_metadata = { - 'kid': kid, - 'service': key.service, - 'name': key.name, - 'expiration_date': key.expiration_date, - } + # Log the approval of the service key. 
+ key_log_metadata = { + "kid": kid, + "service": key.service, + "name": key.name, + "expiration_date": key.expiration_date, + } - # Note: this may not actually be the current person modifying the config, but if they're in the config tool, - # they have full access to the DB and could pretend to be any user, so pulling any superuser is likely fine - super_user = app.config.get('SUPER_USERS', [None])[0] - log_action('service_key_approve', super_user, key_log_metadata) - except ServiceKeyDoesNotExist: - raise NotFound() - except ServiceKeyAlreadyApproved: - pass + # Note: this may not actually be the current person modifying the config, but if they're in the config tool, + # they have full access to the DB and could pretend to be any user, so pulling any superuser is likely fine + super_user = app.config.get("SUPER_USERS", [None])[0] + log_action("service_key_approve", super_user, key_log_metadata) + except ServiceKeyDoesNotExist: + raise NotFound() + except ServiceKeyAlreadyApproved: + pass - return make_response('', 201) + return make_response("", 201) diff --git a/config_app/config_endpoints/api/superuser_models_interface.py b/config_app/config_endpoints/api/superuser_models_interface.py index 53efc9aec..efd8a0f04 100644 --- a/config_app/config_endpoints/api/superuser_models_interface.py +++ b/config_app/config_endpoints/api/superuser_models_interface.py @@ -6,21 +6,33 @@ from config_app.config_endpoints.api import format_date def user_view(user): - return { - 'name': user.username, - 'kind': 'user', - 'is_robot': user.robot, - } + return {"name": user.username, "kind": "user", "is_robot": user.robot} -class RepositoryBuild(namedtuple('RepositoryBuild', - ['uuid', 'logs_archived', 'repository_namespace_user_username', - 'repository_name', - 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', - 'display_name', - 'started', 'job_config', 'phase', 'status', 'error', - 'archive_url'])): - """ +class RepositoryBuild( + namedtuple( + "RepositoryBuild", + [ + "uuid", + "logs_archived", + "repository_namespace_user_username", + "repository_name", + "can_write", + "can_read", + "pull_robot", + "resource_key", + "trigger", + "display_name", + "started", + "job_config", + "phase", + "status", + "error", + "archive_url", + ], + ) +): + """ RepositoryBuild represents a build associated with a repostiory :type uuid: string :type logs_archived: boolean @@ -40,42 +52,46 @@ class RepositoryBuild(namedtuple('RepositoryBuild', :type archive_url: string """ - def to_dict(self): + def to_dict(self): - resp = { - 'id': self.uuid, - 'phase': self.phase, - 'started': format_date(self.started), - 'display_name': self.display_name, - 'status': self.status or {}, - 'subdirectory': self.job_config.get('build_subdir', ''), - 'dockerfile_path': self.job_config.get('build_subdir', ''), - 'context': self.job_config.get('context', ''), - 'tags': self.job_config.get('docker_tags', []), - 'manual_user': self.job_config.get('manual_user', None), - 'is_writer': self.can_write, - 'trigger': self.trigger.to_dict(), - 'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None, - 'resource_key': self.resource_key, - 'pull_robot': user_view(self.pull_robot) if self.pull_robot else None, - 'repository': { - 'namespace': self.repository_namespace_user_username, - 'name': self.repository_name - }, - 'error': self.error, - } + resp = { + "id": self.uuid, + "phase": self.phase, + "started": format_date(self.started), + "display_name": self.display_name, + "status": self.status or {}, + 
"subdirectory": self.job_config.get("build_subdir", ""), + "dockerfile_path": self.job_config.get("build_subdir", ""), + "context": self.job_config.get("context", ""), + "tags": self.job_config.get("docker_tags", []), + "manual_user": self.job_config.get("manual_user", None), + "is_writer": self.can_write, + "trigger": self.trigger.to_dict(), + "trigger_metadata": self.job_config.get("trigger_metadata", None) + if self.can_read + else None, + "resource_key": self.resource_key, + "pull_robot": user_view(self.pull_robot) if self.pull_robot else None, + "repository": { + "namespace": self.repository_namespace_user_username, + "name": self.repository_name, + }, + "error": self.error, + } - if self.can_write: - if self.resource_key is not None: - resp['archive_url'] = self.archive_url - elif self.job_config.get('archive_url', None): - resp['archive_url'] = self.job_config['archive_url'] + if self.can_write: + if self.resource_key is not None: + resp["archive_url"] = self.archive_url + elif self.job_config.get("archive_url", None): + resp["archive_url"] = self.job_config["archive_url"] - return resp + return resp -class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])): - """ +class Approval( + namedtuple("Approval", ["approver", "approval_type", "approved_date", "notes"]) +): + """ Approval represents whether a key has been approved or not :type approver: User :type approval_type: string @@ -83,19 +99,32 @@ class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_da :type notes: string """ - def to_dict(self): - return { - 'approver': self.approver.to_dict() if self.approver else None, - 'approval_type': self.approval_type, - 'approved_date': self.approved_date, - 'notes': self.notes, - } + def to_dict(self): + return { + "approver": self.approver.to_dict() if self.approver else None, + "approval_type": self.approval_type, + "approved_date": self.approved_date, + "notes": self.notes, + } class ServiceKey( - namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date', - 'expiration_date', 'rotation_duration', 'approval'])): - """ + namedtuple( + "ServiceKey", + [ + "name", + "kid", + "service", + "jwk", + "metadata", + "created_date", + "expiration_date", + "rotation_duration", + "approval", + ], + ) +): + """ ServiceKey is an apostille signing key :type name: string :type kid: int @@ -109,22 +138,22 @@ class ServiceKey( """ - def to_dict(self): - return { - 'name': self.name, - 'kid': self.kid, - 'service': self.service, - 'jwk': self.jwk, - 'metadata': self.metadata, - 'created_date': self.created_date, - 'expiration_date': self.expiration_date, - 'rotation_duration': self.rotation_duration, - 'approval': self.approval.to_dict() if self.approval is not None else None, - } + def to_dict(self): + return { + "name": self.name, + "kid": self.kid, + "service": self.service, + "jwk": self.jwk, + "metadata": self.metadata, + "created_date": self.created_date, + "expiration_date": self.expiration_date, + "rotation_duration": self.rotation_duration, + "approval": self.approval.to_dict() if self.approval is not None else None, + } -class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])): - """ +class User(namedtuple("User", ["username", "email", "verified", "enabled", "robot"])): + """ User represents a single user. 
:type username: string :type email: string @@ -133,41 +162,38 @@ class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robo :type robot: User """ - def to_dict(self): - user_data = { - 'kind': 'user', - 'name': self.username, - 'username': self.username, - 'email': self.email, - 'verified': self.verified, - 'enabled': self.enabled, - } + def to_dict(self): + user_data = { + "kind": "user", + "name": self.username, + "username": self.username, + "email": self.email, + "verified": self.verified, + "enabled": self.enabled, + } - return user_data + return user_data -class Organization(namedtuple('Organization', ['username', 'email'])): - """ +class Organization(namedtuple("Organization", ["username", "email"])): + """ Organization represents a single org. :type username: string :type email: string """ - def to_dict(self): - return { - 'name': self.username, - 'email': self.email, - } + def to_dict(self): + return {"name": self.username, "email": self.email} @add_metaclass(ABCMeta) class SuperuserDataInterface(object): - """ + """ Interface that represents all data store interactions required by a superuser api. """ - @abstractmethod - def list_all_service_keys(self): - """ + @abstractmethod + def list_all_service_keys(self): + """ Returns a list of service keys """ diff --git a/config_app/config_endpoints/api/superuser_models_pre_oci.py b/config_app/config_endpoints/api/superuser_models_pre_oci.py index c35b94243..37864ceee 100644 --- a/config_app/config_endpoints/api/superuser_models_pre_oci.py +++ b/config_app/config_endpoints/api/superuser_models_pre_oci.py @@ -1,60 +1,85 @@ from data import model -from config_app.config_endpoints.api.superuser_models_interface import (SuperuserDataInterface, User, ServiceKey, - Approval) +from config_app.config_endpoints.api.superuser_models_interface import ( + SuperuserDataInterface, + User, + ServiceKey, + Approval, +) def _create_user(user): - if user is None: - return None - return User(user.username, user.email, user.verified, user.enabled, user.robot) + if user is None: + return None + return User(user.username, user.email, user.verified, user.enabled, user.robot) def _create_key(key): - approval = None - if key.approval is not None: - approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, - key.approval.approved_date, - key.approval.notes) + approval = None + if key.approval is not None: + approval = Approval( + _create_user(key.approval.approver), + key.approval.approval_type, + key.approval.approved_date, + key.approval.notes, + ) - return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, - key.expiration_date, - key.rotation_duration, approval) + return ServiceKey( + key.name, + key.kid, + key.service, + key.jwk, + key.metadata, + key.created_date, + key.expiration_date, + key.rotation_duration, + approval, + ) class ServiceKeyDoesNotExist(Exception): - pass + pass class ServiceKeyAlreadyApproved(Exception): - pass + pass class PreOCIModel(SuperuserDataInterface): - """ + """ PreOCIModel implements the data model for the SuperUser using a database schema before it was changed to support the OCI specification. 
""" - def list_all_service_keys(self): - keys = model.service_keys.list_all_keys() - return [_create_key(key) for key in keys] + def list_all_service_keys(self): + keys = model.service_keys.list_all_keys() + return [_create_key(key) for key in keys] - def approve_service_key(self, kid, approval_type, notes=''): - try: - key = model.service_keys.approve_service_key(kid, approval_type, notes=notes) - return _create_key(key) - except model.ServiceKeyDoesNotExist: - raise ServiceKeyDoesNotExist - except model.ServiceKeyAlreadyApproved: - raise ServiceKeyAlreadyApproved + def approve_service_key(self, kid, approval_type, notes=""): + try: + key = model.service_keys.approve_service_key( + kid, approval_type, notes=notes + ) + return _create_key(key) + except model.ServiceKeyDoesNotExist: + raise ServiceKeyDoesNotExist + except model.ServiceKeyAlreadyApproved: + raise ServiceKeyAlreadyApproved - def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, - rotation_duration=None): - (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, - metadata=metadata, name=name) + def generate_service_key( + self, + service, + expiration_date, + kid=None, + name="", + metadata=None, + rotation_duration=None, + ): + (private_key, key) = model.service_keys.generate_service_key( + service, expiration_date, metadata=metadata, name=name + ) - return private_key, key.kid + return private_key, key.kid pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/tar_config_loader.py b/config_app/config_endpoints/api/tar_config_loader.py index 8944d9092..06b57cb85 100644 --- a/config_app/config_endpoints/api/tar_config_loader.py +++ b/config_app/config_endpoints/api/tar_config_loader.py @@ -10,53 +10,59 @@ from data.database import configure from config_app.c_app import app, config_provider from config_app.config_endpoints.api import resource, ApiResource, nickname -from config_app.config_util.tar import tarinfo_filter_partial, strip_absolute_path_and_add_trailing_dir +from config_app.config_util.tar import ( + tarinfo_filter_partial, + strip_absolute_path_and_add_trailing_dir, +) -@resource('/v1/configapp/initialization') +@resource("/v1/configapp/initialization") class ConfigInitialization(ApiResource): - """ + """ Resource for dealing with any initialization logic for the config app """ - @nickname('scStartNewConfig') - def post(self): - config_provider.new_config_dir() - return make_response('OK') + @nickname("scStartNewConfig") + def post(self): + config_provider.new_config_dir() + return make_response("OK") -@resource('/v1/configapp/tarconfig') +@resource("/v1/configapp/tarconfig") class TarConfigLoader(ApiResource): - """ + """ Resource for dealing with configuration as a tarball, including loading and generating functions """ - @nickname('scGetConfigTarball') - def get(self): - config_path = config_provider.get_config_dir_path() - tar_dir_prefix = strip_absolute_path_and_add_trailing_dir(config_path) - temp = tempfile.NamedTemporaryFile() + @nickname("scGetConfigTarball") + def get(self): + config_path = config_provider.get_config_dir_path() + tar_dir_prefix = strip_absolute_path_and_add_trailing_dir(config_path) + temp = tempfile.NamedTemporaryFile() - with closing(tarfile.open(temp.name, mode="w|gz")) as tar: - for name in os.listdir(config_path): - tar.add(os.path.join(config_path, name), filter=tarinfo_filter_partial(tar_dir_prefix)) - return send_file(temp.name, mimetype='application/gzip') + with 
closing(tarfile.open(temp.name, mode="w|gz")) as tar: + for name in os.listdir(config_path): + tar.add( + os.path.join(config_path, name), + filter=tarinfo_filter_partial(tar_dir_prefix), + ) + return send_file(temp.name, mimetype="application/gzip") - @nickname('scUploadTarballConfig') - def put(self): - """ Loads tarball config into the config provider """ - # Generate a new empty dir to load the config into - config_provider.new_config_dir() - input_stream = request.stream - with tarfile.open(mode="r|gz", fileobj=input_stream) as tar_stream: - tar_stream.extractall(config_provider.get_config_dir_path()) + @nickname("scUploadTarballConfig") + def put(self): + """ Loads tarball config into the config provider """ + # Generate a new empty dir to load the config into + config_provider.new_config_dir() + input_stream = request.stream + with tarfile.open(mode="r|gz", fileobj=input_stream) as tar_stream: + tar_stream.extractall(config_provider.get_config_dir_path()) - config_provider.create_copy_of_config_dir() + config_provider.create_copy_of_config_dir() - # now try to connect to the db provided in their config to validate it works - combined = dict(**app.config) - combined.update(config_provider.get_config()) - configure(combined) + # now try to connect to the db provided in their config to validate it works + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) - return make_response('OK') + return make_response("OK") diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py index 85008c87e..9ab787a47 100644 --- a/config_app/config_endpoints/api/user.py +++ b/config_app/config_endpoints/api/user.py @@ -3,12 +3,12 @@ from config_app.config_endpoints.api import resource, ApiResource, nickname from config_app.config_endpoints.api.superuser_models_interface import user_view -@resource('/v1/user/') +@resource("/v1/user/") class User(ApiResource): - """ Operations related to users. """ + """ Operations related to users. """ - @nickname('getLoggedInUser') - def get(self): - """ Get user information for the authenticated user. """ - user = get_authenticated_user() - return user_view(user) + @nickname("getLoggedInUser") + def get(self): + """ Get user information for the authenticated user. """ + user = get_authenticated_user() + return user_view(user) diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py index c277f3b35..1cf874d5d 100644 --- a/config_app/config_endpoints/common.py +++ b/config_app/config_endpoints/common.py @@ -14,60 +14,72 @@ from config_app.config_util.k8sconfig import get_k8s_namespace def truthy_bool(param): - return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'} + return param not in {False, "false", "False", "0", "FALSE", "", "null"} -DEFAULT_JS_BUNDLE_NAME = 'configapp' -PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>') +DEFAULT_JS_BUNDLE_NAME = "configapp" +PARAM_REGEX = re.compile(r"<([^:>]+:)*([\w]+)>") logger = logging.getLogger(__name__) TYPE_CONVERTER = { - truthy_bool: 'boolean', - str: 'string', - basestring: 'string', - reqparse.text_type: 'string', - int: 'integer', + truthy_bool: "boolean", + str: "string", + basestring: "string", + reqparse.text_type: "string", + int: "integer", } def _list_files(path, extension, contains=""): - """ Returns a list of all the files with the given extension found under the given path. """ + """ Returns a list of all the files with the given extension found under the given path. 
""" - def matches(f): - return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0] + def matches(f): + return ( + os.path.splitext(f)[1] == "." + extension + and contains in os.path.splitext(f)[0] + ) - def join_path(dp, f): - # Remove the static/ prefix. It is added in the template. - return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):] + def join_path(dp, f): + # Remove the static/ prefix. It is added in the template. + return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len("config_app/static/") :] - filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path) - return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] + filepath = os.path.join(os.path.join(ROOT_DIR, "config_app/static/"), path) + return [ + join_path(dp, f) + for dp, _, files in os.walk(filepath) + for f in files + if matches(f) + ] -FONT_AWESOME_4 = 'netdna.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.css' +FONT_AWESOME_4 = "netdna.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.css" -def render_page_template(name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs): - """ Renders the page template with the given name as the response and returns its contents. """ - main_scripts = _list_files('build', 'js', js_bundle_name) +def render_page_template( + name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs +): + """ Renders the page template with the given name as the response and returns its contents. """ + main_scripts = _list_files("build", "js", js_bundle_name) - use_cdn = os.getenv('TESTING') == 'true' + use_cdn = os.getenv("TESTING") == "true" - external_styles = get_external_css(local=not use_cdn, exclude=FONT_AWESOME_4) - external_scripts = get_external_javascript(local=not use_cdn) + external_styles = get_external_css(local=not use_cdn, exclude=FONT_AWESOME_4) + external_scripts = get_external_javascript(local=not use_cdn) - contents = render_template(name, - route_data=route_data, - main_scripts=main_scripts, - external_styles=external_styles, - external_scripts=external_scripts, - config_set=frontend_visible_config(app.config), - kubernetes_namespace=IS_KUBERNETES and get_k8s_namespace(), - **kwargs) + contents = render_template( + name, + route_data=route_data, + main_scripts=main_scripts, + external_styles=external_styles, + external_scripts=external_scripts, + config_set=frontend_visible_config(app.config), + kubernetes_namespace=IS_KUBERNETES and get_k8s_namespace(), + **kwargs + ) - resp = make_response(contents) - resp.headers['X-FRAME-OPTIONS'] = 'DENY' - return resp + resp = make_response(contents) + resp.headers["X-FRAME-OPTIONS"] = "DENY" + return resp def fully_qualified_name(method_view_class): - return '%s.%s' % (method_view_class.__module__, method_view_class.__name__) + return "%s.%s" % (method_view_class.__module__, method_view_class.__name__) diff --git a/config_app/config_endpoints/exception.py b/config_app/config_endpoints/exception.py index 7f7f75a41..03e29fba9 100644 --- a/config_app/config_endpoints/exception.py +++ b/config_app/config_endpoints/exception.py @@ -5,11 +5,11 @@ from werkzeug.exceptions import HTTPException class ApiErrorType(Enum): - invalid_request = 'invalid_request' + invalid_request = "invalid_request" class ApiException(HTTPException): - """ + """ Represents an error in the application/problem+json format. See: https://tools.ietf.org/html/rfc7807 @@ -31,36 +31,42 @@ class ApiException(HTTPException): information if dereferenced. 
""" - def __init__(self, error_type, status_code, error_description, payload=None): - Exception.__init__(self) - self.error_description = error_description - self.code = status_code - self.payload = payload - self.error_type = error_type - self.data = self.to_dict() + def __init__(self, error_type, status_code, error_description, payload=None): + Exception.__init__(self) + self.error_description = error_description + self.code = status_code + self.payload = payload + self.error_type = error_type + self.data = self.to_dict() - super(ApiException, self).__init__(error_description, None) + super(ApiException, self).__init__(error_description, None) - def to_dict(self): - rv = dict(self.payload or ()) + def to_dict(self): + rv = dict(self.payload or ()) - if self.error_description is not None: - rv['detail'] = self.error_description - rv['error_message'] = self.error_description # TODO: deprecate + if self.error_description is not None: + rv["detail"] = self.error_description + rv["error_message"] = self.error_description # TODO: deprecate - rv['error_type'] = self.error_type.value # TODO: deprecate - rv['title'] = self.error_type.value - rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True) - rv['status'] = self.code + rv["error_type"] = self.error_type.value # TODO: deprecate + rv["title"] = self.error_type.value + rv["type"] = url_for( + "api.error", error_type=self.error_type.value, _external=True + ) + rv["status"] = self.code - return rv + return rv class InvalidRequest(ApiException): - def __init__(self, error_description, payload=None): - ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload) + def __init__(self, error_description, payload=None): + ApiException.__init__( + self, ApiErrorType.invalid_request, 400, error_description, payload + ) class InvalidResponse(ApiException): - def __init__(self, error_description, payload=None): - ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload) + def __init__(self, error_description, payload=None): + ApiException.__init__( + self, ApiErrorType.invalid_response, 400, error_description, payload + ) diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py index 32dda15e2..8db100705 100644 --- a/config_app/config_endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -5,19 +5,21 @@ from config_app.config_endpoints.common import render_page_template from config_app.config_endpoints.api.discovery import generate_route_data from config_app.config_endpoints.api import no_cache -setup_web = Blueprint('setup_web', __name__, template_folder='templates') +setup_web = Blueprint("setup_web", __name__, template_folder="templates") @lru_cache(maxsize=1) def _get_route_data(): - return generate_route_data() + return generate_route_data() def render_page_template_with_routedata(name, *args, **kwargs): - return render_page_template(name, _get_route_data(), *args, **kwargs) + return render_page_template(name, _get_route_data(), *args, **kwargs) @no_cache -@setup_web.route('/', methods=['GET'], defaults={'path': ''}) +@setup_web.route("/", methods=["GET"], defaults={"path": ""}) def index(path, **kwargs): - return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs) + return render_page_template_with_routedata( + "index.html", js_bundle_name="configapp", **kwargs + ) diff --git a/config_app/config_test/test_api_usage.py b/config_app/config_test/test_api_usage.py index 
aa34b3495..4816e0aa5 100644 --- a/config_app/config_test/test_api_usage.py +++ b/config_app/config_test/test_api_usage.py @@ -5,204 +5,242 @@ from data import database, model from util.security.test.test_ssl_util import generate_test_cert from config_app.c_app import app -from config_app.config_test import ApiTestCase, all_queues, ADMIN_ACCESS_USER, ADMIN_ACCESS_EMAIL +from config_app.config_test import ( + ApiTestCase, + all_queues, + ADMIN_ACCESS_USER, + ADMIN_ACCESS_EMAIL, +) from config_app.config_endpoints.api import api_bp -from config_app.config_endpoints.api.superuser import SuperUserCustomCertificate, SuperUserCustomCertificates -from config_app.config_endpoints.api.suconfig import SuperUserConfig, SuperUserCreateInitialSuperUser, \ - SuperUserConfigFile, SuperUserRegistryStatus +from config_app.config_endpoints.api.superuser import ( + SuperUserCustomCertificate, + SuperUserCustomCertificates, +) +from config_app.config_endpoints.api.suconfig import ( + SuperUserConfig, + SuperUserCreateInitialSuperUser, + SuperUserConfigFile, + SuperUserRegistryStatus, +) try: - app.register_blueprint(api_bp, url_prefix='/api') + app.register_blueprint(api_bp, url_prefix="/api") except ValueError: - # This blueprint was already registered - pass + # This blueprint was already registered + pass class TestSuperUserCreateInitialSuperUser(ApiTestCase): - def test_create_superuser(self): - data = { - 'username': 'newsuper', - 'password': 'password', - 'email': 'jschorr+fake@devtable.com', - } + def test_create_superuser(self): + data = { + "username": "newsuper", + "password": "password", + "email": "jschorr+fake@devtable.com", + } - # Add some fake config. - fake_config = { - 'AUTHENTICATION_TYPE': 'Database', - 'SECRET_KEY': 'fakekey', - } + # Add some fake config. + fake_config = {"AUTHENTICATION_TYPE": "Database", "SECRET_KEY": "fakekey"} - self.putJsonResponse(SuperUserConfig, data=dict(config=fake_config, hostname='fakehost')) + self.putJsonResponse( + SuperUserConfig, data=dict(config=fake_config, hostname="fakehost") + ) - # Try to write with config. Should 403 since there are users in the DB. - self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + # Try to write with config. Should 403 since there are users in the DB. + self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) - # Delete all users in the DB. - for user in list(database.User.select()): - model.user.delete_user(user, all_queues) + # Delete all users in the DB. + for user in list(database.User.select()): + model.user.delete_user(user, all_queues) - # Create the superuser. - self.postJsonResponse(SuperUserCreateInitialSuperUser, data=data) + # Create the superuser. + self.postJsonResponse(SuperUserCreateInitialSuperUser, data=data) - # Ensure the user exists in the DB. - self.assertIsNotNone(model.user.get_user('newsuper')) + # Ensure the user exists in the DB. + self.assertIsNotNone(model.user.get_user("newsuper")) - # Ensure that the current user is a superuser in the config. - json = self.getJsonResponse(SuperUserConfig) - self.assertEquals(['newsuper'], json['config']['SUPER_USERS']) + # Ensure that the current user is a superuser in the config. + json = self.getJsonResponse(SuperUserConfig) + self.assertEquals(["newsuper"], json["config"]["SUPER_USERS"]) - # Ensure that the current user is a superuser in memory by trying to call an API - # that will fail otherwise. 
- self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) + # Ensure that the current user is a superuser in memory by trying to call an API + # that will fail otherwise. + self.getResponse(SuperUserConfigFile, params=dict(filename="ssl.cert")) class TestSuperUserConfig(ApiTestCase): - def test_get_status_update_config(self): - # With no config the status should be 'config-db'. - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertEquals('config-db', json['status']) + def test_get_status_update_config(self): + # With no config the status should be 'config-db'. + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals("config-db", json["status"]) - # Add some fake config. - fake_config = { - 'AUTHENTICATION_TYPE': 'Database', - 'SECRET_KEY': 'fakekey', - } + # Add some fake config. + fake_config = {"AUTHENTICATION_TYPE": "Database", "SECRET_KEY": "fakekey"} - json = self.putJsonResponse(SuperUserConfig, data=dict(config=fake_config, - hostname='fakehost')) - self.assertEquals('fakekey', json['config']['SECRET_KEY']) - self.assertEquals('fakehost', json['config']['SERVER_HOSTNAME']) - self.assertEquals('Database', json['config']['AUTHENTICATION_TYPE']) + json = self.putJsonResponse( + SuperUserConfig, data=dict(config=fake_config, hostname="fakehost") + ) + self.assertEquals("fakekey", json["config"]["SECRET_KEY"]) + self.assertEquals("fakehost", json["config"]["SERVER_HOSTNAME"]) + self.assertEquals("Database", json["config"]["AUTHENTICATION_TYPE"]) - # With config the status should be 'setup-db'. - # TODO: fix this test - # json = self.getJsonResponse(SuperUserRegistryStatus) - # self.assertEquals('setup-db', json['status']) + # With config the status should be 'setup-db'. + # TODO: fix this test + # json = self.getJsonResponse(SuperUserRegistryStatus) + # self.assertEquals('setup-db', json['status']) - def test_config_file(self): - # Try for an invalid file. Should 404. - self.getResponse(SuperUserConfigFile, params=dict(filename='foobar'), expected_code=404) + def test_config_file(self): + # Try for an invalid file. Should 404. + self.getResponse( + SuperUserConfigFile, params=dict(filename="foobar"), expected_code=404 + ) - # Try for a valid filename. Should not exist. - json = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) - self.assertFalse(json['exists']) + # Try for a valid filename. Should not exist. + json = self.getJsonResponse( + SuperUserConfigFile, params=dict(filename="ssl.cert") + ) + self.assertFalse(json["exists"]) - # Add the file. - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), - file=(StringIO('my file contents'), 'ssl.cert')) + # Add the file. + self.postResponse( + SuperUserConfigFile, + params=dict(filename="ssl.cert"), + file=(StringIO("my file contents"), "ssl.cert"), + ) - # Should now exist. - json = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) - self.assertTrue(json['exists']) + # Should now exist. + json = self.getJsonResponse( + SuperUserConfigFile, params=dict(filename="ssl.cert") + ) + self.assertTrue(json["exists"]) - def test_update_with_external_auth(self): - # Run a mock LDAP. 
- mockldap = MockLdap({ - 'dc=quay,dc=io': { - 'dc': ['quay', 'io'] - }, - 'ou=employees,dc=quay,dc=io': { - 'dc': ['quay', 'io'], - 'ou': 'employees' - }, - 'uid=' + ADMIN_ACCESS_USER + ',ou=employees,dc=quay,dc=io': { - 'dc': ['quay', 'io'], - 'ou': 'employees', - 'uid': [ADMIN_ACCESS_USER], - 'userPassword': ['password'], - 'mail': [ADMIN_ACCESS_EMAIL], - }, - }) + def test_update_with_external_auth(self): + # Run a mock LDAP. + mockldap = MockLdap( + { + "dc=quay,dc=io": {"dc": ["quay", "io"]}, + "ou=employees,dc=quay,dc=io": {"dc": ["quay", "io"], "ou": "employees"}, + "uid=" + + ADMIN_ACCESS_USER + + ",ou=employees,dc=quay,dc=io": { + "dc": ["quay", "io"], + "ou": "employees", + "uid": [ADMIN_ACCESS_USER], + "userPassword": ["password"], + "mail": [ADMIN_ACCESS_EMAIL], + }, + } + ) - config = { - 'AUTHENTICATION_TYPE': 'LDAP', - 'LDAP_BASE_DN': ['dc=quay', 'dc=io'], - 'LDAP_ADMIN_DN': 'uid=devtable,ou=employees,dc=quay,dc=io', - 'LDAP_ADMIN_PASSWD': 'password', - 'LDAP_USER_RDN': ['ou=employees'], - 'LDAP_UID_ATTR': 'uid', - 'LDAP_EMAIL_ATTR': 'mail', - } + config = { + "AUTHENTICATION_TYPE": "LDAP", + "LDAP_BASE_DN": ["dc=quay", "dc=io"], + "LDAP_ADMIN_DN": "uid=devtable,ou=employees,dc=quay,dc=io", + "LDAP_ADMIN_PASSWD": "password", + "LDAP_USER_RDN": ["ou=employees"], + "LDAP_UID_ATTR": "uid", + "LDAP_EMAIL_ATTR": "mail", + } - mockldap.start() - try: - # Write the config with the valid password. - self.putResponse(SuperUserConfig, - data={'config': config, - 'password': 'password', - 'hostname': 'foo'}, expected_code=200) + mockldap.start() + try: + # Write the config with the valid password. + self.putResponse( + SuperUserConfig, + data={"config": config, "password": "password", "hostname": "foo"}, + expected_code=200, + ) + + # Ensure that the user row has been linked. + # TODO: fix this test + # self.assertEquals(ADMIN_ACCESS_USER, + # model.user.verify_federated_login('ldap', ADMIN_ACCESS_USER).username) + finally: + mockldap.stop() - # Ensure that the user row has been linked. - # TODO: fix this test - # self.assertEquals(ADMIN_ACCESS_USER, - # model.user.verify_federated_login('ldap', ADMIN_ACCESS_USER).username) - finally: - mockldap.stop() class TestSuperUserCustomCertificates(ApiTestCase): - def test_custom_certificates(self): + def test_custom_certificates(self): - # Upload a certificate. - cert_contents, _ = generate_test_cert(hostname='somecoolhost', san_list=['DNS:bar', 'DNS:baz']) - self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt'), - file=(StringIO(cert_contents), 'testcert.crt'), expected_code=204) + # Upload a certificate. + cert_contents, _ = generate_test_cert( + hostname="somecoolhost", san_list=["DNS:bar", "DNS:baz"] + ) + self.postResponse( + SuperUserCustomCertificate, + params=dict(certpath="testcert.crt"), + file=(StringIO(cert_contents), "testcert.crt"), + expected_code=204, + ) - # Make sure it is present. - json = self.getJsonResponse(SuperUserCustomCertificates) - self.assertEquals(1, len(json['certs'])) + # Make sure it is present. 
+ json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json["certs"])) - cert_info = json['certs'][0] - self.assertEquals('testcert.crt', cert_info['path']) + cert_info = json["certs"][0] + self.assertEquals("testcert.crt", cert_info["path"]) - self.assertEquals(set(['somecoolhost', 'bar', 'baz']), set(cert_info['names'])) - self.assertFalse(cert_info['expired']) + self.assertEquals(set(["somecoolhost", "bar", "baz"]), set(cert_info["names"])) + self.assertFalse(cert_info["expired"]) - # Remove the certificate. - self.deleteResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt')) + # Remove the certificate. + self.deleteResponse( + SuperUserCustomCertificate, params=dict(certpath="testcert.crt") + ) - # Make sure it is gone. - json = self.getJsonResponse(SuperUserCustomCertificates) - self.assertEquals(0, len(json['certs'])) + # Make sure it is gone. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(0, len(json["certs"])) - def test_expired_custom_certificate(self): - # Upload a certificate. - cert_contents, _ = generate_test_cert(hostname='somecoolhost', expires=-10) - self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt'), - file=(StringIO(cert_contents), 'testcert.crt'), expected_code=204) + def test_expired_custom_certificate(self): + # Upload a certificate. + cert_contents, _ = generate_test_cert(hostname="somecoolhost", expires=-10) + self.postResponse( + SuperUserCustomCertificate, + params=dict(certpath="testcert.crt"), + file=(StringIO(cert_contents), "testcert.crt"), + expected_code=204, + ) - # Make sure it is present. - json = self.getJsonResponse(SuperUserCustomCertificates) - self.assertEquals(1, len(json['certs'])) + # Make sure it is present. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json["certs"])) - cert_info = json['certs'][0] - self.assertEquals('testcert.crt', cert_info['path']) + cert_info = json["certs"][0] + self.assertEquals("testcert.crt", cert_info["path"]) - self.assertEquals(set(['somecoolhost']), set(cert_info['names'])) - self.assertTrue(cert_info['expired']) + self.assertEquals(set(["somecoolhost"]), set(cert_info["names"])) + self.assertTrue(cert_info["expired"]) - def test_invalid_custom_certificate(self): - # Upload an invalid certificate. - self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt'), - file=(StringIO('some contents'), 'testcert.crt'), expected_code=204) + def test_invalid_custom_certificate(self): + # Upload an invalid certificate. + self.postResponse( + SuperUserCustomCertificate, + params=dict(certpath="testcert.crt"), + file=(StringIO("some contents"), "testcert.crt"), + expected_code=204, + ) - # Make sure it is present but invalid. - json = self.getJsonResponse(SuperUserCustomCertificates) - self.assertEquals(1, len(json['certs'])) + # Make sure it is present but invalid. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json["certs"])) - cert_info = json['certs'][0] - self.assertEquals('testcert.crt', cert_info['path']) - self.assertEquals('no start line', cert_info['error']) + cert_info = json["certs"][0] + self.assertEquals("testcert.crt", cert_info["path"]) + self.assertEquals("no start line", cert_info["error"]) - def test_path_sanitization(self): - # Upload a certificate. 
- cert_contents, _ = generate_test_cert(hostname='somecoolhost', expires=-10) - self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert/../foobar.crt'), - file=(StringIO(cert_contents), 'testcert/../foobar.crt'), expected_code=204) + def test_path_sanitization(self): + # Upload a certificate. + cert_contents, _ = generate_test_cert(hostname="somecoolhost", expires=-10) + self.postResponse( + SuperUserCustomCertificate, + params=dict(certpath="testcert/../foobar.crt"), + file=(StringIO(cert_contents), "testcert/../foobar.crt"), + expected_code=204, + ) - # Make sure it is present. - json = self.getJsonResponse(SuperUserCustomCertificates) - self.assertEquals(1, len(json['certs'])) - - cert_info = json['certs'][0] - self.assertEquals('foobar.crt', cert_info['path']) + # Make sure it is present. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json["certs"])) + cert_info = json["certs"][0] + self.assertEquals("foobar.crt", cert_info["path"]) diff --git a/config_app/config_test/test_suconfig_api.py b/config_app/config_test/test_suconfig_api.py index 408b96a8b..a805e6421 100644 --- a/config_app/config_test/test_suconfig_api.py +++ b/config_app/config_test/test_suconfig_api.py @@ -4,176 +4,235 @@ import mock from data.database import User from data import model -from config_app.config_endpoints.api.suconfig import SuperUserConfig, SuperUserConfigValidate, SuperUserConfigFile, \ - SuperUserRegistryStatus, SuperUserCreateInitialSuperUser +from config_app.config_endpoints.api.suconfig import ( + SuperUserConfig, + SuperUserConfigValidate, + SuperUserConfigFile, + SuperUserRegistryStatus, + SuperUserCreateInitialSuperUser, +) from config_app.config_endpoints.api import api_bp from config_app.config_test import ApiTestCase, READ_ACCESS_USER, ADMIN_ACCESS_USER from config_app.c_app import app, config_provider try: - app.register_blueprint(api_bp, url_prefix='/api') + app.register_blueprint(api_bp, url_prefix="/api") except ValueError: - # This blueprint was already registered - pass + # This blueprint was already registered + pass # OVERRIDES FROM PORTING FROM OLD APP: -all_queues = [] # the config app doesn't have any queues +all_queues = [] # the config app doesn't have any queues + class FreshConfigProvider(object): - def __enter__(self): - config_provider.reset_for_test() - return config_provider + def __enter__(self): + config_provider.reset_for_test() + return config_provider - def __exit__(self, type, value, traceback): - config_provider.reset_for_test() + def __exit__(self, type, value, traceback): + config_provider.reset_for_test() class TestSuperUserRegistryStatus(ApiTestCase): - def test_registry_status_no_config(self): - with FreshConfigProvider(): - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertEquals('config-db', json['status']) + def test_registry_status_no_config(self): + with FreshConfigProvider(): + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals("config-db", json["status"]) - @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=False)) - def test_registry_status_no_database(self): - with FreshConfigProvider(): - config_provider.save_config({'key': 'value'}) - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertEquals('setup-db', json['status']) + @mock.patch( + "config_app.config_endpoints.api.suconfig.database_is_valid", + mock.Mock(return_value=False), + ) + def test_registry_status_no_database(self): + with 
FreshConfigProvider(): + config_provider.save_config({"key": "value"}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals("setup-db", json["status"]) - @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=True)) - def test_registry_status_db_has_superuser(self): - with FreshConfigProvider(): - config_provider.save_config({'key': 'value'}) - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertEquals('config', json['status']) + @mock.patch( + "config_app.config_endpoints.api.suconfig.database_is_valid", + mock.Mock(return_value=True), + ) + def test_registry_status_db_has_superuser(self): + with FreshConfigProvider(): + config_provider.save_config({"key": "value"}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals("config", json["status"]) - @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=True)) - @mock.patch("config_app.config_endpoints.api.suconfig.database_has_users", mock.Mock(return_value=False)) - def test_registry_status_db_no_superuser(self): - with FreshConfigProvider(): - config_provider.save_config({'key': 'value'}) - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertEquals('create-superuser', json['status']) + @mock.patch( + "config_app.config_endpoints.api.suconfig.database_is_valid", + mock.Mock(return_value=True), + ) + @mock.patch( + "config_app.config_endpoints.api.suconfig.database_has_users", + mock.Mock(return_value=False), + ) + def test_registry_status_db_no_superuser(self): + with FreshConfigProvider(): + config_provider.save_config({"key": "value"}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals("create-superuser", json["status"]) + + @mock.patch( + "config_app.config_endpoints.api.suconfig.database_is_valid", + mock.Mock(return_value=True), + ) + @mock.patch( + "config_app.config_endpoints.api.suconfig.database_has_users", + mock.Mock(return_value=True), + ) + def test_registry_status_setup_complete(self): + with FreshConfigProvider(): + config_provider.save_config({"key": "value", "SETUP_COMPLETE": True}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals("config", json["status"]) - @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=True)) - @mock.patch("config_app.config_endpoints.api.suconfig.database_has_users", mock.Mock(return_value=True)) - def test_registry_status_setup_complete(self): - with FreshConfigProvider(): - config_provider.save_config({'key': 'value', 'SETUP_COMPLETE': True}) - json = self.getJsonResponse(SuperUserRegistryStatus) - self.assertEquals('config', json['status']) class TestSuperUserConfigFile(ApiTestCase): - def test_get_superuser_invalid_filename(self): - with FreshConfigProvider(): - self.getResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) + def test_get_superuser_invalid_filename(self): + with FreshConfigProvider(): + self.getResponse( + SuperUserConfigFile, params=dict(filename="somefile"), expected_code=404 + ) - def test_get_superuser(self): - with FreshConfigProvider(): - result = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) - self.assertFalse(result['exists']) + def test_get_superuser(self): + with FreshConfigProvider(): + result = self.getJsonResponse( + SuperUserConfigFile, params=dict(filename="ssl.cert") + ) + self.assertFalse(result["exists"]) - def test_post_no_file(self): - with 
FreshConfigProvider(): - # No file - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) + def test_post_no_file(self): + with FreshConfigProvider(): + # No file + self.postResponse( + SuperUserConfigFile, params=dict(filename="ssl.cert"), expected_code=400 + ) - def test_post_superuser_invalid_filename(self): - with FreshConfigProvider(): - self.postResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) + def test_post_superuser_invalid_filename(self): + with FreshConfigProvider(): + self.postResponse( + SuperUserConfigFile, params=dict(filename="somefile"), expected_code=404 + ) - def test_post_superuser(self): - with FreshConfigProvider(): - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) + def test_post_superuser(self): + with FreshConfigProvider(): + self.postResponse( + SuperUserConfigFile, params=dict(filename="ssl.cert"), expected_code=400 + ) class TestSuperUserCreateInitialSuperUser(ApiTestCase): - def test_no_config_file(self): - with FreshConfigProvider(): - # If there is no config.yaml, then this method should security fail. - data = dict(username='cooluser', password='password', email='fake@example.com') - self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + def test_no_config_file(self): + with FreshConfigProvider(): + # If there is no config.yaml, then this method should security fail. + data = dict( + username="cooluser", password="password", email="fake@example.com" + ) + self.postResponse( + SuperUserCreateInitialSuperUser, data=data, expected_code=403 + ) - def test_config_file_with_db_users(self): - with FreshConfigProvider(): - # Write some config. - self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) + def test_config_file_with_db_users(self): + with FreshConfigProvider(): + # Write some config. + self.putJsonResponse( + SuperUserConfig, data=dict(config={}, hostname="foobar") + ) - # If there is a config.yaml, but existing DB users exist, then this method should security - # fail. - data = dict(username='cooluser', password='password', email='fake@example.com') - self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + # If there is a config.yaml, but existing DB users exist, then this method should security + # fail. + data = dict( + username="cooluser", password="password", email="fake@example.com" + ) + self.postResponse( + SuperUserCreateInitialSuperUser, data=data, expected_code=403 + ) - def test_config_file_with_no_db_users(self): - with FreshConfigProvider(): - # Write some config. - self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) + def test_config_file_with_no_db_users(self): + with FreshConfigProvider(): + # Write some config. + self.putJsonResponse( + SuperUserConfig, data=dict(config={}, hostname="foobar") + ) - # Delete all the users in the DB. - for user in list(User.select()): - model.user.delete_user(user, all_queues) + # Delete all the users in the DB. + for user in list(User.select()): + model.user.delete_user(user, all_queues) - # This method should now succeed. - data = dict(username='cooluser', password='password', email='fake@example.com') - result = self.postJsonResponse(SuperUserCreateInitialSuperUser, data=data) - self.assertTrue(result['status']) + # This method should now succeed. 
+ data = dict( + username="cooluser", password="password", email="fake@example.com" + ) + result = self.postJsonResponse(SuperUserCreateInitialSuperUser, data=data) + self.assertTrue(result["status"]) - # Verify the superuser was created. - User.get(User.username == 'cooluser') + # Verify the superuser was created. + User.get(User.username == "cooluser") - # Verify the superuser was placed into the config. - result = self.getJsonResponse(SuperUserConfig) - self.assertEquals(['cooluser'], result['config']['SUPER_USERS']) + # Verify the superuser was placed into the config. + result = self.getJsonResponse(SuperUserConfig) + self.assertEquals(["cooluser"], result["config"]["SUPER_USERS"]) class TestSuperUserConfigValidate(ApiTestCase): - def test_nonsuperuser_noconfig(self): - with FreshConfigProvider(): - result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), - data=dict(config={})) + def test_nonsuperuser_noconfig(self): + with FreshConfigProvider(): + result = self.postJsonResponse( + SuperUserConfigValidate, + params=dict(service="someservice"), + data=dict(config={}), + ) - self.assertFalse(result['status']) + self.assertFalse(result["status"]) + def test_nonsuperuser_config(self): + with FreshConfigProvider(): + # The validate config call works if there is no config.yaml OR the user is a superuser. + # Add a config, and verify it breaks when unauthenticated. + json = self.putJsonResponse( + SuperUserConfig, data=dict(config={}, hostname="foobar") + ) + self.assertTrue(json["exists"]) - def test_nonsuperuser_config(self): - with FreshConfigProvider(): - # The validate config call works if there is no config.yaml OR the user is a superuser. - # Add a config, and verify it breaks when unauthenticated. - json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) - self.assertTrue(json['exists']) + result = self.postJsonResponse( + SuperUserConfigValidate, + params=dict(service="someservice"), + data=dict(config={}), + ) - - result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), - data=dict(config={})) - - self.assertFalse(result['status']) + self.assertFalse(result["status"]) class TestSuperUserConfig(ApiTestCase): - def test_get_superuser(self): - with FreshConfigProvider(): - json = self.getJsonResponse(SuperUserConfig) + def test_get_superuser(self): + with FreshConfigProvider(): + json = self.getJsonResponse(SuperUserConfig) - # Note: We expect the config to be none because a config.yaml should never be checked into - # the directory. - self.assertIsNone(json['config']) + # Note: We expect the config to be none because a config.yaml should never be checked into + # the directory. + self.assertIsNone(json["config"]) - def test_put(self): - with FreshConfigProvider() as config: - json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) - self.assertTrue(json['exists']) + def test_put(self): + with FreshConfigProvider() as config: + json = self.putJsonResponse( + SuperUserConfig, data=dict(config={}, hostname="foobar") + ) + self.assertTrue(json["exists"]) - # Verify the config file exists. - self.assertTrue(config.config_exists()) + # Verify the config file exists. + self.assertTrue(config.config_exists()) - # This should succeed. - json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz')) - self.assertTrue(json['exists']) + # This should succeed. 
+ json = self.putJsonResponse( + SuperUserConfig, data=dict(config={}, hostname="barbaz") + ) + self.assertTrue(json["exists"]) - json = self.getJsonResponse(SuperUserConfig) - self.assertIsNotNone(json['config']) + json = self.getJsonResponse(SuperUserConfig) + self.assertIsNotNone(json["config"]) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/config_app/config_util/config/TransientDirectoryProvider.py b/config_app/config_util/config/TransientDirectoryProvider.py index 5ac685592..a8be2710d 100644 --- a/config_app/config_util/config/TransientDirectoryProvider.py +++ b/config_app/config_util/config/TransientDirectoryProvider.py @@ -5,58 +5,63 @@ from backports.tempfile import TemporaryDirectory from config_app.config_util.config.fileprovider import FileConfigProvider -OLD_CONFIG_SUBDIR = 'old/' +OLD_CONFIG_SUBDIR = "old/" + class TransientDirectoryProvider(FileConfigProvider): - """ Implementation of the config provider that reads and writes the data + """ Implementation of the config provider that reads and writes the data from/to the file system, only using temporary directories, deleting old dirs and creating new ones as requested. """ - def __init__(self, config_volume, yaml_filename, py_filename): - # Create a temp directory that will be cleaned up when we change the config path - # This should ensure we have no "pollution" of different configs: - # no uploaded config should ever affect subsequent config modifications/creations - temp_dir = TemporaryDirectory() - self.temp_dir = temp_dir - self.old_config_dir = None - super(TransientDirectoryProvider, self).__init__(temp_dir.name, yaml_filename, py_filename) + def __init__(self, config_volume, yaml_filename, py_filename): + # Create a temp directory that will be cleaned up when we change the config path + # This should ensure we have no "pollution" of different configs: + # no uploaded config should ever affect subsequent config modifications/creations + temp_dir = TemporaryDirectory() + self.temp_dir = temp_dir + self.old_config_dir = None + super(TransientDirectoryProvider, self).__init__( + temp_dir.name, yaml_filename, py_filename + ) - @property - def provider_id(self): - return 'transient' + @property + def provider_id(self): + return "transient" - def new_config_dir(self): - """ + def new_config_dir(self): + """ Update the path with a new temporary directory, deleting the old one in the process """ - self.temp_dir.cleanup() - temp_dir = TemporaryDirectory() + self.temp_dir.cleanup() + temp_dir = TemporaryDirectory() - self.config_volume = temp_dir.name - self.temp_dir = temp_dir - self.yaml_path = os.path.join(temp_dir.name, self.yaml_filename) + self.config_volume = temp_dir.name + self.temp_dir = temp_dir + self.yaml_path = os.path.join(temp_dir.name, self.yaml_filename) - def create_copy_of_config_dir(self): - """ + def create_copy_of_config_dir(self): + """ Create a directory to store loaded/populated configuration (for rollback if necessary) """ - if self.old_config_dir is not None: - self.old_config_dir.cleanup() + if self.old_config_dir is not None: + self.old_config_dir.cleanup() - temp_dir = TemporaryDirectory() - self.old_config_dir = temp_dir + temp_dir = TemporaryDirectory() + self.old_config_dir = temp_dir - # Python 2.7's shutil.copy() doesn't allow for copying to existing directories, - # so when copying/reading to the old saved config, we have to talk to a subdirectory, - # and use the shutil.copytree() function - copytree(self.config_volume, 
os.path.join(temp_dir.name, OLD_CONFIG_SUBDIR)) + # Python 2.7's shutil.copy() doesn't allow for copying to existing directories, + # so when copying/reading to the old saved config, we have to talk to a subdirectory, + # and use the shutil.copytree() function + copytree(self.config_volume, os.path.join(temp_dir.name, OLD_CONFIG_SUBDIR)) - def get_config_dir_path(self): - return self.config_volume + def get_config_dir_path(self): + return self.config_volume - def get_old_config_dir(self): - if self.old_config_dir is None: - raise Exception('Cannot return a configuration that was no old configuration') + def get_old_config_dir(self): + if self.old_config_dir is None: + raise Exception( + "Cannot return a configuration that was no old configuration" + ) - return os.path.join(self.old_config_dir.name, OLD_CONFIG_SUBDIR) + return os.path.join(self.old_config_dir.name, OLD_CONFIG_SUBDIR) diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py index d39d0ea1c..7429d17cf 100644 --- a/config_app/config_util/config/__init__.py +++ b/config_app/config_util/config/__init__.py @@ -3,37 +3,40 @@ import os from config_app.config_util.config.fileprovider import FileConfigProvider from config_app.config_util.config.testprovider import TestConfigProvider -from config_app.config_util.config.TransientDirectoryProvider import TransientDirectoryProvider +from config_app.config_util.config.TransientDirectoryProvider import ( + TransientDirectoryProvider, +) from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX def get_config_provider(config_volume, yaml_filename, py_filename, testing=False): - """ Loads and returns the config provider for the current environment. """ + """ Loads and returns the config provider for the current environment. 
""" - if testing: - return TestConfigProvider() + if testing: + return TestConfigProvider() - return TransientDirectoryProvider(config_volume, yaml_filename, py_filename) + return TransientDirectoryProvider(config_volume, yaml_filename, py_filename) def get_config_as_kube_secret(config_path): - data = {} + data = {} - # Kubernetes secrets don't have sub-directories, so for the extra_ca_certs dir - # we have to put the extra certs in with a prefix, and then one of our init scripts - # (02_get_kube_certs.sh) will expand the prefixed certs into the equivalent directory - # so that they'll be installed correctly on startup by the certs_install script - certs_dir = os.path.join(config_path, EXTRA_CA_DIRECTORY) - if os.path.exists(certs_dir): - for extra_cert in os.listdir(certs_dir): - with open(os.path.join(certs_dir, extra_cert)) as f: - data[EXTRA_CA_DIRECTORY_PREFIX + extra_cert] = base64.b64encode(f.read()) + # Kubernetes secrets don't have sub-directories, so for the extra_ca_certs dir + # we have to put the extra certs in with a prefix, and then one of our init scripts + # (02_get_kube_certs.sh) will expand the prefixed certs into the equivalent directory + # so that they'll be installed correctly on startup by the certs_install script + certs_dir = os.path.join(config_path, EXTRA_CA_DIRECTORY) + if os.path.exists(certs_dir): + for extra_cert in os.listdir(certs_dir): + with open(os.path.join(certs_dir, extra_cert)) as f: + data[EXTRA_CA_DIRECTORY_PREFIX + extra_cert] = base64.b64encode( + f.read() + ) + for name in os.listdir(config_path): + file_path = os.path.join(config_path, name) + if not os.path.isdir(file_path): + with open(file_path) as f: + data[name] = base64.b64encode(f.read()) - for name in os.listdir(config_path): - file_path = os.path.join(config_path, name) - if not os.path.isdir(file_path): - with open(file_path) as f: - data[name] = base64.b64encode(f.read()) - - return data + return data diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py index caf231321..ac78000d9 100644 --- a/config_app/config_util/config/basefileprovider.py +++ b/config_app/config_util/config/basefileprovider.py @@ -1,72 +1,76 @@ import os import logging -from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, - CannotWriteConfigException) +from config_app.config_util.config.baseprovider import ( + BaseProvider, + import_yaml, + export_yaml, + CannotWriteConfigException, +) logger = logging.getLogger(__name__) class BaseFileProvider(BaseProvider): - """ Base implementation of the config provider that reads the data from the file system. """ + """ Base implementation of the config provider that reads the data from the file system. 
""" - def __init__(self, config_volume, yaml_filename, py_filename): - self.config_volume = config_volume - self.yaml_filename = yaml_filename - self.py_filename = py_filename + def __init__(self, config_volume, yaml_filename, py_filename): + self.config_volume = config_volume + self.yaml_filename = yaml_filename + self.py_filename = py_filename - self.yaml_path = os.path.join(config_volume, yaml_filename) - self.py_path = os.path.join(config_volume, py_filename) + self.yaml_path = os.path.join(config_volume, yaml_filename) + self.py_path = os.path.join(config_volume, py_filename) - def update_app_config(self, app_config): - if os.path.exists(self.py_path): - logger.debug('Applying config file: %s', self.py_path) - app_config.from_pyfile(self.py_path) + def update_app_config(self, app_config): + if os.path.exists(self.py_path): + logger.debug("Applying config file: %s", self.py_path) + app_config.from_pyfile(self.py_path) - if os.path.exists(self.yaml_path): - logger.debug('Applying config file: %s', self.yaml_path) - import_yaml(app_config, self.yaml_path) + if os.path.exists(self.yaml_path): + logger.debug("Applying config file: %s", self.yaml_path) + import_yaml(app_config, self.yaml_path) - def get_config(self): - if not self.config_exists(): - return None + def get_config(self): + if not self.config_exists(): + return None - config_obj = {} - import_yaml(config_obj, self.yaml_path) - return config_obj + config_obj = {} + import_yaml(config_obj, self.yaml_path) + return config_obj - def config_exists(self): - return self.volume_file_exists(self.yaml_filename) + def config_exists(self): + return self.volume_file_exists(self.yaml_filename) - def volume_exists(self): - return os.path.exists(self.config_volume) + def volume_exists(self): + return os.path.exists(self.config_volume) - def volume_file_exists(self, filename): - return os.path.exists(os.path.join(self.config_volume, filename)) + def volume_file_exists(self, filename): + return os.path.exists(os.path.join(self.config_volume, filename)) - def get_volume_file(self, filename, mode='r'): - return open(os.path.join(self.config_volume, filename), mode=mode) + def get_volume_file(self, filename, mode="r"): + return open(os.path.join(self.config_volume, filename), mode=mode) - def get_volume_path(self, directory, filename): - return os.path.join(directory, filename) + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) - def list_volume_directory(self, path): - dirpath = os.path.join(self.config_volume, path) - if not os.path.exists(dirpath): - return None + def list_volume_directory(self, path): + dirpath = os.path.join(self.config_volume, path) + if not os.path.exists(dirpath): + return None - if not os.path.isdir(dirpath): - return None + if not os.path.isdir(dirpath): + return None - return os.listdir(dirpath) + return os.listdir(dirpath) - def requires_restart(self, app_config): - file_config = self.get_config() - if not file_config: - return False + def requires_restart(self, app_config): + file_config = self.get_config() + if not file_config: + return False - for key in file_config: - if app_config.get(key) != file_config[key]: - return True + for key in file_config: + if app_config.get(key) != file_config[key]: + return True - return False + return False diff --git a/config_app/config_util/config/baseprovider.py b/config_app/config_util/config/baseprovider.py index 17ae7e86b..e6705809d 100644 --- a/config_app/config_util/config/baseprovider.py +++ 
b/config_app/config_util/config/baseprovider.py @@ -12,117 +12,119 @@ logger = logging.getLogger(__name__) class CannotWriteConfigException(Exception): - """ Exception raised when the config cannot be written. """ - pass + """ Exception raised when the config cannot be written. """ + + pass class SetupIncompleteException(Exception): - """ Exception raised when attempting to verify config that has not yet been setup. """ - pass + """ Exception raised when attempting to verify config that has not yet been setup. """ + + pass def import_yaml(config_obj, config_file): - with open(config_file) as f: - c = yaml.safe_load(f) - if not c: - logger.debug('Empty YAML config file') - return + with open(config_file) as f: + c = yaml.safe_load(f) + if not c: + logger.debug("Empty YAML config file") + return - if isinstance(c, str): - raise Exception('Invalid YAML config file: ' + str(c)) + if isinstance(c, str): + raise Exception("Invalid YAML config file: " + str(c)) - for key in c.iterkeys(): - if key.isupper(): - config_obj[key] = c[key] + for key in c.iterkeys(): + if key.isupper(): + config_obj[key] = c[key] - if config_obj.get('SETUP_COMPLETE', False): - try: - validate(config_obj, CONFIG_SCHEMA) - except ValidationError: - # TODO: Change this into a real error - logger.exception('Could not validate config schema') - else: - logger.debug('Skipping config schema validation because setup is not complete') + if config_obj.get("SETUP_COMPLETE", False): + try: + validate(config_obj, CONFIG_SCHEMA) + except ValidationError: + # TODO: Change this into a real error + logger.exception("Could not validate config schema") + else: + logger.debug("Skipping config schema validation because setup is not complete") - return config_obj + return config_obj def get_yaml(config_obj): - return yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True) + return yaml.safe_dump(config_obj, encoding="utf-8", allow_unicode=True) def export_yaml(config_obj, config_file): - try: - with open(config_file, 'w') as f: - f.write(get_yaml(config_obj)) - except IOError as ioe: - raise CannotWriteConfigException(str(ioe)) + try: + with open(config_file, "w") as f: + f.write(get_yaml(config_obj)) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) @add_metaclass(ABCMeta) class BaseProvider(object): - """ A configuration provider helps to load, save, and handle config override in the application. + """ A configuration provider helps to load, save, and handle config override in the application. """ - @property - def provider_id(self): - raise NotImplementedError + @property + def provider_id(self): + raise NotImplementedError - @abstractmethod - def update_app_config(self, app_config): - """ Updates the given application config object with the loaded override config. """ + @abstractmethod + def update_app_config(self, app_config): + """ Updates the given application config object with the loaded override config. """ - @abstractmethod - def get_config(self): - """ Returns the contents of the config override file, or None if none. """ + @abstractmethod + def get_config(self): + """ Returns the contents of the config override file, or None if none. """ - @abstractmethod - def save_config(self, config_object): - """ Updates the contents of the config override file to those given. """ + @abstractmethod + def save_config(self, config_object): + """ Updates the contents of the config override file to those given. 
""" - @abstractmethod - def config_exists(self): - """ Returns true if a config override file exists in the config volume. """ + @abstractmethod + def config_exists(self): + """ Returns true if a config override file exists in the config volume. """ - @abstractmethod - def volume_exists(self): - """ Returns whether the config override volume exists. """ + @abstractmethod + def volume_exists(self): + """ Returns whether the config override volume exists. """ - @abstractmethod - def volume_file_exists(self, filename): - """ Returns whether the file with the given name exists under the config override volume. """ + @abstractmethod + def volume_file_exists(self, filename): + """ Returns whether the file with the given name exists under the config override volume. """ - @abstractmethod - def get_volume_file(self, filename, mode='r'): - """ Returns a Python file referring to the given name under the config override volume. """ + @abstractmethod + def get_volume_file(self, filename, mode="r"): + """ Returns a Python file referring to the given name under the config override volume. """ - @abstractmethod - def write_volume_file(self, filename, contents): - """ Writes the given contents to the config override volumne, with the given filename. """ + @abstractmethod + def write_volume_file(self, filename, contents): + """ Writes the given contents to the config override volumne, with the given filename. """ - @abstractmethod - def remove_volume_file(self, filename): - """ Removes the config override volume file with the given filename. """ + @abstractmethod + def remove_volume_file(self, filename): + """ Removes the config override volume file with the given filename. """ - @abstractmethod - def list_volume_directory(self, path): - """ Returns a list of strings representing the names of the files found in the config override + @abstractmethod + def list_volume_directory(self, path): + """ Returns a list of strings representing the names of the files found in the config override directory under the given path. If the path doesn't exist, returns None. """ - @abstractmethod - def save_volume_file(self, filename, flask_file): - """ Saves the given flask file to the config override volume, with the given + @abstractmethod + def save_volume_file(self, filename, flask_file): + """ Saves the given flask file to the config override volume, with the given filename. """ - @abstractmethod - def requires_restart(self, app_config): - """ If true, the configuration loaded into memory for the app does not match that on disk, + @abstractmethod + def requires_restart(self, app_config): + """ If true, the configuration loaded into memory for the app does not match that on disk, indicating that this container requires a restart. """ - @abstractmethod - def get_volume_path(self, directory, filename): - """ Helper for constructing file paths, which may differ between providers. For example, + @abstractmethod + def get_volume_path(self, directory, filename): + """ Helper for constructing file paths, which may differ between providers. 
For example, kubernetes can't have subfolders in configmaps """ diff --git a/config_app/config_util/config/fileprovider.py b/config_app/config_util/config/fileprovider.py index 74531e581..4f9d94ad0 100644 --- a/config_app/config_util/config/fileprovider.py +++ b/config_app/config_util/config/fileprovider.py @@ -1,60 +1,65 @@ import os import logging -from config_app.config_util.config.baseprovider import export_yaml, CannotWriteConfigException +from config_app.config_util.config.baseprovider import ( + export_yaml, + CannotWriteConfigException, +) from config_app.config_util.config.basefileprovider import BaseFileProvider logger = logging.getLogger(__name__) def _ensure_parent_dir(filepath): - """ Ensures that the parent directory of the given file path exists. """ - try: - parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) - if not os.path.isdir(parentpath): - os.makedirs(parentpath) - except IOError as ioe: - raise CannotWriteConfigException(str(ioe)) + """ Ensures that the parent directory of the given file path exists. """ + try: + parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) + if not os.path.isdir(parentpath): + os.makedirs(parentpath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) class FileConfigProvider(BaseFileProvider): - """ Implementation of the config provider that reads and writes the data + """ Implementation of the config provider that reads and writes the data from/to the file system. """ - def __init__(self, config_volume, yaml_filename, py_filename): - super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) + def __init__(self, config_volume, yaml_filename, py_filename): + super(FileConfigProvider, self).__init__( + config_volume, yaml_filename, py_filename + ) - @property - def provider_id(self): - return 'file' + @property + def provider_id(self): + return "file" - def save_config(self, config_obj): - export_yaml(config_obj, self.yaml_path) + def save_config(self, config_obj): + export_yaml(config_obj, self.yaml_path) - def write_volume_file(self, filename, contents): - filepath = os.path.join(self.config_volume, filename) - _ensure_parent_dir(filepath) + def write_volume_file(self, filename, contents): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) - try: - with open(filepath, mode='w') as f: - f.write(contents) - except IOError as ioe: - raise CannotWriteConfigException(str(ioe)) + try: + with open(filepath, mode="w") as f: + f.write(contents) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) - return filepath + return filepath - def remove_volume_file(self, filename): - filepath = os.path.join(self.config_volume, filename) - os.remove(filepath) + def remove_volume_file(self, filename): + filepath = os.path.join(self.config_volume, filename) + os.remove(filepath) - def save_volume_file(self, filename, flask_file): - filepath = os.path.join(self.config_volume, filename) - _ensure_parent_dir(filepath) + def save_volume_file(self, filename, flask_file): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) - # Write the file. - try: - flask_file.save(filepath) - except IOError as ioe: - raise CannotWriteConfigException(str(ioe)) + # Write the file. 
+ try: + flask_file.save(filepath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) - return filepath + return filepath diff --git a/config_app/config_util/config/test/test_helpers.py b/config_app/config_util/config/test/test_helpers.py index ceeae51ff..f266bb65c 100644 --- a/config_app/config_util/config/test/test_helpers.py +++ b/config_app/config_util/config/test/test_helpers.py @@ -9,67 +9,73 @@ from util.config.validator import EXTRA_CA_DIRECTORY def _create_temp_file_structure(file_structure): - temp_dir = TemporaryDirectory() + temp_dir = TemporaryDirectory() - for filename, data in file_structure.iteritems(): - if filename == EXTRA_CA_DIRECTORY: - extra_ca_dir_path = os.path.join(temp_dir.name, EXTRA_CA_DIRECTORY) - os.mkdir(extra_ca_dir_path) + for filename, data in file_structure.iteritems(): + if filename == EXTRA_CA_DIRECTORY: + extra_ca_dir_path = os.path.join(temp_dir.name, EXTRA_CA_DIRECTORY) + os.mkdir(extra_ca_dir_path) - for name, cert_value in data: - with open(os.path.join(extra_ca_dir_path, name), 'w') as f: - f.write(cert_value) - else: - with open(os.path.join(temp_dir.name, filename), 'w') as f: - f.write(data) + for name, cert_value in data: + with open(os.path.join(extra_ca_dir_path, name), "w") as f: + f.write(cert_value) + else: + with open(os.path.join(temp_dir.name, filename), "w") as f: + f.write(data) - return temp_dir + return temp_dir -@pytest.mark.parametrize('file_structure, expected_secret', [ - pytest.param({ - 'config.yaml': 'test:true', - }, - { - 'config.yaml': 'dGVzdDp0cnVl', - }, id='just a config value'), - pytest.param({ - 'config.yaml': 'test:true', - 'otherfile.ext': 'im a file' - }, - { - 'config.yaml': 'dGVzdDp0cnVl', - 'otherfile.ext': base64.b64encode('im a file') - }, id='config and another file'), - pytest.param({ - 'config.yaml': 'test:true', - 'extra_ca_certs': [ - ('cert.crt', 'im a cert!'), - ] - }, - { - 'config.yaml': 'dGVzdDp0cnVl', - 'extra_ca_certs_cert.crt': base64.b64encode('im a cert!'), - }, id='config and an extra cert'), - pytest.param({ - 'config.yaml': 'test:true', - 'otherfile.ext': 'im a file', - 'extra_ca_certs': [ - ('cert.crt', 'im a cert!'), - ('another.crt', 'im a different cert!'), - ] - }, - { - 'config.yaml': 'dGVzdDp0cnVl', - 'otherfile.ext': base64.b64encode('im a file'), - 'extra_ca_certs_cert.crt': base64.b64encode('im a cert!'), - 'extra_ca_certs_another.crt': base64.b64encode('im a different cert!'), - }, id='config, files, and extra certs!'), -]) +@pytest.mark.parametrize( + "file_structure, expected_secret", + [ + pytest.param( + {"config.yaml": "test:true"}, + {"config.yaml": "dGVzdDp0cnVl"}, + id="just a config value", + ), + pytest.param( + {"config.yaml": "test:true", "otherfile.ext": "im a file"}, + { + "config.yaml": "dGVzdDp0cnVl", + "otherfile.ext": base64.b64encode("im a file"), + }, + id="config and another file", + ), + pytest.param( + { + "config.yaml": "test:true", + "extra_ca_certs": [("cert.crt", "im a cert!")], + }, + { + "config.yaml": "dGVzdDp0cnVl", + "extra_ca_certs_cert.crt": base64.b64encode("im a cert!"), + }, + id="config and an extra cert", + ), + pytest.param( + { + "config.yaml": "test:true", + "otherfile.ext": "im a file", + "extra_ca_certs": [ + ("cert.crt", "im a cert!"), + ("another.crt", "im a different cert!"), + ], + }, + { + "config.yaml": "dGVzdDp0cnVl", + "otherfile.ext": base64.b64encode("im a file"), + "extra_ca_certs_cert.crt": base64.b64encode("im a cert!"), + "extra_ca_certs_another.crt": base64.b64encode("im a different cert!"), + }, + 
id="config, files, and extra certs!", + ), + ], +) def test_get_config_as_kube_secret(file_structure, expected_secret): - temp_dir = _create_temp_file_structure(file_structure) + temp_dir = _create_temp_file_structure(file_structure) - secret = get_config_as_kube_secret(temp_dir.name) - assert secret == expected_secret + secret = get_config_as_kube_secret(temp_dir.name) + assert secret == expected_secret - temp_dir.cleanup() + temp_dir.cleanup() diff --git a/config_app/config_util/config/test/test_transient_dir_provider.py b/config_app/config_util/config/test/test_transient_dir_provider.py index 2d1f3f96c..2d53b5153 100644 --- a/config_app/config_util/config/test/test_transient_dir_provider.py +++ b/config_app/config_util/config/test/test_transient_dir_provider.py @@ -1,68 +1,71 @@ import pytest import os -from config_app.config_util.config.TransientDirectoryProvider import TransientDirectoryProvider +from config_app.config_util.config.TransientDirectoryProvider import ( + TransientDirectoryProvider, +) -@pytest.mark.parametrize('files_to_write, operations, expected_new_dir', [ - pytest.param({ - 'config.yaml': 'a config', - }, ([], [], []), { - 'config.yaml': 'a config', - }, id='just a config'), - pytest.param({ - 'config.yaml': 'a config', - 'oldfile': 'hmmm' - }, ([], [], ['oldfile']), { - 'config.yaml': 'a config', - }, id='delete a file'), - pytest.param({ - 'config.yaml': 'a config', - 'oldfile': 'hmmm' - }, ([('newfile', 'asdf')], [], ['oldfile']), { - 'config.yaml': 'a config', - 'newfile': 'asdf' - }, id='delete and add a file'), - pytest.param({ - 'config.yaml': 'a config', - 'somefile': 'before' - }, ([('newfile', 'asdf')], [('somefile', 'after')], []), { - 'config.yaml': 'a config', - 'newfile': 'asdf', - 'somefile': 'after', - }, id='add new files and change files'), -]) +@pytest.mark.parametrize( + "files_to_write, operations, expected_new_dir", + [ + pytest.param( + {"config.yaml": "a config"}, + ([], [], []), + {"config.yaml": "a config"}, + id="just a config", + ), + pytest.param( + {"config.yaml": "a config", "oldfile": "hmmm"}, + ([], [], ["oldfile"]), + {"config.yaml": "a config"}, + id="delete a file", + ), + pytest.param( + {"config.yaml": "a config", "oldfile": "hmmm"}, + ([("newfile", "asdf")], [], ["oldfile"]), + {"config.yaml": "a config", "newfile": "asdf"}, + id="delete and add a file", + ), + pytest.param( + {"config.yaml": "a config", "somefile": "before"}, + ([("newfile", "asdf")], [("somefile", "after")], []), + {"config.yaml": "a config", "newfile": "asdf", "somefile": "after"}, + id="add new files and change files", + ), + ], +) def test_transient_dir_copy_config_dir(files_to_write, operations, expected_new_dir): - config_provider = TransientDirectoryProvider('', '', '') + config_provider = TransientDirectoryProvider("", "", "") - for name, data in files_to_write.iteritems(): - config_provider.write_volume_file(name, data) + for name, data in files_to_write.iteritems(): + config_provider.write_volume_file(name, data) - config_provider.create_copy_of_config_dir() + config_provider.create_copy_of_config_dir() - for create in operations[0]: - (name, data) = create - config_provider.write_volume_file(name, data) + for create in operations[0]: + (name, data) = create + config_provider.write_volume_file(name, data) - for update in operations[1]: - (name, data) = update - config_provider.write_volume_file(name, data) + for update in operations[1]: + (name, data) = update + config_provider.write_volume_file(name, data) - for delete in operations[2]: - 
config_provider.remove_volume_file(delete) + for delete in operations[2]: + config_provider.remove_volume_file(delete) - # check that the new directory matches expected state - for filename, data in expected_new_dir.iteritems(): - with open(os.path.join(config_provider.get_config_dir_path(), filename)) as f: - new_data = f.read() - assert new_data == data + # check that the new directory matches expected state + for filename, data in expected_new_dir.iteritems(): + with open(os.path.join(config_provider.get_config_dir_path(), filename)) as f: + new_data = f.read() + assert new_data == data - # Now check that the old dir matches the original state - saved = config_provider.get_old_config_dir() + # Now check that the old dir matches the original state + saved = config_provider.get_old_config_dir() - for filename, data in files_to_write.iteritems(): - with open(os.path.join(saved, filename)) as f: - new_data = f.read() - assert new_data == data + for filename, data in files_to_write.iteritems(): + with open(os.path.join(saved, filename)) as f: + new_data = f.read() + assert new_data == data - config_provider.temp_dir.cleanup() + config_provider.temp_dir.cleanup() diff --git a/config_app/config_util/config/testprovider.py b/config_app/config_util/config/testprovider.py index 63e563056..39070687e 100644 --- a/config_app/config_util/config/testprovider.py +++ b/config_app/config_util/config/testprovider.py @@ -4,80 +4,84 @@ import os from config_app.config_util.config.baseprovider import BaseProvider -REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem'] +REAL_FILES = [ + "test/data/signing-private.gpg", + "test/data/signing-public.gpg", + "test/data/test.pem", +] class TestConfigProvider(BaseProvider): - """ Implementation of the config provider for testing. Everything is kept in-memory instead on + """ Implementation of the config provider for testing. Everything is kept in-memory instead on the real file system. 
""" - def __init__(self): - self.clear() + def __init__(self): + self.clear() - def clear(self): - self.files = {} - self._config = {} + def clear(self): + self.files = {} + self._config = {} - @property - def provider_id(self): - return 'test' + @property + def provider_id(self): + return "test" - def update_app_config(self, app_config): - self._config = app_config + def update_app_config(self, app_config): + self._config = app_config - def get_config(self): - if not 'config.yaml' in self.files: - return None + def get_config(self): + if not "config.yaml" in self.files: + return None - return json.loads(self.files.get('config.yaml', '{}')) + return json.loads(self.files.get("config.yaml", "{}")) - def save_config(self, config_obj): - self.files['config.yaml'] = json.dumps(config_obj) + def save_config(self, config_obj): + self.files["config.yaml"] = json.dumps(config_obj) - def config_exists(self): - return 'config.yaml' in self.files + def config_exists(self): + return "config.yaml" in self.files - def volume_exists(self): - return True + def volume_exists(self): + return True - def volume_file_exists(self, filename): - if filename in REAL_FILES: - return True + def volume_file_exists(self, filename): + if filename in REAL_FILES: + return True - return filename in self.files + return filename in self.files - def save_volume_file(self, filename, flask_file): - self.files[filename] = flask_file.read() + def save_volume_file(self, filename, flask_file): + self.files[filename] = flask_file.read() - def write_volume_file(self, filename, contents): - self.files[filename] = contents + def write_volume_file(self, filename, contents): + self.files[filename] = contents - def get_volume_file(self, filename, mode='r'): - if filename in REAL_FILES: - return open(filename, mode=mode) + def get_volume_file(self, filename, mode="r"): + if filename in REAL_FILES: + return open(filename, mode=mode) - return io.BytesIO(self.files[filename]) + return io.BytesIO(self.files[filename]) - def remove_volume_file(self, filename): - self.files.pop(filename, None) + def remove_volume_file(self, filename): + self.files.pop(filename, None) - def list_volume_directory(self, path): - paths = [] - for filename in self.files: - if filename.startswith(path): - paths.append(filename[len(path) + 1:]) + def list_volume_directory(self, path): + paths = [] + for filename in self.files: + if filename.startswith(path): + paths.append(filename[len(path) + 1 :]) - return paths + return paths - def requires_restart(self, app_config): - return False + def requires_restart(self, app_config): + return False - def reset_for_test(self): - self._config['SUPER_USERS'] = ['devtable'] - self.files = {} + def reset_for_test(self): + self._config["SUPER_USERS"] = ["devtable"] + self.files = {} - def get_volume_path(self, directory, filename): - return os.path.join(directory, filename) + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) - def get_config_dir_path(self): - return '' + def get_config_dir_path(self): + return "" diff --git a/config_app/config_util/k8saccessor.py b/config_app/config_util/k8saccessor.py index dd115681b..e59f127f2 100644 --- a/config_app/config_util/k8saccessor.py +++ b/config_app/config_util/k8saccessor.py @@ -12,295 +12,374 @@ from config_app.config_util.k8sconfig import KubernetesConfig logger = logging.getLogger(__name__) -QE_DEPLOYMENT_LABEL = 'quay-enterprise-component' -QE_CONTAINER_NAME = 'quay-enterprise-app' +QE_DEPLOYMENT_LABEL = "quay-enterprise-component" 
+QE_CONTAINER_NAME = "quay-enterprise-app" # Tuple containing response of the deployment rollout status method. # status is one of: 'failed' | 'progressing' | 'available' # message is any string describing the state. -DeploymentRolloutStatus = namedtuple('DeploymentRolloutStatus', ['status', 'message']) +DeploymentRolloutStatus = namedtuple("DeploymentRolloutStatus", ["status", "message"]) + class K8sApiException(Exception): - pass + pass def _deployment_rollout_status_message(deployment, deployment_name): - """ + """ Gets the friendly human readable message of the current state of the deployment rollout :param deployment: python dict matching: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#deployment-v1-apps :param deployment_name: string :return: DeploymentRolloutStatus """ - # Logic for rollout status pulled from the `kubectl rollout status` command: - # https://github.com/kubernetes/kubernetes/blob/d9ba19c751709c8608e09a0537eea98973f3a796/pkg/kubectl/rollout_status.go#L62 - if deployment['metadata']['generation'] <= deployment['status']['observedGeneration']: - for cond in deployment['status']['conditions']: - if cond['type'] == 'Progressing' and cond['reason'] == 'ProgressDeadlineExceeded': + # Logic for rollout status pulled from the `kubectl rollout status` command: + # https://github.com/kubernetes/kubernetes/blob/d9ba19c751709c8608e09a0537eea98973f3a796/pkg/kubectl/rollout_status.go#L62 + if ( + deployment["metadata"]["generation"] + <= deployment["status"]["observedGeneration"] + ): + for cond in deployment["status"]["conditions"]: + if ( + cond["type"] == "Progressing" + and cond["reason"] == "ProgressDeadlineExceeded" + ): + return DeploymentRolloutStatus( + status="failed", + message="Deployment %s's rollout failed. Please try again later." + % deployment_name, + ) + + desired_replicas = deployment["spec"]["replicas"] + current_replicas = deployment["status"].get("replicas", 0) + if current_replicas == 0: + return DeploymentRolloutStatus( + status="available", + message="Deployment %s updated (no replicas, so nothing to roll out)" + % deployment_name, + ) + + # Some fields are optional in the spec, so if they're omitted, replace with defaults that won't indicate a wrong status + available_replicas = deployment["status"].get("availableReplicas", 0) + updated_replicas = deployment["status"].get("updatedReplicas", 0) + + if updated_replicas < desired_replicas: + return DeploymentRolloutStatus( + status="progressing", + message="Waiting for rollout to finish: %d out of %d new replicas have been updated..." + % (updated_replicas, desired_replicas), + ) + + if current_replicas > updated_replicas: + return DeploymentRolloutStatus( + status="progressing", + message="Waiting for rollout to finish: %d old replicas are pending termination..." + % (current_replicas - updated_replicas), + ) + + if available_replicas < updated_replicas: + return DeploymentRolloutStatus( + status="progressing", + message="Waiting for rollout to finish: %d of %d updated replicas are available..." + % (available_replicas, updated_replicas), + ) + return DeploymentRolloutStatus( - status='failed', - message="Deployment %s's rollout failed. Please try again later." % deployment_name + status="available", + message="Deployment %s successfully rolled out." 
% deployment_name, ) - desired_replicas = deployment['spec']['replicas'] - current_replicas = deployment['status'].get('replicas', 0) - if current_replicas == 0: - return DeploymentRolloutStatus( - status='available', - message='Deployment %s updated (no replicas, so nothing to roll out)' % deployment_name - ) - - # Some fields are optional in the spec, so if they're omitted, replace with defaults that won't indicate a wrong status - available_replicas = deployment['status'].get('availableReplicas', 0) - updated_replicas = deployment['status'].get('updatedReplicas', 0) - - if updated_replicas < desired_replicas: - return DeploymentRolloutStatus( - status='progressing', - message='Waiting for rollout to finish: %d out of %d new replicas have been updated...' % ( - updated_replicas, desired_replicas) - ) - - if current_replicas > updated_replicas: - return DeploymentRolloutStatus( - status='progressing', - message='Waiting for rollout to finish: %d old replicas are pending termination...' % ( - current_replicas - updated_replicas) - ) - - if available_replicas < updated_replicas: - return DeploymentRolloutStatus( - status='progressing', - message='Waiting for rollout to finish: %d of %d updated replicas are available...' % ( - available_replicas, updated_replicas) - ) - return DeploymentRolloutStatus( - status='available', - message='Deployment %s successfully rolled out.' % deployment_name + status="progressing", message="Waiting for deployment spec to be updated..." ) - return DeploymentRolloutStatus( - status='progressing', - message='Waiting for deployment spec to be updated...' - ) - class KubernetesAccessorSingleton(object): - """ Singleton allowing access to kubernetes operations """ - _instance = None + """ Singleton allowing access to kubernetes operations """ - def __init__(self, kube_config=None): - self.kube_config = kube_config - if kube_config is None: - self.kube_config = KubernetesConfig.from_env() + _instance = None - KubernetesAccessorSingleton._instance = self + def __init__(self, kube_config=None): + self.kube_config = kube_config + if kube_config is None: + self.kube_config = KubernetesConfig.from_env() - @classmethod - def get_instance(cls, kube_config=None): - """ + KubernetesAccessorSingleton._instance = self + + @classmethod + def get_instance(cls, kube_config=None): + """ Singleton getter implementation, returns the instance if one exists, otherwise creates the instance and ties it to the class. :return: KubernetesAccessorSingleton """ - if cls._instance is None: - return cls(kube_config) + if cls._instance is None: + return cls(kube_config) - return cls._instance + return cls._instance - def save_secret_to_directory(self, dir_path): - """ + def save_secret_to_directory(self, dir_path): + """ Saves all files in the kubernetes secret to a local directory. Assumes the directory is empty. 
""" - secret = self._lookup_secret() + secret = self._lookup_secret() - secret_data = secret.get('data', {}) + secret_data = secret.get("data", {}) - # Make the `extra_ca_certs` dir to ensure we can populate extra certs - extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY) - os.mkdir(extra_ca_dir_path) + # Make the `extra_ca_certs` dir to ensure we can populate extra certs + extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY) + os.mkdir(extra_ca_dir_path) - for secret_filename, data in secret_data.iteritems(): - write_path = os.path.join(dir_path, secret_filename) + for secret_filename, data in secret_data.iteritems(): + write_path = os.path.join(dir_path, secret_filename) - if EXTRA_CA_DIRECTORY_PREFIX in secret_filename: - write_path = os.path.join(extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, '')) + if EXTRA_CA_DIRECTORY_PREFIX in secret_filename: + write_path = os.path.join( + extra_ca_dir_path, + secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, ""), + ) - with open(write_path, 'w') as f: - f.write(base64.b64decode(data)) + with open(write_path, "w") as f: + f.write(base64.b64decode(data)) - return 200 + return 200 - def save_file_as_secret(self, name, file_pointer): - value = file_pointer.read() - self._update_secret_file(name, value) + def save_file_as_secret(self, name, file_pointer): + value = file_pointer.read() + self._update_secret_file(name, value) - def replace_qe_secret(self, new_secret_data): - """ + def replace_qe_secret(self, new_secret_data): + """ Removes the old config and replaces it with the new_secret_data as one action """ - # Check first that the namespace for Red Hat Quay exists. If it does not, report that - # as an error, as it seems to be a common issue. - namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace) - response = self._execute_k8s_api('GET', namespace_url) - if response.status_code // 100 != 2: - msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace - raise Exception(msg) + # Check first that the namespace for Red Hat Quay exists. If it does not, report that + # as an error, as it seems to be a common issue. + namespace_url = "namespaces/%s" % (self.kube_config.qe_namespace) + response = self._execute_k8s_api("GET", namespace_url) + if response.status_code // 100 != 2: + msg = ( + "A Kubernetes namespace with name `%s` must be created to save config" + % self.kube_config.qe_namespace + ) + raise Exception(msg) - # Check if the secret exists. If not, then we create an empty secret and then update the file - # inside. - secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret) - secret = self._lookup_secret() - if secret is None: - self._assert_success(self._execute_k8s_api('POST', secret_url, { - "kind": "Secret", - "apiVersion": "v1", - "metadata": { - "name": self.kube_config.qe_config_secret - }, - "data": {} - })) + # Check if the secret exists. If not, then we create an empty secret and then update the file + # inside. + secret_url = "namespaces/%s/secrets/%s" % ( + self.kube_config.qe_namespace, + self.kube_config.qe_config_secret, + ) + secret = self._lookup_secret() + if secret is None: + self._assert_success( + self._execute_k8s_api( + "POST", + secret_url, + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": {"name": self.kube_config.qe_config_secret}, + "data": {}, + }, + ) + ) - # Update the secret to reflect the file change. 
- secret['data'] = new_secret_data + # Update the secret to reflect the file change. + secret["data"] = new_secret_data - self._assert_success(self._execute_k8s_api('PUT', secret_url, secret)) + self._assert_success(self._execute_k8s_api("PUT", secret_url, secret)) - def get_deployment_rollout_status(self, deployment_name): - """" + def get_deployment_rollout_status(self, deployment_name): + """" Returns the status of a rollout of a given deployment :return _DeploymentRolloutStatus """ - deployment_selector_url = 'namespaces/%s/deployments/%s' % ( - self.kube_config.qe_namespace, deployment_name - ) + deployment_selector_url = "namespaces/%s/deployments/%s" % ( + self.kube_config.qe_namespace, + deployment_name, + ) - response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/apps/v1') - if response.status_code != 200: - return DeploymentRolloutStatus('failed', 'Could not get deployment. Please check that the deployment exists') + response = self._execute_k8s_api( + "GET", deployment_selector_url, api_prefix="apis/apps/v1" + ) + if response.status_code != 200: + return DeploymentRolloutStatus( + "failed", + "Could not get deployment. Please check that the deployment exists", + ) - deployment = json.loads(response.text) + deployment = json.loads(response.text) - return _deployment_rollout_status_message(deployment, deployment_name) + return _deployment_rollout_status_message(deployment, deployment_name) - def get_qe_deployments(self): - """" + def get_qe_deployments(self): + """" Returns all deployments matching the label selector provided in the KubeConfig """ - deployment_selector_url = 'namespaces/%s/deployments?labelSelector=%s%%3D%s' % ( - self.kube_config.qe_namespace, QE_DEPLOYMENT_LABEL, self.kube_config.qe_deployment_selector - ) + deployment_selector_url = "namespaces/%s/deployments?labelSelector=%s%%3D%s" % ( + self.kube_config.qe_namespace, + QE_DEPLOYMENT_LABEL, + self.kube_config.qe_deployment_selector, + ) - response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/extensions/v1beta1') - if response.status_code != 200: - return None - return json.loads(response.text) + response = self._execute_k8s_api( + "GET", deployment_selector_url, api_prefix="apis/extensions/v1beta1" + ) + if response.status_code != 200: + return None + return json.loads(response.text) - def cycle_qe_deployments(self, deployment_names): - """" + def cycle_qe_deployments(self, deployment_names): + """" Triggers a rollout of all desired deployments in the qe namespace """ - for name in deployment_names: - logger.debug('Cycling deployment %s', name) - deployment_url = 'namespaces/%s/deployments/%s' % (self.kube_config.qe_namespace, name) + for name in deployment_names: + logger.debug("Cycling deployment %s", name) + deployment_url = "namespaces/%s/deployments/%s" % ( + self.kube_config.qe_namespace, + name, + ) - # There is currently no command to simply rolling restart all the pods: https://github.com/kubernetes/kubernetes/issues/13488 - # Instead, we modify the template of the deployment with a dummy env variable to trigger a cycle of the pods - # (based off this comment: https://github.com/kubernetes/kubernetes/issues/13488#issuecomment-240393845) - self._assert_success(self._execute_k8s_api('PATCH', deployment_url, { - 'spec': { - 'template': { - 'spec': { - 'containers': [{ - # Note: this name MUST match the deployment template's pod template - # (e.g.