From 708b7ee669feaeebb70cc3f2cb4dbff4cf91c6ff Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Wed, 9 May 2018 16:11:21 -0400 Subject: [PATCH 01/14] Add config flag that only renders new simple page --- endpoints/setup_web.py | 46 ++++++++++++++++++++++++++++++++++++ templates/config_index.html | 13 ++++++++++ test/data/test.db | Bin 1753088 -> 1753088 bytes web.py | 14 ++++++++++- 4 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 endpoints/setup_web.py create mode 100644 templates/config_index.html diff --git a/endpoints/setup_web.py b/endpoints/setup_web.py new file mode 100644 index 000000000..b99c3e2f8 --- /dev/null +++ b/endpoints/setup_web.py @@ -0,0 +1,46 @@ +import os +import json +import logging + +from datetime import timedelta, datetime + +from cachetools import lru_cache +from flask import (abort, redirect, request, url_for, make_response, Response, render_template, + Blueprint, jsonify, send_file, session) +from flask_login import current_user + + +from app import (app) +from endpoints.api.discovery import swagger_route_data +from endpoints.common import common_login, render_page_template +from util.cache import no_cache + + + +@lru_cache(maxsize=1) +def _get_route_data(): + return swagger_route_data(include_internal=True, compact=True) + + +def render_page_template_with_routedata(name, *args, **kwargs): + return render_page_template(name, _get_route_data(), *args, **kwargs) + +# Capture the unverified SSL errors. +logger = logging.getLogger(__name__) +logging.captureWarnings(True) + +setup_web = Blueprint('setup_web', __name__) + +STATUS_TAGS = app.config['STATUS_TAGS'] + +@setup_web.route('/', methods=['GET'], defaults={'path': ''}) +@no_cache +def index(path, **kwargs): + return render_page_template_with_routedata('config_index.html', **kwargs) + +@setup_web.errorhandler(404) +@setup_web.route('/404', methods=['GET']) +def not_found_error_display(e = None): + resp = index('', error_code=404, error_info=dict(reason='notfound')) + resp.status_code = 404 + return resp diff --git a/templates/config_index.html b/templates/config_index.html new file mode 100644 index 000000000..3abcccfec --- /dev/null +++ b/templates/config_index.html @@ -0,0 +1,13 @@ + + + + + Config mode + + +

+    <h1>What is my purpose?</h1>
+    <h2>You create tarballs</h2>

+ + \ No newline at end of file diff --git a/test/data/test.db b/test/data/test.db index 4b5cbd65f77db551dad7e57c671859735757c398..34d3a84ba69063630149e8390d2a9a0cc20b0d9f 100644 GIT binary patch delta 182 zcmZo@$ZlxJo*>P*ZK8}b|x)&$1Z1g6#m=GFw3)&$np1h&=$_N@sVCJUuabqy>P z49%^K4Xg|e^~_8S3=J(B-GwC>LQSVTF6I#3eqP*b)t+jLQSVTF6I#3eqt?hA5qBPp3)_nt$!{ir@C y5OV=BHxTmxF)tAF0Wm)i3jnbo5DNjZFc6CXu_zFW0kJp`O8~Lt_M`ez))N4Z_C1RL diff --git a/web.py b/web.py index fd3b1768e..7a88646c9 100644 --- a/web.py +++ b/web.py @@ -9,9 +9,21 @@ from endpoints.realtime import realtime from endpoints.web import web from endpoints.webhooks import webhooks from endpoints.wellknown import wellknown +from endpoints.setup_web import setup_web + + +import os +is_config_mode = 'FLAGGED_CONFIG_MODE' in os.environ +print('\n\n\nAre we in config mode?') +print(is_config_mode) + + +if is_config_mode: + application.register_blueprint(setup_web) +else: + application.register_blueprint(web) -application.register_blueprint(web) application.register_blueprint(githubtrigger, url_prefix='/oauth2') application.register_blueprint(gitlabtrigger, url_prefix='/oauth2') application.register_blueprint(oauthlogin, url_prefix='/oauth2') From 81af2d9fcc11b32b6195c1606e05fd0595943725 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Thu, 10 May 2018 13:22:53 -0400 Subject: [PATCH 02/14] Use different js bundle with base html --- endpoints/common.py | 6 +++--- endpoints/setup_web.py | 2 +- static/configappjs/index.js | 5 +++++ templates/config_index.html | 14 ++++++-------- webpack.config.js | 5 ++++- 5 files changed, 19 insertions(+), 13 deletions(-) create mode 100644 static/configappjs/index.js diff --git a/endpoints/common.py b/endpoints/common.py index 82040bb06..a5113ca79 100644 --- a/endpoints/common.py +++ b/endpoints/common.py @@ -24,7 +24,7 @@ from _init import __version__ logger = logging.getLogger(__name__) -JS_BUNDLE_NAME = 'bundle' +JS_BUNDLE_NAME = 'main' def common_login(user_uuid, permanent_session=True): @@ -73,9 +73,9 @@ def _list_files(path, extension, contains=""): return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] -def render_page_template(name, route_data=None, **kwargs): +def render_page_template(name, route_data=None, js_bundle_name=JS_BUNDLE_NAME, **kwargs): """ Renders the page template with the given name as the response and returns its contents. """ - main_scripts = _list_files('build', 'js', JS_BUNDLE_NAME) + main_scripts = _list_files('build', 'js', js_bundle_name) use_cdn = app.config.get('USE_CDN', True) if request.args.get('use_cdn') is not None: diff --git a/endpoints/setup_web.py b/endpoints/setup_web.py index b99c3e2f8..acd2640f3 100644 --- a/endpoints/setup_web.py +++ b/endpoints/setup_web.py @@ -36,7 +36,7 @@ STATUS_TAGS = app.config['STATUS_TAGS'] @setup_web.route('/', methods=['GET'], defaults={'path': ''}) @no_cache def index(path, **kwargs): - return render_page_template_with_routedata('config_index.html', **kwargs) + return render_page_template_with_routedata('config_index.html', js_bundle_name='configapp', **kwargs) @setup_web.errorhandler(404) @setup_web.route('/404', methods=['GET']) diff --git a/static/configappjs/index.js b/static/configappjs/index.js new file mode 100644 index 000000000..9bac6fe97 --- /dev/null +++ b/static/configappjs/index.js @@ -0,0 +1,5 @@ + + +const setupPage = ''; +angular.module('quay', [setupPage]); +console.log('Hello world! 
I\'m the config app'); \ No newline at end of file diff --git a/templates/config_index.html b/templates/config_index.html index 3abcccfec..f81ae7935 100644 --- a/templates/config_index.html +++ b/templates/config_index.html @@ -1,13 +1,11 @@ - - - - +{% extends "base.html" %} + +{% block title %} Config mode - - +{% endblock %} +{% block body_content %}

    <h1>What is my purpose?</h1>
    <h2>You create tarballs</h2>

- - \ No newline at end of file +{% endblock %} diff --git a/webpack.config.js b/webpack.config.js index 6944a3a64..51df57480 100644 --- a/webpack.config.js +++ b/webpack.config.js @@ -3,7 +3,10 @@ const path = require('path'); let config = { - entry: "./static/js/main.ts", + entry: { + main: "./static/js/main.ts", + configapp: "./static/configappjs/index.js" + }, output: { path: path.resolve(__dirname, "static/build"), publicPath: "/static/build/", From 92db413da69266d0177036db57f6c5f09c8c1076 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Mon, 14 May 2018 11:27:56 -0400 Subject: [PATCH 03/14] Start moving configapp into separate dir --- config_app/Procfile | 3 + config_app/__init__.py | 0 config_app/app.py | 257 ++++++++++++++++++ config_app/application.py | 15 + config_app/conf/__init__.py | 0 config_app/conf/gunicorn_local.py | 27 ++ config_app/config_endpoints/__init__.py | 0 .../config_endpoints}/setup_web.py | 2 +- .../templates}/config_index.html | 0 config_app/web.py | 6 + local-config-app.sh | 17 ++ web.py | 6 +- 12 files changed, 327 insertions(+), 6 deletions(-) create mode 100644 config_app/Procfile create mode 100644 config_app/__init__.py create mode 100644 config_app/app.py create mode 100644 config_app/application.py create mode 100644 config_app/conf/__init__.py create mode 100644 config_app/conf/gunicorn_local.py create mode 100644 config_app/config_endpoints/__init__.py rename {endpoints => config_app/config_endpoints}/setup_web.py (94%) rename {templates => config_app/templates}/config_index.html (100%) create mode 100644 config_app/web.py create mode 100755 local-config-app.sh diff --git a/config_app/Procfile b/config_app/Procfile new file mode 100644 index 000000000..242c204d4 --- /dev/null +++ b/config_app/Procfile @@ -0,0 +1,3 @@ +app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py application:application +# webpack: npm run watch + diff --git a/config_app/__init__.py b/config_app/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/app.py b/config_app/app.py new file mode 100644 index 000000000..90f5771b6 --- /dev/null +++ b/config_app/app.py @@ -0,0 +1,257 @@ +import hashlib +import json +import logging +import os + +from functools import partial + +from Crypto.PublicKey import RSA +from flask import Flask, request, Request +from flask_login import LoginManager +from flask_mail import Mail +from flask_principal import Principal +from jwkest.jwk import RSAKey + +import features +from _init import CONF_DIR +from auth.auth_context import get_authenticated_user +from avatars.avatars import Avatar +from buildman.manager.buildcanceller import BuildCanceller +from data import database +from data import model +from data.archivedlogs import LogArchive +from data.billing import Billing +from data.buildlogs import BuildLogs +from data.cache import get_model_cache +from data.model.user import LoginWrappedDBUser +from data.queue import WorkQueue, BuildMetricQueueReporter +from data.userevent import UserEventsBuilderModule +from data.userfiles import Userfiles +from data.users import UserAuthentication +from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter +from oauth.services.github import GithubOAuthService +from oauth.services.gitlab import GitLabOAuthService +from oauth.loginmanager import OAuthLoginManager +from storage import Storage +from util.log import filter_logs +from util import get_app_url +from util.ipresolver import IPResolver +from util.saas.analytics import Analytics +from 
util.saas.useranalytics import UserAnalytics +from util.saas.exceptionlog import Sentry +from util.names import urn_generator +from util.config.configutil import generate_secret_key +from util.config.provider import get_config_provider +from util.config.superusermanager import SuperUserManager +from util.label_validator import LabelValidator +from util.metrics.metricqueue import MetricQueue +from util.metrics.prometheus import PrometheusPlugin +from util.saas.cloudwatch import start_cloudwatch_sender +from util.secscan.api import SecurityScannerAPI +from util.tufmetadata.api import TUFMetadataAPI +from util.security.instancekeys import InstanceKeys +from util.security.signing import Signer + + +OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') +OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml') +OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py') + +OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' + +DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem' + +app = Flask(__name__) +logger = logging.getLogger(__name__) + +# Instantiate the configuration. +is_testing = 'TEST' in os.environ +is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', + testing=is_testing, kubernetes=is_kubernetes) + +if is_testing: + from test.testconfig import TestConfig + logger.debug('Loading test config.') + app.config.from_object(TestConfig()) +else: + from config import DefaultConfig + logger.debug('Loading default config.') + app.config.from_object(DefaultConfig()) + app.teardown_request(database.close_db_filter) + +# Load the override config via the provider. +config_provider.update_app_config(app.config) + +# Update any configuration found in the override environment variable. +environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) +app.config.update(environ_config) + +# Allow user to define a custom storage preference for the local instance. +_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split() +if _distributed_storage_preference: + app.config['DISTRIBUTED_STORAGE_PREFERENCE'] = _distributed_storage_preference + +# Generate a secret key if none was specified. +if app.config['SECRET_KEY'] is None: + logger.debug('Generating in-memory secret key') + app.config['SECRET_KEY'] = generate_secret_key() + +# If the "preferred" scheme is https, then http is not allowed. Therefore, ensure we have a secure +# session cookie. +if (app.config['PREFERRED_URL_SCHEME'] == 'https' and + not app.config.get('FORCE_NONSECURE_SESSION_COOKIE', False)): + app.config['SESSION_COOKIE_SECURE'] = True + +# Load features from config. 
+features.import_features(app.config) + +CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8] + +logger.debug("Loaded config", extra={"config": app.config}) + + +class RequestWithId(Request): + request_gen = staticmethod(urn_generator(['request'])) + + def __init__(self, *args, **kwargs): + super(RequestWithId, self).__init__(*args, **kwargs) + self.request_id = self.request_gen() + + +@app.before_request +def _request_start(): + logger.debug('Starting request: %s (%s)', request.request_id, request.path, + extra={"request_id": request.request_id}) + + +DEFAULT_FILTER = lambda x: '[FILTERED]' +FILTERED_VALUES = [ + {'key': ['password'], 'fn': DEFAULT_FILTER}, + {'key': ['user', 'password'], 'fn': DEFAULT_FILTER}, + {'key': ['blob'], 'fn': lambda x: x[0:8]} +] + + +@app.after_request +def _request_end(resp): + jsonbody = request.get_json(force=True, silent=True) + values = request.values.to_dict() + + if jsonbody and not isinstance(jsonbody, dict): + jsonbody = {'_parsererror': jsonbody} + + if isinstance(values, dict): + filter_logs(values, FILTERED_VALUES) + + extra = { + "endpoint": request.endpoint, + "request_id" : request.request_id, + "remote_addr": request.remote_addr, + "http_method": request.method, + "original_url": request.url, + "path": request.path, + "parameters": values, + "json_body": jsonbody, + "confsha": CONFIG_DIGEST, + } + + if request.user_agent is not None: + extra["user-agent"] = request.user_agent.string + + logger.debug('Ending request: %s (%s)', request.request_id, request.path, extra=extra) + return resp + + + +root_logger = logging.getLogger() + +app.request_class = RequestWithId + +# Register custom converters. +app.url_map.converters['regex'] = RegexConverter +app.url_map.converters['repopath'] = RepositoryPathConverter +app.url_map.converters['apirepopath'] = APIRepositoryPathConverter + +Principal(app, use_sessions=False) + +tf = app.config['DB_TRANSACTION_FACTORY'] + +model_cache = get_model_cache(app.config) +avatar = Avatar(app) +login_manager = LoginManager(app) +mail = Mail(app) +prometheus = PrometheusPlugin(app) +metric_queue = MetricQueue(prometheus) +chunk_cleanup_queue = WorkQueue(app.config['CHUNK_CLEANUP_QUEUE_NAME'], tf, metric_queue=metric_queue) +instance_keys = InstanceKeys(app) +ip_resolver = IPResolver(app) +storage = Storage(app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver) +userfiles = Userfiles(app, storage) +log_archive = LogArchive(app, storage) +analytics = Analytics(app) +user_analytics = UserAnalytics(app) +billing = Billing(app) +sentry = Sentry(app) +build_logs = BuildLogs(app) +authentication = UserAuthentication(app, config_provider, OVERRIDE_CONFIG_DIRECTORY) +userevents = UserEventsBuilderModule(app) +superusers = SuperUserManager(app) +signer = Signer(app, config_provider) +instance_keys = InstanceKeys(app) +label_validator = LabelValidator(app) +build_canceller = BuildCanceller(app) + +start_cloudwatch_sender(metric_queue, app) + +github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG') +gitlab_trigger = GitLabOAuthService(app.config, 'GITLAB_TRIGGER_CONFIG') + +oauth_login = OAuthLoginManager(app.config) +oauth_apps = [github_trigger, gitlab_trigger] + +image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf, + has_namespace=False, metric_queue=metric_queue) +dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf, + metric_queue=metric_queue, + reporter=BuildMetricQueueReporter(metric_queue), + 
has_namespace=True) +notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_namespace=True, + metric_queue=metric_queue) +secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf, + has_namespace=False, + metric_queue=metric_queue) + +# Note: We set `has_namespace` to `False` here, as we explicitly want this queue to not be emptied +# when a namespace is marked for deletion. +namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False, + metric_queue=metric_queue) + +all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue, + secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue] + +secscan_api = SecurityScannerAPI(app, app.config, storage) +tuf_metadata_api = TUFMetadataAPI(app, app.config) + +# Check for a key in config. If none found, generate a new signing key for Docker V2 manifests. +_v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME) +if os.path.exists(_v2_key_path): + docker_v2_signing_key = RSAKey().load(_v2_key_path) +else: + docker_v2_signing_key = RSAKey(key=RSA.generate(2048)) + + +database.configure(app.config) +model.config.app_config = app.config +model.config.store = storage +model.config.register_image_cleanup_callback(secscan_api.cleanup_layers) +model.config.register_repo_cleanup_callback(tuf_metadata_api.delete_metadata) + + +@login_manager.user_loader +def load_user(user_uuid): + logger.debug('User loader loading deferred user with uuid: %s', user_uuid) + return LoginWrappedDBUser(user_uuid) + + +get_app_url = partial(get_app_url, app.config) diff --git a/config_app/application.py b/config_app/application.py new file mode 100644 index 000000000..86916e714 --- /dev/null +++ b/config_app/application.py @@ -0,0 +1,15 @@ +import os +import logging +import logging.config + +from util.log import logfile_path +from app import app as application + + +# Bind all of the blueprints +import web + + +if __name__ == '__main__': + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) + application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') diff --git a/config_app/conf/__init__.py b/config_app/conf/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py new file mode 100644 index 000000000..b33558ef2 --- /dev/null +++ b/config_app/conf/gunicorn_local.py @@ -0,0 +1,27 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + +from Crypto import Random +from util.log import logfile_path +from util.workers import get_worker_count + + +logconfig = logfile_path(debug=True) +bind = '0.0.0.0:5000' +workers = get_worker_count('local', 2, minimum=2, maximum=8) +worker_class = 'gevent' +daemon = False +pythonpath = '.' +preload_app = True + +def post_fork(server, worker): + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. 
+ Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/config_app/config_endpoints/__init__.py b/config_app/config_endpoints/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py similarity index 94% rename from endpoints/setup_web.py rename to config_app/config_endpoints/setup_web.py index acd2640f3..c819dfb1d 100644 --- a/endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -29,7 +29,7 @@ def render_page_template_with_routedata(name, *args, **kwargs): logger = logging.getLogger(__name__) logging.captureWarnings(True) -setup_web = Blueprint('setup_web', __name__) +setup_web = Blueprint('setup_web', __name__, template_folder='templates') STATUS_TAGS = app.config['STATUS_TAGS'] diff --git a/templates/config_index.html b/config_app/templates/config_index.html similarity index 100% rename from templates/config_index.html rename to config_app/templates/config_index.html diff --git a/config_app/web.py b/config_app/web.py new file mode 100644 index 000000000..c98239f38 --- /dev/null +++ b/config_app/web.py @@ -0,0 +1,6 @@ +from app import app as application +from config_endpoints.setup_web import setup_web + + +application.register_blueprint(setup_web) + diff --git a/local-config-app.sh b/local-config-app.sh new file mode 100755 index 000000000..6dc723670 --- /dev/null +++ b/local-config-app.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +cat << "EOF" + __ __ + / \ / \ ______ _ _ __ __ __ + / /\ / /\ \ / __ \ | | | | / \ \ \ / / +/ / / / \ \ | | | | | | | | / /\ \ \ / +\ \ \ \ / / | |__| | | |__| | / ____ \ | | + \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| + \__/ \__/ \ \__ + \___\ by CoreOS + + Build, Store, and Distribute your Containers + + +EOF + +goreman -basedir "config_app" start diff --git a/web.py b/web.py index 7a88646c9..ed2ef24b6 100644 --- a/web.py +++ b/web.py @@ -9,7 +9,6 @@ from endpoints.realtime import realtime from endpoints.web import web from endpoints.webhooks import webhooks from endpoints.wellknown import wellknown -from endpoints.setup_web import setup_web import os @@ -18,10 +17,7 @@ print('\n\n\nAre we in config mode?') print(is_config_mode) -if is_config_mode: - application.register_blueprint(setup_web) -else: - application.register_blueprint(web) +application.register_blueprint(web) application.register_blueprint(githubtrigger, url_prefix='/oauth2') From 15c15faf306c61251a4d9082d83345d2ff8220d9 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Mon, 14 May 2018 13:12:42 -0400 Subject: [PATCH 04/14] Return template from call --- config_app/app.py | 254 ----------------------- config_app/application.py | 8 +- config_app/config_endpoints/common.py | 78 +++++++ config_app/config_endpoints/setup_web.py | 21 +- config_app/templates/config_index.html | 23 +- 5 files changed, 105 insertions(+), 279 deletions(-) create mode 100644 config_app/config_endpoints/common.py diff --git a/config_app/app.py b/config_app/app.py index 90f5771b6..ba09f5713 100644 --- a/config_app/app.py +++ b/config_app/app.py @@ -1,257 +1,3 @@ -import hashlib -import json -import logging -import os - -from functools import partial - -from Crypto.PublicKey import RSA from flask import Flask, request, Request -from flask_login import LoginManager -from flask_mail import Mail -from flask_principal import Principal -from jwkest.jwk import RSAKey - -import features -from _init 
import CONF_DIR -from auth.auth_context import get_authenticated_user -from avatars.avatars import Avatar -from buildman.manager.buildcanceller import BuildCanceller -from data import database -from data import model -from data.archivedlogs import LogArchive -from data.billing import Billing -from data.buildlogs import BuildLogs -from data.cache import get_model_cache -from data.model.user import LoginWrappedDBUser -from data.queue import WorkQueue, BuildMetricQueueReporter -from data.userevent import UserEventsBuilderModule -from data.userfiles import Userfiles -from data.users import UserAuthentication -from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter -from oauth.services.github import GithubOAuthService -from oauth.services.gitlab import GitLabOAuthService -from oauth.loginmanager import OAuthLoginManager -from storage import Storage -from util.log import filter_logs -from util import get_app_url -from util.ipresolver import IPResolver -from util.saas.analytics import Analytics -from util.saas.useranalytics import UserAnalytics -from util.saas.exceptionlog import Sentry -from util.names import urn_generator -from util.config.configutil import generate_secret_key -from util.config.provider import get_config_provider -from util.config.superusermanager import SuperUserManager -from util.label_validator import LabelValidator -from util.metrics.metricqueue import MetricQueue -from util.metrics.prometheus import PrometheusPlugin -from util.saas.cloudwatch import start_cloudwatch_sender -from util.secscan.api import SecurityScannerAPI -from util.tufmetadata.api import TUFMetadataAPI -from util.security.instancekeys import InstanceKeys -from util.security.signing import Signer - - -OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') -OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml') -OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py') - -OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' - -DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem' app = Flask(__name__) -logger = logging.getLogger(__name__) - -# Instantiate the configuration. -is_testing = 'TEST' in os.environ -is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ -config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', - testing=is_testing, kubernetes=is_kubernetes) - -if is_testing: - from test.testconfig import TestConfig - logger.debug('Loading test config.') - app.config.from_object(TestConfig()) -else: - from config import DefaultConfig - logger.debug('Loading default config.') - app.config.from_object(DefaultConfig()) - app.teardown_request(database.close_db_filter) - -# Load the override config via the provider. -config_provider.update_app_config(app.config) - -# Update any configuration found in the override environment variable. -environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) -app.config.update(environ_config) - -# Allow user to define a custom storage preference for the local instance. -_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split() -if _distributed_storage_preference: - app.config['DISTRIBUTED_STORAGE_PREFERENCE'] = _distributed_storage_preference - -# Generate a secret key if none was specified. -if app.config['SECRET_KEY'] is None: - logger.debug('Generating in-memory secret key') - app.config['SECRET_KEY'] = generate_secret_key() - -# If the "preferred" scheme is https, then http is not allowed. 
Therefore, ensure we have a secure -# session cookie. -if (app.config['PREFERRED_URL_SCHEME'] == 'https' and - not app.config.get('FORCE_NONSECURE_SESSION_COOKIE', False)): - app.config['SESSION_COOKIE_SECURE'] = True - -# Load features from config. -features.import_features(app.config) - -CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8] - -logger.debug("Loaded config", extra={"config": app.config}) - - -class RequestWithId(Request): - request_gen = staticmethod(urn_generator(['request'])) - - def __init__(self, *args, **kwargs): - super(RequestWithId, self).__init__(*args, **kwargs) - self.request_id = self.request_gen() - - -@app.before_request -def _request_start(): - logger.debug('Starting request: %s (%s)', request.request_id, request.path, - extra={"request_id": request.request_id}) - - -DEFAULT_FILTER = lambda x: '[FILTERED]' -FILTERED_VALUES = [ - {'key': ['password'], 'fn': DEFAULT_FILTER}, - {'key': ['user', 'password'], 'fn': DEFAULT_FILTER}, - {'key': ['blob'], 'fn': lambda x: x[0:8]} -] - - -@app.after_request -def _request_end(resp): - jsonbody = request.get_json(force=True, silent=True) - values = request.values.to_dict() - - if jsonbody and not isinstance(jsonbody, dict): - jsonbody = {'_parsererror': jsonbody} - - if isinstance(values, dict): - filter_logs(values, FILTERED_VALUES) - - extra = { - "endpoint": request.endpoint, - "request_id" : request.request_id, - "remote_addr": request.remote_addr, - "http_method": request.method, - "original_url": request.url, - "path": request.path, - "parameters": values, - "json_body": jsonbody, - "confsha": CONFIG_DIGEST, - } - - if request.user_agent is not None: - extra["user-agent"] = request.user_agent.string - - logger.debug('Ending request: %s (%s)', request.request_id, request.path, extra=extra) - return resp - - - -root_logger = logging.getLogger() - -app.request_class = RequestWithId - -# Register custom converters. 
-app.url_map.converters['regex'] = RegexConverter -app.url_map.converters['repopath'] = RepositoryPathConverter -app.url_map.converters['apirepopath'] = APIRepositoryPathConverter - -Principal(app, use_sessions=False) - -tf = app.config['DB_TRANSACTION_FACTORY'] - -model_cache = get_model_cache(app.config) -avatar = Avatar(app) -login_manager = LoginManager(app) -mail = Mail(app) -prometheus = PrometheusPlugin(app) -metric_queue = MetricQueue(prometheus) -chunk_cleanup_queue = WorkQueue(app.config['CHUNK_CLEANUP_QUEUE_NAME'], tf, metric_queue=metric_queue) -instance_keys = InstanceKeys(app) -ip_resolver = IPResolver(app) -storage = Storage(app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver) -userfiles = Userfiles(app, storage) -log_archive = LogArchive(app, storage) -analytics = Analytics(app) -user_analytics = UserAnalytics(app) -billing = Billing(app) -sentry = Sentry(app) -build_logs = BuildLogs(app) -authentication = UserAuthentication(app, config_provider, OVERRIDE_CONFIG_DIRECTORY) -userevents = UserEventsBuilderModule(app) -superusers = SuperUserManager(app) -signer = Signer(app, config_provider) -instance_keys = InstanceKeys(app) -label_validator = LabelValidator(app) -build_canceller = BuildCanceller(app) - -start_cloudwatch_sender(metric_queue, app) - -github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG') -gitlab_trigger = GitLabOAuthService(app.config, 'GITLAB_TRIGGER_CONFIG') - -oauth_login = OAuthLoginManager(app.config) -oauth_apps = [github_trigger, gitlab_trigger] - -image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf, - has_namespace=False, metric_queue=metric_queue) -dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf, - metric_queue=metric_queue, - reporter=BuildMetricQueueReporter(metric_queue), - has_namespace=True) -notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_namespace=True, - metric_queue=metric_queue) -secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf, - has_namespace=False, - metric_queue=metric_queue) - -# Note: We set `has_namespace` to `False` here, as we explicitly want this queue to not be emptied -# when a namespace is marked for deletion. -namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False, - metric_queue=metric_queue) - -all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue, - secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue] - -secscan_api = SecurityScannerAPI(app, app.config, storage) -tuf_metadata_api = TUFMetadataAPI(app, app.config) - -# Check for a key in config. If none found, generate a new signing key for Docker V2 manifests. 
-_v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME) -if os.path.exists(_v2_key_path): - docker_v2_signing_key = RSAKey().load(_v2_key_path) -else: - docker_v2_signing_key = RSAKey(key=RSA.generate(2048)) - - -database.configure(app.config) -model.config.app_config = app.config -model.config.store = storage -model.config.register_image_cleanup_callback(secscan_api.cleanup_layers) -model.config.register_repo_cleanup_callback(tuf_metadata_api.delete_metadata) - - -@login_manager.user_loader -def load_user(user_uuid): - logger.debug('User loader loading deferred user with uuid: %s', user_uuid) - return LoginWrappedDBUser(user_uuid) - - -get_app_url = partial(get_app_url, app.config) diff --git a/config_app/application.py b/config_app/application.py index 86916e714..a4d231092 100644 --- a/config_app/application.py +++ b/config_app/application.py @@ -1,8 +1,8 @@ -import os -import logging -import logging.config +# import os +# import logging +# import logging.config -from util.log import logfile_path +# from util.log import logfile_path from app import app as application diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py new file mode 100644 index 000000000..1bcdfb03d --- /dev/null +++ b/config_app/config_endpoints/common.py @@ -0,0 +1,78 @@ +from flask import make_response, render_template, request, session + +def render_page_template(name, route_data=None, **kwargs): + """ Renders the page template with the given name as the response and returns its contents. """ + # main_scripts = _list_files('build', 'js', js_bundle_name) + # + # use_cdn = app.config.get('USE_CDN', True) + # if request.args.get('use_cdn') is not None: + # use_cdn = request.args.get('use_cdn') == 'true' + # + # external_styles = get_external_css(local=not use_cdn) + # external_scripts = get_external_javascript(local=not use_cdn) + # + # # Add Stripe checkout if billing is enabled. 
+ # if features.BILLING: + # external_scripts.append('//checkout.stripe.com/checkout.js') + # + # def get_external_login_config(): + # login_config = [] + # for login_service in oauth_login.services: + # login_config.append({ + # 'id': login_service.service_id(), + # 'title': login_service.service_name(), + # 'config': login_service.get_public_config(), + # 'icon': login_service.get_icon(), + # }) + # + # return login_config + # + # def get_oauth_config(): + # oauth_config = {} + # for oauth_app in oauth_apps: + # oauth_config[oauth_app.key_name] = oauth_app.get_public_config() + # + # return oauth_config + # + # contact_href = None + # if len(app.config.get('CONTACT_INFO', [])) == 1: + # contact_href = app.config['CONTACT_INFO'][0] + # + # version_number = '' + # if not features.BILLING: + # version_number = 'Quay %s' % __version__ + # + # scopes_set = {scope.scope: scope._asdict() for scope in scopes.app_scopes(app.config).values()} + + contents = render_template(name, + route_data=route_data, + # external_styles=external_styles, + # external_scripts=external_scripts, + # main_scripts=main_scripts, + # feature_set=features.get_features(), + # config_set=frontend_visible_config(app.config), + # oauth_set=get_oauth_config(), + # external_login_set=get_external_login_config(), + # scope_set=scopes_set, + # vuln_priority_set=PRIORITY_LEVELS, + # enterprise_logo=app.config.get('ENTERPRISE_LOGO_URL', ''), + # mixpanel_key=app.config.get('MIXPANEL_KEY', ''), + # munchkin_key=app.config.get('MARKETO_MUNCHKIN_ID', ''), + # recaptcha_key=app.config.get('RECAPTCHA_SITE_KEY', ''), + # google_tagmanager_key=app.config.get('GOOGLE_TAGMANAGER_KEY', ''), + # google_anaytics_key=app.config.get('GOOGLE_ANALYTICS_KEY', ''), + # sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''), + # is_debug=str(app.config.get('DEBUGGING', False)).lower(), + # show_chat=features.SUPPORT_CHAT, + # aci_conversion=features.ACI_CONVERSION, + # has_billing=features.BILLING, + # contact_href=contact_href, + # hostname=app.config['SERVER_HOSTNAME'], + # preferred_scheme=app.config['PREFERRED_URL_SCHEME'], + # version_number=version_number, + # current_year=datetime.datetime.now().year, + **kwargs) + + resp = make_response(contents) + resp.headers['X-FRAME-OPTIONS'] = 'DENY' + return resp diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py index c819dfb1d..eb07ae01d 100644 --- a/config_app/config_endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -5,25 +5,26 @@ import logging from datetime import timedelta, datetime from cachetools import lru_cache -from flask import (abort, redirect, request, url_for, make_response, Response, render_template, - Blueprint, jsonify, send_file, session) -from flask_login import current_user +# from flask import (abort, redirect, request, url_for, make_response, Response, render_template, +# Blueprint, jsonify, send_file, session) +from flask import Blueprint +# from flask_login import current_user from app import (app) -from endpoints.api.discovery import swagger_route_data -from endpoints.common import common_login, render_page_template +# from endpoints.api.discovery import swagger_route_data +from common import render_page_template from util.cache import no_cache -@lru_cache(maxsize=1) -def _get_route_data(): - return swagger_route_data(include_internal=True, compact=True) +# @lru_cache(maxsize=1) +# def _get_route_data(): +# return swagger_route_data(include_internal=True, compact=True) def 
render_page_template_with_routedata(name, *args, **kwargs): - return render_page_template(name, _get_route_data(), *args, **kwargs) + return render_page_template(name, *args, **kwargs) # Capture the unverified SSL errors. logger = logging.getLogger(__name__) @@ -31,7 +32,7 @@ logging.captureWarnings(True) setup_web = Blueprint('setup_web', __name__, template_folder='templates') -STATUS_TAGS = app.config['STATUS_TAGS'] +# STATUS_TAGS = app.config['STATUS_TAGS'] @setup_web.route('/', methods=['GET'], defaults={'path': ''}) @no_cache diff --git a/config_app/templates/config_index.html b/config_app/templates/config_index.html index f81ae7935..234e5fb5c 100644 --- a/config_app/templates/config_index.html +++ b/config_app/templates/config_index.html @@ -1,11 +1,12 @@ -{% extends "base.html" %} - -{% block title %} - Config mode -{% endblock %} -{% block body_content %} -

-    <h1>What is my purpose?</h1>
-    <h2>You create tarballs</h2>

-{% endblock %}
+<html>
+  <head>
+    <title>Config app</title>
+  </head>
+  <body>
+    <h1>What is my purpose</h1>
+    <h2>You make tarballs</h2>
+ + From d080ca2cc6cb482b5e20aab4fb02e8f3a10272f3 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Mon, 14 May 2018 15:45:26 -0400 Subject: [PATCH 05/14] Create webpack config for config app further improve developer morale get initial angular loading Add remote css to config index Starts work to port endpoints into config app Add the api blueprint --- config_app/Procfile | 2 +- config_app/app.py | 3 +- config_app/application.py | 10 +- config_app/config_endpoints/api/__init__.py | 151 ++ config_app/config_endpoints/common.py | 363 +++- config_app/config_endpoints/setup_web.py | 43 +- config_app/js/components/file-upload-box.js | 172 ++ config_app/js/config-app.module.ts | 45 + .../config-bool-field.html | 8 + .../config-certificates-field.html | 76 + .../config-contact-field.html | 46 + .../config-contacts-field.html | 4 + .../config-file-field.html | 13 + .../config-list-field.html | 17 + .../config-map-field.html | 20 + .../config-numeric-field.html | 6 + .../config-parsed-field.html | 1 + .../config-service-key-field.html | 29 + .../config-setup-tool.html | 1656 ++++++++++++++++ .../config-string-field.html | 10 + .../config-string-list-field.html | 6 + .../config-variable-field.html | 10 + .../core-config-setup/config-setup-tool.html | 1657 +++++++++++++++++ .../js/core-config-setup/core-config-setup.js | 1454 +++++++++++++++ config_app/js/main.ts | 36 + config_app/js/services/api-service.js | 332 ++++ config_app/js/services/container-service.js | 45 + config_app/js/services/cookie-service.js | 23 + config_app/js/services/features-config.js | 91 + config_app/js/services/user-service.js | 217 +++ config_app/js/services/util-service.js | 83 + config_app/js/setup/setup.component.js | 332 ++++ config_app/js/setup/setup.html | 311 ++++ config_app/static/css/core-ui.css | 1500 +++++++++++++++ .../static/lib/angular-file-upload.min.js | 2 + config_app/templates/config_index.html | 12 - config_app/templates/index.html | 51 + config_app/util/__init__.py | 0 config_app/util/baseprovider.py | 128 ++ config_app/util/config.py | 21 + config_app/util/fileprovider.py | 60 + config_app/web.py | 4 +- config_app/webpack.config.js | 60 + endpoints/common.py | 6 +- local-config-app.sh | 10 +- package.json | 5 +- static/configappjs/index.js | 5 - web.py | 8 - webpack.config.js | 5 +- 49 files changed, 8996 insertions(+), 153 deletions(-) create mode 100644 config_app/config_endpoints/api/__init__.py create mode 100644 config_app/js/components/file-upload-box.js create mode 100644 config_app/js/config-app.module.ts create mode 100644 config_app/js/config-field-templates/config-bool-field.html create mode 100644 config_app/js/config-field-templates/config-certificates-field.html create mode 100644 config_app/js/config-field-templates/config-contact-field.html create mode 100644 config_app/js/config-field-templates/config-contacts-field.html create mode 100644 config_app/js/config-field-templates/config-file-field.html create mode 100644 config_app/js/config-field-templates/config-list-field.html create mode 100644 config_app/js/config-field-templates/config-map-field.html create mode 100644 config_app/js/config-field-templates/config-numeric-field.html create mode 100644 config_app/js/config-field-templates/config-parsed-field.html create mode 100644 config_app/js/config-field-templates/config-service-key-field.html create mode 100644 config_app/js/config-field-templates/config-setup-tool.html create mode 100644 config_app/js/config-field-templates/config-string-field.html create mode 100644 
config_app/js/config-field-templates/config-string-list-field.html create mode 100644 config_app/js/config-field-templates/config-variable-field.html create mode 100644 config_app/js/core-config-setup/config-setup-tool.html create mode 100644 config_app/js/core-config-setup/core-config-setup.js create mode 100644 config_app/js/main.ts create mode 100644 config_app/js/services/api-service.js create mode 100644 config_app/js/services/container-service.js create mode 100644 config_app/js/services/cookie-service.js create mode 100644 config_app/js/services/features-config.js create mode 100644 config_app/js/services/user-service.js create mode 100644 config_app/js/services/util-service.js create mode 100644 config_app/js/setup/setup.component.js create mode 100644 config_app/js/setup/setup.html create mode 100644 config_app/static/css/core-ui.css create mode 100644 config_app/static/lib/angular-file-upload.min.js delete mode 100644 config_app/templates/config_index.html create mode 100644 config_app/templates/index.html create mode 100644 config_app/util/__init__.py create mode 100644 config_app/util/baseprovider.py create mode 100644 config_app/util/config.py create mode 100644 config_app/util/fileprovider.py create mode 100644 config_app/webpack.config.js delete mode 100644 static/configappjs/index.js diff --git a/config_app/Procfile b/config_app/Procfile index 242c204d4..0ea2ba9c6 100644 --- a/config_app/Procfile +++ b/config_app/Procfile @@ -1,3 +1,3 @@ app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py application:application -# webpack: npm run watch +# webpack: npm run watch-config-app diff --git a/config_app/app.py b/config_app/app.py index ba09f5713..811f51457 100644 --- a/config_app/app.py +++ b/config_app/app.py @@ -1,3 +1,4 @@ -from flask import Flask, request, Request +from flask import Flask app = Flask(__name__) + diff --git a/config_app/application.py b/config_app/application.py index a4d231092..9f4249e00 100644 --- a/config_app/application.py +++ b/config_app/application.py @@ -1,15 +1,9 @@ -# import os -# import logging -# import logging.config - -# from util.log import logfile_path from app import app as application - # Bind all of the blueprints import web if __name__ == '__main__': - logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) - application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) + application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py new file mode 100644 index 000000000..67580475a --- /dev/null +++ b/config_app/config_endpoints/api/__init__.py @@ -0,0 +1,151 @@ +import logging + +from config_app import app +from config_app.util.config import config_provider + +from flask import Blueprint, request, session +from flask_restful import Resource, abort, Api, reqparse +from flask_restful.utils.cors import crossdomain + +from functools import partial, wraps + + +logger = logging.getLogger(__name__) +api_bp = Blueprint('api', __name__) + +CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With'] + + +class ApiExceptionHandlingApi(Api): + @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS) + def handle_error(self, error): + print('HANDLING ERROR IN API') + return super(ApiExceptionHandlingApi, self).handle_error(error) + + +api = ApiExceptionHandlingApi() + + +class HelloWorld(Resource): + def 
get(self): + print("hit the dummy endpoint") + return {'hello': 'world'} + + +api.add_resource(HelloWorld, '/') + + + +def verify_not_prod(func): + @add_method_metadata('enterprise_only', True) + def wrapped(*args, **kwargs): + # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail. + # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be + # absolutely sure. + # if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0: + # TODO(config_port) fixme + if False: + logger.error('!!! Super user method called IN PRODUCTION !!!') + raise StandardError() + + return func(*args, **kwargs) + + return wrapped + + +def resource(*urls, **kwargs): + def wrapper(api_resource): + if not api_resource: + return None + + api_resource.registered = True + api.add_resource(api_resource, *urls, **kwargs) + return api_resource + + return wrapper + + +class ApiResource(Resource): + registered = False + method_decorators = [] + + def options(self): + return None, 200 + + +def add_method_metadata(name, value): + def modifier(func): + if func is None: + return None + + if '__api_metadata' not in dir(func): + func.__api_metadata = {} + func.__api_metadata[name] = value + return func + + return modifier + + +def method_metadata(func, name): + if func is None: + return None + + if '__api_metadata' in dir(func): + return func.__api_metadata.get(name, None) + return None + + +def no_cache(f): + @wraps(f) + def add_no_cache(*args, **kwargs): + response = f(*args, **kwargs) + if response is not None: + response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate' + return response + return add_no_cache + + +nickname = partial(add_method_metadata, 'nickname') + +api.init_app(api_bp) +# api.decorators = [csrf_protect(), +# crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS), +# process_oauth, time_decorator(api_bp.name, metric_queue), +# require_xhr_from_browser] + + + + +@resource('/v1/superuser/config') +class SuperUserConfig(ApiResource): + """ Resource for fetching and updating the current configuration, if any. """ + schemas = { + 'UpdateConfig': { + 'type': 'object', + 'description': 'Updates the YAML config file', + 'required': [ + 'config', + 'hostname' + ], + 'properties': { + 'config': { + 'type': 'object' + }, + 'hostname': { + 'type': 'string' + }, + 'password': { + 'type': 'string' + }, + }, + }, + } + + @verify_not_prod + @nickname('scGetConfig') + def get(self): + """ Returns the currently defined configuration, if any. 
""" + config_object = config_provider.get_config() + return { + 'config': config_object + } diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py index 1bcdfb03d..1378f0209 100644 --- a/config_app/config_endpoints/common.py +++ b/config_app/config_endpoints/common.py @@ -1,78 +1,305 @@ -from flask import make_response, render_template, request, session +import logging +import os +import re +import sys +from collections import OrderedDict +from cachetools import lru_cache -def render_page_template(name, route_data=None, **kwargs): +from flask import make_response, render_template +from flask_restful import reqparse + +from config_app.config_endpoints.api import method_metadata +from config_app.app import app + + +def truthy_bool(param): + return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'} + + +DEFAULT_JS_BUNDLE_NAME = 'configapp' +PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>') +logger = logging.getLogger(__name__) +TYPE_CONVERTER = { + truthy_bool: 'boolean', + str: 'string', + basestring: 'string', + reqparse.text_type: 'string', + int: 'integer', +} + + +def _list_files(path, extension, contains=""): + """ Returns a list of all the files with the given extension found under the given path. """ + + def matches(f): + return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0] + + def join_path(dp, f): + # Remove the static/ prefix. It is added in the template. + return os.path.join(dp, f)[len('static/'):] + + filepath = os.path.join('static/', path) + return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] + + +def render_page_template(name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs): """ Renders the page template with the given name as the response and returns its contents. """ - # main_scripts = _list_files('build', 'js', js_bundle_name) - # - # use_cdn = app.config.get('USE_CDN', True) - # if request.args.get('use_cdn') is not None: - # use_cdn = request.args.get('use_cdn') == 'true' - # - # external_styles = get_external_css(local=not use_cdn) - # external_scripts = get_external_javascript(local=not use_cdn) - # - # # Add Stripe checkout if billing is enabled. 
- # if features.BILLING: - # external_scripts.append('//checkout.stripe.com/checkout.js') - # - # def get_external_login_config(): - # login_config = [] - # for login_service in oauth_login.services: - # login_config.append({ - # 'id': login_service.service_id(), - # 'title': login_service.service_name(), - # 'config': login_service.get_public_config(), - # 'icon': login_service.get_icon(), - # }) - # - # return login_config - # - # def get_oauth_config(): - # oauth_config = {} - # for oauth_app in oauth_apps: - # oauth_config[oauth_app.key_name] = oauth_app.get_public_config() - # - # return oauth_config - # - # contact_href = None - # if len(app.config.get('CONTACT_INFO', [])) == 1: - # contact_href = app.config['CONTACT_INFO'][0] - # - # version_number = '' - # if not features.BILLING: - # version_number = 'Quay %s' % __version__ - # - # scopes_set = {scope.scope: scope._asdict() for scope in scopes.app_scopes(app.config).values()} + main_scripts = _list_files('build', 'js', js_bundle_name) contents = render_template(name, route_data=route_data, - # external_styles=external_styles, - # external_scripts=external_scripts, - # main_scripts=main_scripts, - # feature_set=features.get_features(), - # config_set=frontend_visible_config(app.config), - # oauth_set=get_oauth_config(), - # external_login_set=get_external_login_config(), - # scope_set=scopes_set, - # vuln_priority_set=PRIORITY_LEVELS, - # enterprise_logo=app.config.get('ENTERPRISE_LOGO_URL', ''), - # mixpanel_key=app.config.get('MIXPANEL_KEY', ''), - # munchkin_key=app.config.get('MARKETO_MUNCHKIN_ID', ''), - # recaptcha_key=app.config.get('RECAPTCHA_SITE_KEY', ''), - # google_tagmanager_key=app.config.get('GOOGLE_TAGMANAGER_KEY', ''), - # google_anaytics_key=app.config.get('GOOGLE_ANALYTICS_KEY', ''), - # sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''), - # is_debug=str(app.config.get('DEBUGGING', False)).lower(), - # show_chat=features.SUPPORT_CHAT, - # aci_conversion=features.ACI_CONVERSION, - # has_billing=features.BILLING, - # contact_href=contact_href, - # hostname=app.config['SERVER_HOSTNAME'], - # preferred_scheme=app.config['PREFERRED_URL_SCHEME'], - # version_number=version_number, - # current_year=datetime.datetime.now().year, + main_scripts=main_scripts, **kwargs) resp = make_response(contents) resp.headers['X-FRAME-OPTIONS'] = 'DENY' return resp + + +def fully_qualified_name(method_view_class): + return '%s.%s' % (method_view_class.__module__, method_view_class.__name__) + + +# @lru_cache(maxsize=1) +def generate_route_data(): + include_internal = True + compact = True + + def swagger_parameter(name, description, kind='path', param_type='string', required=True, + enum=None, schema=None): + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject + parameter_info = { + 'name': name, + 'in': kind, + 'required': required + } + + if schema: + parameter_info['schema'] = { + '$ref': '#/definitions/%s' % schema + } + else: + parameter_info['type'] = param_type + + if enum is not None and len(list(enum)) > 0: + parameter_info['enum'] = list(enum) + + return parameter_info + + paths = {} + models = {} + tags = [] + tags_added = set() + operation_ids = set() + + print('APP URL MAp:') + print(app.url_map) + for rule in app.url_map.iter_rules(): + endpoint_method = app.view_functions[rule.endpoint] + + # Verify that we have a view class for this API method. 
+ if not 'view_class' in dir(endpoint_method): + continue + + view_class = endpoint_method.view_class + + # Hide the class if it is internal. + internal = method_metadata(view_class, 'internal') + if not include_internal and internal: + continue + + # Build the tag. + parts = fully_qualified_name(view_class).split('.') + tag_name = parts[-2] + if not tag_name in tags_added: + tags_added.add(tag_name) + tags.append({ + 'name': tag_name, + 'description': (sys.modules[view_class.__module__].__doc__ or '').strip() + }) + + # Build the Swagger data for the path. + swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule) + full_name = fully_qualified_name(view_class) + path_swagger = { + 'x-name': full_name, + 'x-path': swagger_path, + 'x-tag': tag_name + } + + related_user_res = method_metadata(view_class, 'related_user_resource') + if related_user_res is not None: + path_swagger['x-user-related'] = fully_qualified_name(related_user_res) + + paths[swagger_path] = path_swagger + + # Add any global path parameters. + param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {} + if param_data_map: + path_parameters_swagger = [] + for path_parameter in param_data_map: + description = param_data_map[path_parameter].get('description') + path_parameters_swagger.append(swagger_parameter(path_parameter, description)) + + path_swagger['parameters'] = path_parameters_swagger + + # Add the individual HTTP operations. + method_names = list(rule.methods.difference(['HEAD', 'OPTIONS'])) + for method_name in method_names: + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object + method = getattr(view_class, method_name.lower(), None) + if method is None: + logger.debug('Unable to find method for %s in class %s', method_name, view_class) + continue + + operationId = method_metadata(method, 'nickname') + operation_swagger = { + 'operationId': operationId, + 'parameters': [], + } + + if operationId is None: + continue + + if operationId in operation_ids: + raise Exception('Duplicate operation Id: %s' % operationId) + + operation_ids.add(operationId) + + # Mark the method as internal. + internal = method_metadata(method, 'internal') + if internal is not None: + operation_swagger['x-internal'] = True + + if include_internal: + requires_fresh_login = method_metadata(method, 'requires_fresh_login') + if requires_fresh_login is not None: + operation_swagger['x-requires-fresh-login'] = True + + # Add the path parameters. + if rule.arguments: + for path_parameter in rule.arguments: + description = param_data_map.get(path_parameter, {}).get('description') + operation_swagger['parameters'].append(swagger_parameter(path_parameter, description)) + + # Add the query parameters. + if '__api_query_params' in dir(method): + for query_parameter_info in method.__api_query_params: + name = query_parameter_info['name'] + description = query_parameter_info['help'] + param_type = TYPE_CONVERTER[query_parameter_info['type']] + required = query_parameter_info['required'] + + operation_swagger['parameters'].append( + swagger_parameter(name, description, kind='query', + param_type=param_type, + required=required, + enum=query_parameter_info['choices'])) + + # Add the OAuth security block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject + scope = method_metadata(method, 'oauth2_scope') + if scope and not compact: + operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}] + + # Add the responses block. 
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject + response_schema_name = method_metadata(method, 'response_schema') + if not compact: + if response_schema_name: + models[response_schema_name] = view_class.schemas[response_schema_name] + + models['ApiError'] = { + 'type': 'object', + 'properties': { + 'status': { + 'type': 'integer', + 'description': 'Status code of the response.' + }, + 'type': { + 'type': 'string', + 'description': 'Reference to the type of the error.' + }, + 'detail': { + 'type': 'string', + 'description': 'Details about the specific instance of the error.' + }, + 'title': { + 'type': 'string', + 'description': 'Unique error code to identify the type of error.' + }, + 'error_message': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + }, + 'error_type': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + } + }, + 'required': [ + 'status', + 'type', + 'title', + ] + } + + responses = { + '400': { + 'description': 'Bad Request', + }, + + '401': { + 'description': 'Session required', + }, + + '403': { + 'description': 'Unauthorized access', + }, + + '404': { + 'description': 'Not found', + }, + } + + for _, body in responses.items(): + body['schema'] = {'$ref': '#/definitions/ApiError'} + + if method_name == 'DELETE': + responses['204'] = { + 'description': 'Deleted' + } + elif method_name == 'POST': + responses['201'] = { + 'description': 'Successful creation' + } + else: + responses['200'] = { + 'description': 'Successful invocation' + } + + if response_schema_name: + responses['200']['schema'] = { + '$ref': '#/definitions/%s' % response_schema_name + } + + operation_swagger['responses'] = responses + + # Add the request block. + request_schema_name = method_metadata(method, 'request_schema') + if request_schema_name and not compact: + models[request_schema_name] = view_class.schemas[request_schema_name] + + operation_swagger['parameters'].append( + swagger_parameter('body', 'Request body contents.', kind='body', + schema=request_schema_name)) + + # Add the operation to the parent path. + if not internal or (internal and include_internal): + path_swagger[method_name.lower()] = operation_swagger + + tags.sort(key=lambda t: t['name']) + paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag'])) + + if compact: + return {'paths': paths} diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py index eb07ae01d..90693d037 100644 --- a/config_app/config_endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -1,47 +1,18 @@ -import os -import json -import logging - -from datetime import timedelta, datetime - -from cachetools import lru_cache -# from flask import (abort, redirect, request, url_for, make_response, Response, render_template, -# Blueprint, jsonify, send_file, session) from flask import Blueprint -# from flask_login import current_user - - -from app import (app) -# from endpoints.api.discovery import swagger_route_data from common import render_page_template +from config_app.config_endpoints.common import generate_route_data from util.cache import no_cache - - -# @lru_cache(maxsize=1) -# def _get_route_data(): -# return swagger_route_data(include_internal=True, compact=True) - - -def render_page_template_with_routedata(name, *args, **kwargs): - return render_page_template(name, *args, **kwargs) - -# Capture the unverified SSL errors. 
-logger = logging.getLogger(__name__) -logging.captureWarnings(True) - setup_web = Blueprint('setup_web', __name__, template_folder='templates') -# STATUS_TAGS = app.config['STATUS_TAGS'] + +def render_page_template_with_routedata(name, *args, **kwargs): + return render_page_template(name, generate_route_data(), *args, **kwargs) + @setup_web.route('/', methods=['GET'], defaults={'path': ''}) @no_cache def index(path, **kwargs): - return render_page_template_with_routedata('config_index.html', js_bundle_name='configapp', **kwargs) + return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs) + -@setup_web.errorhandler(404) -@setup_web.route('/404', methods=['GET']) -def not_found_error_display(e = None): - resp = index('', error_code=404, error_info=dict(reason='notfound')) - resp.status_code = 404 - return resp diff --git a/config_app/js/components/file-upload-box.js b/config_app/js/components/file-upload-box.js new file mode 100644 index 000000000..7005c21ed --- /dev/null +++ b/config_app/js/components/file-upload-box.js @@ -0,0 +1,172 @@ +/** + * An element which adds a stylize box for uploading a file. + */ +angular.module('quay-config').directive('fileUploadBox', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: '/static/directives/file-upload-box.html', + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'selectMessage': '@selectMessage', + + 'filesSelected': '&filesSelected', + 'filesCleared': '&filesCleared', + 'filesValidated': '&filesValidated', + + 'extensions': '= $scope.selectedFiles.length) { + callback(true, fileIds); + return; + } + + // For the current file, retrieve a file-drop URL from the API for the file. + var currentFile = $scope.selectedFiles[currentIndex]; + var mimeType = currentFile.type || 'application/octet-stream'; + var data = { + 'mimeType': mimeType + }; + + $scope.currentlyUploadingFile = currentFile; + $scope.uploadProgress = 0; + + ApiService.getFiledropUrl(data).then(function(resp) { + // Perform the upload. + conductUpload(currentFile, resp.url, resp.file_id, mimeType, progressCb, doneCb); + }, function() { + callback(false, 'Could not retrieve upload URL'); + }); + }; + + // Start the uploading. + $scope.state = 'uploading'; + performFileUpload(); + }; + + $scope.handleFilesChanged = function(files) { + if ($scope.state == 'uploading') { return; } + + $scope.message = null; + $scope.selectedFiles = files; + + if (files.length == 0) { + $scope.state = 'clear'; + $scope.filesCleared(); + } else { + for (var i = 0; i < files.length; ++i) { + if (files[i].size > MAX_FILE_SIZE) { + $scope.state = 'error'; + $scope.message = 'File ' + files[i].name + ' is larger than the maximum file ' + + 'size of ' + MAX_FILE_SIZE_MB + ' MB'; + return; + } + } + + $scope.state = 'checking'; + $scope.filesSelected({ + 'files': files, + 'callback': function(status, message) { + $scope.state = status ? 
'okay' : 'error'; + $scope.message = message; + + if (status) { + $scope.filesValidated({ + 'files': files, + 'uploadFiles': uploadFiles + }); + } + } + }); + } + }; + + $scope.getAccepts = function(extensions) { + if (!extensions || !extensions.length) { + return '*'; + } + + return extensions.join(','); + }; + + $scope.$watch('reset', function(reset) { + if (reset) { + $scope.state = 'clear'; + $element.find('#file-drop-' + $scope.boxId).parent().trigger('reset'); + } + }); + } + }; + return directiveDefinitionObject; +}); \ No newline at end of file diff --git a/config_app/js/config-app.module.ts b/config_app/js/config-app.module.ts new file mode 100644 index 000000000..59fe1bf20 --- /dev/null +++ b/config_app/js/config-app.module.ts @@ -0,0 +1,45 @@ +import { NgModule } from 'ng-metadata/core'; +import * as restangular from 'restangular'; + +const quayDependencies: string[] = [ + 'restangular', + 'ngCookies', + 'angularFileUpload', + 'ngSanitize' +]; + +@NgModule(({ + imports: quayDependencies, + declarations: [], + providers: [ + provideConfig, + ] +})) +class DependencyConfig{} + + +provideConfig.$inject = [ + '$provide', + '$injector', + '$compileProvider', + 'RestangularProvider', +]; + +function provideConfig($provide: ng.auto.IProvideService, + $injector: ng.auto.IInjectorService, + $compileProvider: ng.ICompileProvider, + RestangularProvider: any): void { + + // Configure the API provider. + RestangularProvider.setBaseUrl('/api/v1/'); + + console.log('i'); +} + + +@NgModule({ + imports: [ DependencyConfig ], + declarations: [], + providers: [] +}) +export class ConfigAppModule {} diff --git a/config_app/js/config-field-templates/config-bool-field.html b/config_app/js/config-field-templates/config-bool-field.html new file mode 100644 index 000000000..190698290 --- /dev/null +++ b/config_app/js/config-field-templates/config-bool-field.html @@ -0,0 +1,8 @@ +
+
+ +
+
diff --git a/config_app/js/config-field-templates/config-certificates-field.html b/config_app/js/config-field-templates/config-certificates-field.html new file mode 100644 index 000000000..f20e4c459 --- /dev/null +++ b/config_app/js/config-field-templates/config-certificates-field.html @@ -0,0 +1,76 @@ +
+
+ +
+ extra_ca_certs is a single file and cannot be processed by this tool. If it contains a valid, concatenated list of certificates, they will be installed on container startup. +
+ +
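+ <!-- Illustrative workflow only (assumed, not part of this change): a concatenated bundle can be produced with "cat internal-ca.crt proxy-ca.crt > extra_ca_certs" before the file is placed in the configuration volume. -->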
+
+

This section lists any custom or self-signed SSL certificates that are installed in the container on startup after being read from the extra_ca_certs directory in the configuration volume. +

+

+ Custom certificates are typically used in place of publicly signed certificates for corporate-internal services. +

+

Please make sure that all custom names used for downstream services (such as Clair) are listed in the certificates below.

+
+ + + + + + +
Upload certificates: +
+
+ + + + + + + + + + + + + + +
Certificate FilenameStatusNames Handled
{{ certificate.path }} +
+ + Error: {{ certificate.error }} +
+
+ + Certificate is expired +
+
+ + Certificate is valid +
+
+
(None)
+ {{ name }} +
+ + + Delete Certificate + + +
+
+
+ Uploading, validating and updating certificate(s) +
+
+
No custom certificates found.
+
+
+
+
\ No newline at end of file diff --git a/config_app/js/config-field-templates/config-contact-field.html b/config_app/js/config-field-templates/config-contact-field.html new file mode 100644 index 000000000..58cdea0c4 --- /dev/null +++ b/config_app/js/config-field-templates/config-contact-field.html @@ -0,0 +1,46 @@ +
+ + + + + +
+ + +
+ +
+
+
diff --git a/config_app/js/config-field-templates/config-contacts-field.html b/config_app/js/config-field-templates/config-contacts-field.html new file mode 100644 index 000000000..40762934c --- /dev/null +++ b/config_app/js/config-field-templates/config-contacts-field.html @@ -0,0 +1,4 @@ +
+
+
+
diff --git a/config_app/js/config-field-templates/config-file-field.html b/config_app/js/config-field-templates/config-file-field.html new file mode 100644 index 000000000..11c4227f7 --- /dev/null +++ b/config_app/js/config-field-templates/config-file-field.html @@ -0,0 +1,13 @@ +
+ + + /conf/stack/{{ filename }} + Select a replacement file: + + Please select a file to upload as {{ filename }}: + + + + Uploading file as {{ filename }}... {{ uploadProgress }}% + +
diff --git a/config_app/js/config-field-templates/config-list-field.html b/config_app/js/config-field-templates/config-list-field.html new file mode 100644 index 000000000..9918e9a07 --- /dev/null +++ b/config_app/js/config-field-templates/config-list-field.html @@ -0,0 +1,17 @@ +
+
    +
  • + {{ item }} + + Remove + +
  • +
+ No {{ itemTitle }}s defined +
+ + +
+
diff --git a/config_app/js/config-field-templates/config-map-field.html b/config_app/js/config-field-templates/config-map-field.html new file mode 100644 index 000000000..84f086052 --- /dev/null +++ b/config_app/js/config-field-templates/config-map-field.html @@ -0,0 +1,20 @@ +
+ + + + + + +
{{ key }}{{ value }} + Remove +
+ No entries defined +
+ Add Key-Value: + + + +
+
diff --git a/config_app/js/config-field-templates/config-numeric-field.html b/config_app/js/config-field-templates/config-numeric-field.html new file mode 100644 index 000000000..8c25a2fea --- /dev/null +++ b/config_app/js/config-field-templates/config-numeric-field.html @@ -0,0 +1,6 @@ +
+
+ +
+
diff --git a/config_app/js/config-field-templates/config-parsed-field.html b/config_app/js/config-field-templates/config-parsed-field.html new file mode 100644 index 000000000..766b0a8a2 --- /dev/null +++ b/config_app/js/config-field-templates/config-parsed-field.html @@ -0,0 +1 @@ +
diff --git a/config_app/js/config-field-templates/config-service-key-field.html b/config_app/js/config-field-templates/config-service-key-field.html new file mode 100644 index 000000000..52b7c1187 --- /dev/null +++ b/config_app/js/config-field-templates/config-service-key-field.html @@ -0,0 +1,29 @@ +
+ +
+ + +
+ Could not load service keys +
+ + +
+
+ + Valid key for service {{ serviceName }} exists +
+
+ No valid key found for service {{ serviceName }} + Create Key +
+
+ + + + +
+
diff --git a/config_app/js/config-field-templates/config-setup-tool.html b/config_app/js/config-field-templates/config-setup-tool.html new file mode 100644 index 000000000..629e3b45f --- /dev/null +++ b/config_app/js/config-field-templates/config-setup-tool.html @@ -0,0 +1,1656 @@ +
+
+
+
+ + +
+
+ Custom SSL Certificates +
+
+
+
+
+ + +
+
+ Basic Configuration +
+
+ + + + + + + + + + +
Enterprise Logo URL: + +
+ Enter the full URL to your company's logo. +
+
+ +
Contact Information: + +
+ Information to show in the Contact Page. If none specified, CoreOS contact information + is displayed. +
+
+
+
+ + +
+
+ Server Configuration +
+
+ + + + + + + + + +
Server Hostname: + +
+ The HTTP host (and, optionally, the port number if a non-standard HTTP/HTTPS port is used) of the location + where the registry will be accessible on the network. +
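+ <!-- Example values (illustrative only): quay.example.com, or quay.example.com:8443 when serving on a non-standard port. -->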
+
TLS: + + +
+ Running without TLS should not be used for production workloads! +
+ +
+ Terminating TLS outside of Quay Enterprise can result in unusual behavior if the external load balancer is not + configured properly. This option is not recommended for simple setups. Please contact support + if you encounter problems while using this option. +
+ +
+ Enabling TLS also enables HTTP Strict Transport Security.
+ This prevents downgrade attacks and cookie theft, but browsers will reject all future insecure connections on this hostname. +
+ + + + + + + + + + +
Certificate: + +
+ The certificate must be in PEM format. +
+
Private key: + +
+
+ +
+
+ + +
+
+ Data Consistency Settings +
+
+
+

Relax constraints on consistency guarantees for specific operations + to enable higher performance and availability. +

+
+ + + + +
+
+ Allow repository pulls even if audit logging fails. +
+ If enabled, failures to write to the audit log will fall back from + the database to the standard logger for registry pulls. +
+
+
+
+
+ + +
+
+ Time Machine +
+
+
+

Time machine keeps older copies of tags within a repository for the configured period + of time, after which they are garbage collected. This allows users to + revert tags to older images in case they accidentally pushed a broken image. It is + highly recommended to have time machine enabled, but it does take a bit more space + in storage. +

+
+ + + + + + + + + + + + + + +
Allowed expiration periods: + +
+ The tag expiration periods users may choose from. The default tag expiration *must* be in this list. +
+
Default expiration period: + +
+ The default tag expiration period for all namespaces (users and organizations). Must be expressed in a duration string form: 30m, 1h, 1d, 2w. +
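+ <!-- Hedged example of the resulting config.yaml entries (key names assumed from stock Quay): DEFAULT_TAG_EXPIRATION: 2w together with TAG_EXPIRATION_OPTIONS: [0s, 1d, 1w, 2w, 4w]. -->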
+
Allow users to select expiration: +
+ Enable Expiration Configuration +
+ If enabled, users will be able to select the tag expiration duration for the namespace(s) they + administer, from the configured list of options. +
+
+
+
+
+ + +
+
+ redis +
+
+
+


 A Redis key-value store is required for real-time events and build logs.


+
+ + + + + + + + + + + + + + +
Redis Hostname: + +
Redis port: + +
+ Access to this port and hostname must be allowed from all hosts running + the enterprise registry +
+
Redis password: + +
+
+
+ + +
+
+ Registry Storage +
+
+
+

+ Registry images can be stored either locally or in a remote storage system. + A remote storage system is required for high-availability systems. +

+ +
+ Enable Storage Replication +
+ If enabled, replicates storage to other regions. See documentation for more information. +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Location ID: + +
+ {{ sc.location }} +
+
+ {{ storageConfigError[$index].location }} +
+ +
Set Default: +
+ Replicate to storage engine by default +
+
Storage Engine: + + +
+ {{ storageConfigError[$index].engine }} +
+
{{ field.title }}: + + + + {{ field.placeholder }} + + +
+ +
+
+ {{ field.help_text }} +
+
+ See Documentation for more information +
+
+
+ + +
+
+
+ + +
+
+ Action Log Rotation and Archiving +
+
+
+

+ All actions performed in are automatically logged. These logs are stored in a database table, which can become quite large. + Enabling log rotation and archiving will move all logs older than 30 days into storage. +

+
+
+ Enable Action Log Rotation +
+ + + + + + + + + + +
Storage location: + +
+ The storage location in which to place archived action logs. Logs will only be archived to this single location. +
+
Storage path: + +
+ The path under the configured storage engine in which to place the archived logs in JSON form. +
+
+
+ + +
+
+ Security Scanner +
+
+
+

If enabled, all images pushed to Quay will be scanned via the external security scanning service, with vulnerability information available in the UI and API, as well + as async notification support. +

+
+ +
+ Enable Security Scanning +
+
+ A scanner compliant with the Quay Security Scanning API must be running to use this feature. Documentation on running Clair can be found at Running Clair Security Scanner. +
+ + + + + + + + + + +
Authentication Key: + +
+ The security scanning service requires an authorized service key to speak to Quay. Once set up, the key + can be managed in the Service Keys panel under the Super User Admin Panel. +
+
Security Scanner Endpoint: + +
+ The HTTP URL at which the security scanner is running. +
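+ <!-- Example value (illustrative only, assuming a stock Clair deployment): http://clair.local:6060 -->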
+
+ Is the security scanner behind a domain secured with a self-signed TLS certificate? If so, please make sure to register your SSL CA in the custom certificates panel above. +
+
+
+
+ + +
+
+ Application Registry +
+
+
+

If enabled, an additional registry API will be available for managing applications (Kubernetes manifests, Helm charts) via the App Registry specification. A great place to get started is to install the Helm Registry Plugin. +

+ +
+ Enable App Registry +
+
+
+ + +
+
+ BitTorrent-based download +
+
+
+

If enabled, all images in the registry can be downloaded using the quayctl tool via the BitTorrent protocol. A JWT-compatible BitTorrent tracker such as Chihaya must be run. +

+ +
+ Enable BitTorrent downloads +
+ + + + + + +
Announce URL: + +
+ The HTTP URL at which the torrents should be announced. A JWT-compatible tracker such as Chihaya must be run to ensure proper security. Documentation on running Chihaya with + this support can be found at Running Chihaya for Quay Enterprise. +
+
+
+
+ + +
+
+ rkt Conversion +
+
+
+

If enabled, all images in the registry can be fetched via rkt fetch or any other AppC discovery-compliant implementation.

+
+ +
+ Enable ACI Conversion +
+ +
+ Documentation on generating these keys can be found at Generating ACI Signing Keys. +
+ + + + + + + + + + + + + + +
GPG2 Public Key File: + +
+ The certificate must be in PEM format. +
+
GPG2 Private Key File: + +
GPG2 Private Key Name: + +
+
+
+ + +
+
+ E-mail +
+
+
+

Valid e-mail server configuration is required for notification e-mails and the ability of + users to reset their passwords.

+
+ +
+ Enable E-mails +
+ + + + + + + + + + + + + + + + + + + + + + + +
SMTP Server: + +
SMTP Server Port: + +
TLS: +
+ Require TLS +
+
Mail Sender: + +
+ E-mail address from which all e-mails are sent. If not specified, + support@quay.io will be used. +
+
Authentication: +
+ Requires Authentication +
+ + + + + + + + + + +
Username: + +
Password: + +
+
+
+
+ + +
+
+ Internal Authentication +
+
+
+

+ Authentication for the registry can be handled by the registry itself, LDAP, Keystone, or an external JWT endpoint. +

+

+ Additional external authentication providers (such as GitHub) can also be used for logging into the UI. +

+
+ +
+
+ It is highly recommended to require encrypted client passwords. External passwords used in the Docker client will be stored in plaintext! + Enable this requirement now. +
+ +
+ Note: The "Require Encrypted Client Passwords" feature is currently enabled which will + prevent passwords from being saved as plaintext by the Docker client. +
+
+ + + + + + + + + + + + + + + + + + + +
Authentication: + +
Team synchronization: +
+ Enable Team Synchronization Support +
+
+ If enabled, organization administrators who are also superusers can set teams to have their membership synchronized with a backing group in {{ config.AUTHENTICATION_TYPE }}. +
+
Resynchronization duration: + +
+ The duration before a team must be re-synchronized. Must be expressed in a duration string form: 30m, 1h, 1d. +
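+ <!-- Example value (illustrative only): 60m to re-synchronize team membership hourly. -->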
+
Self-service team syncing setup: +
If enabled, this feature will allow *any organization administrator* to read the membership of any {{ config.AUTHENTICATION_TYPE }} group.
+
+ Allow non-superusers to enable and manage team syncing +
+
+ If enabled, non-superusers will be able to enable and manage team syncing on teams under organizations in which they are administrators. +
+
+ + + + + + + + + + + + + + + + + + + + + + + +
Keystone API Version: + +
Keystone Authentication URL: + +
+ The URL (starting with http or https) of the Keystone Server endpoint for auth. +
+
Keystone Administrator Username: + +
+ The username for the Keystone admin. +
+
Keystone Administrator Password: + +
+ The password for the Keystone admin. +
+
Keystone Administrator Tenant: + +
+ The tenant (project/group) that contains the administrator user. +
+
+ + +
+ JSON Web Token authentication allows your organization to provide an HTTP endpoint that + verifies user credentials on behalf of . +
+ Documentation + on the API required can be found here: https://github.com/coreos/jwt-auth-example. +
+ + + + + + + + + + + + + + + + + + + + + + +
Authentication Issuer: + +
+ The id of the issuer signing the JWT token. Must be unique to your organization. +
+
Public Key: + +
+ A certificate containing the public key portion of the key pair used to sign + the JSON Web Tokens. This file must be in PEM format. +
+
User Verification Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for verifying username and password credentials. +
+ +
+ Credentials will be sent in the Authorization header as Basic Auth, and this endpoint should return 200 OK on success (or a 4** otherwise). +
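+ <!-- Sketch of the exchange described above (endpoint path is whatever URL is configured): GET <verification endpoint> with header "Authorization: Basic base64(username:password)"; the server replies 200 OK for valid credentials and a 4xx status otherwise. -->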
+
User Query Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + users based on a prefix query. This is optional. +
+ +
+ The prefix query will be sent as a query parameter with name query. +
+
User Lookup Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + a user by username or email address. +
+ +
+ The username or email address will be sent as a query parameter with name username. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LDAP URI: + +
+ The full LDAP URI, including the ldap:// or ldaps:// prefix. +
+
Base DN: + +
+ A Distinguished Name path which forms the base path for looking up all LDAP records. +
+
+ Example: dc=my,dc=domain,dc=com +
+
User Relative DN: + +
+ A Distinguished Name path which forms the base path for looking up all user LDAP records, + relative to the Base DN defined above. +
+
+ Example: ou=employees +
+
Secondary User Relative DNs: + +
+ A list of Distinguished Name path(s) which forms the secondary base path(s) for + looking up all user LDAP records, relative to the Base DN defined above. These path(s) + will be tried if the user is not found via the primary relative DN. +
+
+ Example: [ou=employees] +
+
Administrator DN: +
+ The Distinguished Name for the Administrator account. This account must be able to log in and view the records for all user accounts. +
+
+ Example: uid=admin,ou=employees,dc=my,dc=domain,dc=com +
+
Administrator DN Password: +
+ Note: This will be stored in + plaintext inside the config.yaml, so setting up a dedicated account or using + a password hash is highly recommended. +
+ +
+ The password for the Administrator DN. +
+
UID Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' username. Typically "uid". +
+
Mail Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' e-mail address(es). Typically "mail". +
+
Custom TLS Certificate: + +
+ If specified, the certificate (in PEM format) for the LDAP TLS connection. +
+
Allow insecure: +
+ Allow fallback to non-TLS connections +
+
+ If enabled, LDAP will fall back to insecure non-TLS connections if TLS does not succeed. +
+
+
+
+ +
+
+ External Authorization (OAuth) +
+
+ +
+
+ GitHub (Enterprise) Authentication +
+
+
+

+ If enabled, users can use GitHub or GitHub Enterprise to authenticate to the registry. +

+

+ Note: A registered GitHub (Enterprise) OAuth application is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
Organization Filtering: +
+ Restrict By Organization Membership +
+ +
+ If enabled, only members of specified GitHub + Enterprise organizations will be allowed to login via GitHub + Enterprise. +
+ + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ Google Authentication +
+
+
+

+ If enabled, users can use Google to authenticate to the registry. +

+

+ Note: A registered Google OAuth application is required. + Visit the + + Google Developer Console + + to register an application. +

+
+ +
+ Enable Google Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + +
OAuth Client ID: + + +
OAuth Client Secret: + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ + {{ config[provider]['SERVICE_NAME'] || (getOIDCProviderId(provider) + ' Authentication') }} + (Delete) +
+
+
+ Warning: This OIDC provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Service ID: + {{ getOIDCProviderId(provider) }} +
OIDC Server: + + +
+ The URL of an OIDC-compliant server. +
+
Client ID: + +
Client Secret: + +
Service Name: + + +
+ The user-friendly name to display for the service on the login page. +
+
Service Icon (optional): + + +
+ If specified, the icon to display for this login service on the login page. Can be either a URL to an icon or a CSS class name from Font Awesome +
+
Binding Field: + +
+ If selected, when a user logs in via this OIDC provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the OIDC provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this OIDC provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this OIDC provider. This is not the recommended setup. +
+
Login Scopes: + +
+ If specified, the scopes to send to the OIDC provider when performing the login flow. Note that, if specified, these scopes will + override those set by default, so this list must include a scope for OpenID Connect + (typically the openid scope) or this provider will fail. +
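+ <!-- Example value (illustrative only): a scope list such as ["openid", "email", "profile"] keeps the required openid scope while also requesting basic profile claims. -->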
+
+
+

Callback URLs for this service:

+
    +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback
  • +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/attach
  • +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/cli
  • +
+
+
+
+ + + Add OIDC Provider + What is OIDC? +
+
+ + +
+
+ Access Settings +
+
+
+

Various settings around access and authentication to the registry.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Basic Credentials Login: +
+ Login to User Interface via credentials +
+
+
+ Login to User Interface via credentials must be enabled. Click here to enable. +
+
+ Login to User Interface via credentials is enabled (requires at least one OIDC provider to disable) +
+
+
+ If enabled, users will be able to login to the user interface via their username and password credentials. +
+
+ If disabled, users will only be able to login to the user interface via one of the configured External Authentication providers. +
+
External Application tokens +
+ Allow external application tokens +
+
+ If enabled, users will be able to generate external application tokens for use on the Docker and rkt CLI. Note + that these tokens will not be required unless "App Token" is chosen as the Internal Authentication method above. +
+
External application token expiration + +
+ The expiration time for user generated external application tokens. If none, tokens will never expire. +
+
Anonymous Access: +
+ Enable Anonymous Access +
+
+ If enabled, public repositories and search can be accessed by anyone that can + reach the registry, even if they are not authenticated. Disable to only allow + authenticated users to view and pull "public" resources. +
+
User Creation: +
+ Enable Open User Creation +
+
+ If enabled, user accounts can be created by anyone (unless restricted below to invited users). + Users can always be created in the users panel in this superuser tool, even if this feature is disabled. +
+
Invite-only User Creation: +
+ Enable Invite-only User Creation +
+
+ If enabled, user accounts can only be created when a user has been invited, by e-mail address, to join a team. + Users can always be created in the users panel in this superuser tool, even if this feature is enabled. +
+
Encrypted Client Password: +
+ Require Encrypted Client Passwords +
+
+ If enabled, users will not be able to log in from the Docker command + line with a non-encrypted password and must generate an encrypted + password to use. +
+
+ This feature is highly recommended for setups with external authentication, as Docker currently stores passwords in plaintext on users' machines. +
+
Prefix username autocompletion: +
+ Allow prefix username autocompletion +
+
+ If disabled, autocompletion for users will only match on exact usernames. +
+
Team Invitations: +
+ Require Team Invitations +
+
+ If enabled, when adding a new user to a team, they will receive an invitation to join the team, with the option to decline. + Otherwise, users will be immediately part of a team when added by a team administrator. +
+
+
+
+ + +
+
+ Dockerfile Build Support +
+
+
+ If enabled, users can submit Dockerfiles to be built and pushed by . +
+ +
+ Enable Dockerfile Build +
+ +
+ Note: Build workers are required for this feature. + See Adding Build Workers for instructions on how to set up build workers. +
+
+
+ + +
+
+ GitHub (Enterprise) Build Triggers +
+
+
+

+ If enabled, users can set up GitHub or GitHub Enterprise triggers to invoke Registry builds. +

+

+ Note: A registered GitHub (Enterprise) OAuth application (separate from GitHub Authentication) is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Triggers +
+ + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
+
+
+ + +
+
+ BitBucket Build Triggers +
+
+
+

+ If enabled, users can set up BitBucket triggers to invoke Registry builds. +

+

+ Note: A registered BitBucket OAuth application is required. + View instructions on how to + + Create an OAuth Application in BitBucket + +

+
+ +
+ Enable BitBucket Triggers +
+ + + + + + + + + + +
OAuth Consumer Key: + + +
OAuth Consumer Secret: + + +
+
+
+ + +
+
+ GitLab Build Triggers +
+
+
+

+ If enabled, users can set up GitLab triggers to invoke Registry builds. +

+

+ Note: A registered GitLab OAuth application is required. + Visit the + + GitLab applications admin panel + + to create a new application. +

+

The callback URL to use is:   + {{ config.PREFERRED_URL_SCHEME || 'http' }}://{{ config.SERVER_HOSTNAME || 'localhost' }}/oauth2/gitlab/callback/trigger +

+
+ +
+ Enable GitLab Triggers +
+ + + + + + + + + + + + + + + + + + +
GitLab: + +
GitLab Endpoint: + + +
+ The GitLab Enterprise endpoint. Must start with http:// or https://. +
+
Application Id: + + +
Secret: + + +
+
+
+ + + + +
+ + +
+ + + + +
+
diff --git a/config_app/js/config-field-templates/config-string-field.html b/config_app/js/config-field-templates/config-string-field.html new file mode 100644 index 000000000..703891f89 --- /dev/null +++ b/config_app/js/config-field-templates/config-string-field.html @@ -0,0 +1,10 @@ +
+
+ +
+ {{ errorMessage }} +
+
+
diff --git a/config_app/js/config-field-templates/config-string-list-field.html b/config_app/js/config-field-templates/config-string-list-field.html new file mode 100644 index 000000000..de29dfb91 --- /dev/null +++ b/config_app/js/config-field-templates/config-string-list-field.html @@ -0,0 +1,6 @@ +
+
+ +
+
diff --git a/config_app/js/config-field-templates/config-variable-field.html b/config_app/js/config-field-templates/config-variable-field.html new file mode 100644 index 000000000..9236469cd --- /dev/null +++ b/config_app/js/config-field-templates/config-variable-field.html @@ -0,0 +1,10 @@ +
+
+ +
+ + +
diff --git a/config_app/js/core-config-setup/config-setup-tool.html b/config_app/js/core-config-setup/config-setup-tool.html new file mode 100644 index 000000000..ec3faa1c7 --- /dev/null +++ b/config_app/js/core-config-setup/config-setup-tool.html @@ -0,0 +1,1657 @@ +
+
+
+ +
+ + +
+
+ Custom SSL Certificates +
+
+
+
+
+ + +
+
+ Basic Configuration +
+
+ + + + + + + + + + +
Enterprise Logo URL: + +
+ Enter the full URL to your company's logo. +
+
+ +
Contact Information: + +
+ Information to show in the Contact Page. If none specified, CoreOS contact information + is displayed. +
+
+
+
+ + +
+
+ Server Configuration +
+
+ + + + + + + + + +
Server Hostname: + +
+ The HTTP host (and, optionally, the port number if a non-standard HTTP/HTTPS port is used) of the location + where the registry will be accessible on the network. +
+
TLS: + + +
+ Running without TLS should not be used for production workloads! +
+ +
+ Terminating TLS outside of Quay Enterprise can result in unusual behavior if the external load balancer is not + configured properly. This option is not recommended for simple setups. Please contact support + if you encounter problems while using this option. +
+ +
+ Enabling TLS also enables HTTP Strict Transport Security.
+ This prevents downgrade attacks and cookie theft, but browsers will reject all future insecure connections on this hostname. +
+ + + + + + + + + + +
Certificate: + +
+ The certificate must be in PEM format. +
+
Private key: + +
+
+ +
+
+ + +
+
+ Data Consistency Settings +
+
+
+

Relax constraints on consistency guarantees for specific operations + to enable higher performance and availability. +

+
+ + + + +
+
+ Allow repository pulls even if audit logging fails. +
+ If enabled, failures to write to the audit log will fall back from + the database to the standard logger for registry pulls. +
+
+
+
+
+ + +
+
+ Time Machine +
+
+
+

Time machine keeps older copies of tags within a repository for the configured period + of time, after which they are garbage collected. This allows users to + revert tags to older images in case they accidentally pushed a broken image. It is + highly recommended to have time machine enabled, but it does take a bit more space + in storage. +

+
+ + + + + + + + + + + + + + +
Allowed expiration periods: + +
+ The tag expiration periods users may choose from. The default tag expiration *must* be in this list. +
+
Default expiration period: + +
+ The default tag expiration period for all namespaces (users and organizations). Must be expressed in a duration string form: 30m, 1h, 1d, 2w. +
+
Allow users to select expiration: +
+ Enable Expiration Configuration +
+ If enabled, users will be able to select the tag expiration duration for the namespace(s) they + administer, from the configured list of options. +
+
+
+
+
+ + +
+
+ redis +
+
+
+


 A Redis key-value store is required for real-time events and build logs.


+
+ + + + + + + + + + + + + + +
Redis Hostname: + +
Redis port: + +
+ Access to this port and hostname must be allowed from all hosts running + the enterprise registry +
+
Redis password: + +
+
+
+ + +
+
+ Registry Storage +
+
+
+

+ Registry images can be stored either locally or in a remote storage system. + A remote storage system is required for high-availability systems. +

+ +
+ Enable Storage Replication +
+ If enabled, replicates storage to other regions. See documentation for more information. +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Location ID: + +
+ {{ sc.location }} +
+
+ {{ storageConfigError[$index].location }} +
+ +
Set Default: +
+ Replicate to storage engine by default +
+
Storage Engine: + + +
+ {{ storageConfigError[$index].engine }} +
+
{{ field.title }}: + + + + {{ field.placeholder }} + + +
+ +
+
+ {{ field.help_text }} +
+
+ See Documentation for more information +
+
+
+ + +
+
+
+ + +
+
+ Action Log Rotation and Archiving +
+
+
+

+ All actions performed in are automatically logged. These logs are stored in a database table, which can become quite large. + Enabling log rotation and archiving will move all logs older than 30 days into storage. +

+
+
+ Enable Action Log Rotation +
+ + + + + + + + + + +
Storage location: + +
+ The storage location in which to place archived action logs. Logs will only be archived to this single location. +
+
Storage path: + +
+ The path under the configured storage engine in which to place the archived logs in JSON form. +
+
+
+ + +
+
+ Security Scanner +
+
+
+

If enabled, all images pushed to Quay will be scanned via the external security scanning service, with vulnerability information available in the UI and API, as well + as async notification support. +

+
+ +
+ Enable Security Scanning +
+
+ A scanner compliant with the Quay Security Scanning API must be running to use this feature. Documentation on running Clair can be found at Running Clair Security Scanner. +
+ + + + + + + + + + +
Authentication Key: + +
+ The security scanning service requires an authorized service key to speak to Quay. Once set up, the key + can be managed in the Service Keys panel under the Super User Admin Panel. +
+
Security Scanner Endpoint: + +
+ The HTTP URL at which the security scanner is running. +
+
+ Is the security scanner behind a domain secured with a self-signed TLS certificate? If so, please make sure to register your SSL CA in the custom certificates panel above. +
+
+
+
+ + +
+
+ Application Registry +
+
+
+

If enabled, an additional registry API will be available for managing applications (Kubernetes manifests, Helm charts) via the App Registry specification. A great place to get started is to install the Helm Registry Plugin. +

+ +
+ Enable App Registry +
+
+
+ + +
+
+ BitTorrent-based download +
+
+
+

If enabled, all images in the registry can be downloaded using the quayctl tool via the BitTorrent protocol. A JWT-compatible BitTorrent tracker such as Chihaya must be run. +

+ +
+ Enable BitTorrent downloads +
+ + + + + + +
Announce URL: + +
+ The HTTP URL at which the torrents should be announced. A JWT-compatible tracker such as Chihaya must be run to ensure proper security. Documentation on running Chihaya with + this support can be found at Running Chihaya for Quay Enterprise. +
+
+
+
+ + +
+
+ rkt Conversion +
+
+
+

If enabled, all images in the registry can be fetched via rkt fetch or any other AppC discovery-compliant implementation.

+
+ +
+ Enable ACI Conversion +
+ +
+ Documentation on generating these keys can be found at Generating ACI Signing Keys. +
+ + + + + + + + + + + + + + +
GPG2 Public Key File: + +
+ The certificate must be in PEM format. +
+
GPG2 Private Key File: + +
GPG2 Private Key Name: + +
+
+
+ + +
+
+ E-mail +
+
+
+

Valid e-mail server configuration is required for notification e-mails and the ability of + users to reset their passwords.

+
+ +
+ Enable E-mails +
+ + + + + + + + + + + + + + + + + + + + + + + +
SMTP Server: + +
SMTP Server Port: + +
TLS: +
+ Require TLS +
+
Mail Sender: + +
+ E-mail address from which all e-mails are sent. If not specified, + support@quay.io will be used. +
+
Authentication: +
+ Requires Authentication +
+ + + + + + + + + + +
Username: + +
Password: + +
+
+
+
+ + +
+
+ Internal Authentication +
+
+
+

+ Authentication for the registry can be handled by the registry itself, LDAP, Keystone, or an external JWT endpoint. +

+

+ Additional external authentication providers (such as GitHub) can also be used for logging into the UI. +

+
+ +
+
+ It is highly recommended to require encrypted client passwords. External passwords used in the Docker client will be stored in plaintext! + Enable this requirement now. +
+ +
+ Note: The "Require Encrypted Client Passwords" feature is currently enabled which will + prevent passwords from being saved as plaintext by the Docker client. +
+
+ + + + + + + + + + + + + + + + + + + +
Authentication: + +
Team synchronization: +
+ Enable Team Synchronization Support +
+
+ If enabled, organization administrators who are also superusers can set teams to have their membership synchronized with a backing group in {{ config.AUTHENTICATION_TYPE }}. +
+
Resynchronization duration: + +
+ The duration before a team must be re-synchronized. Must be expressed in a duration string form: 30m, 1h, 1d. +
+
Self-service team syncing setup: +
If enabled, this feature will allow *any organization administrator* to read the membership of any {{ config.AUTHENTICATION_TYPE }} group.
+
+ Allow non-superusers to enable and manage team syncing +
+
+ If enabled, non-superusers will be able to enable and manage team syncing on teams under organizations in which they are administrators. +
+
+ + + + + + + + + + + + + + + + + + + + + + + +
Keystone API Version: + +
Keystone Authentication URL: + +
+ The URL (starting with http or https) of the Keystone Server endpoint for auth. +
+
Keystone Administrator Username: + +
+ The username for the Keystone admin. +
+
Keystone Administrator Password: + +
+ The password for the Keystone admin. +
+
Keystone Administrator Tenant: + +
+ The tenant (project/group) that contains the administrator user. +
+
+ + +
+ JSON Web Token authentication allows your organization to provide an HTTP endpoint that + verifies user credentials on behalf of . +
+ Documentation + on the API required can be found here: https://github.com/coreos/jwt-auth-example. +
+ + + + + + + + + + + + + + + + + + + + + + +
Authentication Issuer: + +
+ The id of the issuer signing the JWT token. Must be unique to your organization. +
+
Public Key: + +
+ A certificate containing the public key portion of the key pair used to sign + the JSON Web Tokens. This file must be in PEM format. +
+
User Verification Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for verifying username and password credentials. +
+ +
+ Credentials will be sent in the Authorization header as Basic Auth, and this endpoint should return 200 OK on success (or a 4** otherwise). +
+
User Query Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + users based on a prefix query. This is optional. +
+ +
+ The prefix query will be sent as a query parameter with name query. +
+
User Lookup Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + a user by username or email address. +
+ +
+ The username or email address will be sent as a query parameter with name username. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LDAP URI: + +
+ The full LDAP URI, including the ldap:// or ldaps:// prefix. +
+
Base DN: + +
+ A Distinguished Name path which forms the base path for looking up all LDAP records. +
+
+ Example: dc=my,dc=domain,dc=com +
+
User Relative DN: + +
+ A Distinguished Name path which forms the base path for looking up all user LDAP records, + relative to the Base DN defined above. +
+
+ Example: ou=employees +
+
Secondary User Relative DNs: + +
+ A list of Distinguished Name path(s) which forms the secondary base path(s) for + looking up all user LDAP records, relative to the Base DN defined above. These path(s) + will be tried if the user is not found via the primary relative DN. +
+
+ Example: [ou=employees] +
+
Administrator DN: +
+ The Distinguished Name for the Administrator account. This account must be able to log in and view the records for all user accounts. +
+
+ Example: uid=admin,ou=employees,dc=my,dc=domain,dc=com +
+
Administrator DN Password: +
+ Note: This will be stored in + plaintext inside the config.yaml, so setting up a dedicated account or using + a password hash is highly recommended. +
+ +
+ The password for the Administrator DN. +
+
UID Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' username. Typically "uid". +
+
Mail Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' e-mail address(es). Typically "mail". +
+
Custom TLS Certificate: + +
+ If specified, the certificate (in PEM format) for the LDAP TLS connection. +
+
Allow insecure: +
+ Allow fallback to non-TLS connections +
+
+ If enabled, LDAP will fall back to insecure non-TLS connections if TLS does not succeed. +
+
+
+
+ +
+
+ External Authorization (OAuth) +
+
+ +
+
+ GitHub (Enterprise) Authentication +
+
+
+

+ If enabled, users can use GitHub or GitHub Enterprise to authenticate to the registry. +

+

+ Note: A registered GitHub (Enterprise) OAuth application is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
Organization Filtering: +
+ Restrict By Organization Membership +
+ +
+ If enabled, only members of specified GitHub + Enterprise organizations will be allowed to login via GitHub + Enterprise. +
+ + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ Google Authentication +
+
+
+

+ If enabled, users can use Google to authenticate to the registry. +

+

+ Note: A registered Google OAuth application is required. + Visit the + + Google Developer Console + + to register an application. +

+
+ +
+ Enable Google Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + +
OAuth Client ID: + + +
OAuth Client Secret: + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ + {{ config[provider]['SERVICE_NAME'] || (getOIDCProviderId(provider) + ' Authentication') }} + (Delete) +
+
+
+ Warning: This OIDC provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Service ID: + {{ getOIDCProviderId(provider) }} +
OIDC Server: + + +
+ The URL of an OIDC-compliant server. +
+
Client ID: + +
Client Secret: + +
Service Name: + + +
+ The user-friendly name to display for the service on the login page. +
+
Service Icon (optional): + + +
+ If specified, the icon to display for this login service on the login page. Can be either a URL to an icon or a CSS class name from Font Awesome +
+
Binding Field: + +
+ If selected, when a user logs in via this OIDC provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the OIDC provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this OIDC provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this OIDC provider. This is not the recommended setup. +
+
Login Scopes: + +
+ If specified, the scopes to send to the OIDC provider when performing the login flow. Note that, if specified, these scopes will + override those set by default, so this list must include a scope for OpenID Connect + (typically the openid scope) or this provider will fail. +
+
+
+

Callback URLs for this service:

+
    +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback
  • +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/attach
  • +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/cli
  • +
+
+
+
+ + + Add OIDC Provider + What is OIDC? +
+
+ + +
+
+ Access Settings +
+
+
+

Various settings around access and authentication to the registry.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Basic Credentials Login: +
+ Login to User Interface via credentials +
+
+
+ Login to User Interface via credentials must be enabled. Click here to enable. +
+
+ Login to User Interface via credentials is enabled (requires at least one OIDC provider to disable) +
+
+
+ If enabled, users will be able to login to the user interface via their username and password credentials. +
+
+ If disabled, users will only be able to login to the user interface via one of the configured External Authentication providers. +
+
External Application tokens +
+ Allow external application tokens +
+
+ If enabled, users will be able to generate external application tokens for use on the Docker and rkt CLI. Note + that these tokens will not be required unless "App Token" is chosen as the Internal Authentication method above. +
+
External application token expiration + +
+ The expiration time for user generated external application tokens. If none, tokens will never expire. +
+
Anonymous Access: +
+ Enable Anonymous Access +
+
+ If enabled, public repositories and search can be accessed by anyone that can + reach the registry, even if they are not authenticated. Disable to only allow + authenticated users to view and pull "public" resources. +
+
User Creation: +
+ Enable Open User Creation +
+
+ If enabled, user accounts can be created by anyone (unless restricted below to invited users). + Users can always be created in the users panel in this superuser tool, even if this feature is disabled. +
+
Invite-only User Creation: +
+ Enable Invite-only User Creation +
+
+ If enabled, user accounts can only be created when a user has been invited, by e-mail address, to join a team. + Users can always be created in the users panel in this superuser tool, even if this feature is enabled. +
+
Encrypted Client Password: +
+ Require Encrypted Client Passwords +
+
+ If enabled, users will not be able to log in from the Docker command + line with a non-encrypted password and must generate an encrypted + password to use. +
+
+ This feature is highly recommended for setups with external authentication, as Docker currently stores passwords in plaintext on users' machines. +
+
Prefix username autocompletion: +
+ Allow prefix username autocompletion +
+
+ If disabled, autocompletion for users will only match on exact usernames. +
+
Team Invitations: +
+ Require Team Invitations +
+
+ If enabled, when adding a new user to a team, they will receive an invitation to join the team, with the option to decline. + Otherwise, users will be immediately part of a team when added by a team administrator. +
+
+
+
+ + +
+
+ Dockerfile Build Support +
+
+
+ If enabled, users can submit Dockerfiles to be built and pushed by . +
+ +
+ Enable Dockerfile Build +
+ +
+ Note: Build workers are required for this feature. + See Adding Build Workers for instructions on how to set up build workers. +
+
+
+ + +
+
+ GitHub (Enterprise) Build Triggers +
+
+
+

+ If enabled, users can set up GitHub or GitHub Enterprise triggers to invoke Registry builds. +

+

+ Note: A registered GitHub (Enterprise) OAuth application (separate from GitHub Authentication) is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Triggers +
+ + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
+
+
+ + +
+
+ BitBucket Build Triggers +
+
+
+

+ If enabled, users can set up BitBucket triggers to invoke Registry builds. +

+

+ Note: A registered BitBucket OAuth application is required. + View instructions on how to + + Create an OAuth Application in BitBucket + +

+
+ +
+ Enable BitBucket Triggers +
+ + + + + + + + + + +
OAuth Consumer Key: + + +
OAuth Consumer Secret: + + +
+
+
+ + +
+
+ GitLab Build Triggers +
+
+
+

+ If enabled, users can set up GitLab triggers to invoke Registry builds. +

+

+ Note: A registered GitLab OAuth application is required. + Visit the + + GitLab applications admin panel + + to create a new application. +

+

The callback URL to use is:   + {{ config.PREFERRED_URL_SCHEME || 'http' }}://{{ config.SERVER_HOSTNAME || 'localhost' }}/oauth2/gitlab/callback/trigger +

+
+ +
+ Enable GitLab Triggers +
+ + + + + + + + + + + + + + + + + + +
GitLab: + +
GitLab Endpoint: + + +
+ The GitLab Enterprise endpoint. Must start with http:// or https://. +
+
Application Id: + + +
Secret: + + +
+
+
+ + + + +
+ + +
+ + + + +
+
diff --git a/config_app/js/core-config-setup/core-config-setup.js b/config_app/js/core-config-setup/core-config-setup.js new file mode 100644 index 000000000..d069ab186 --- /dev/null +++ b/config_app/js/core-config-setup/core-config-setup.js @@ -0,0 +1,1454 @@ +import * as URI from 'urijs'; +import * as angular from 'angular'; +const templateUrl = require('./config-setup-tool.html'); +const urlParsedField = require('../config-field-templates/config-parsed-field.html'); +const urlVarField = require('../config-field-templates/config-variable-field.html'); +const urlListField = require('../config-field-templates/config-list-field.html'); +const urlFileField = require('../config-field-templates/config-file-field.html'); +const urlBoolField = require('../config-field-templates/config-bool-field.html'); +const urlNumericField = require('../config-field-templates/config-numeric-field.html'); +const urlContactsField = require('../config-field-templates/config-contacts-field.html'); +const urlMapField = require('../config-field-templates/config-map-field.html'); +const urlServiceKeyField = require('../config-field-templates/config-service-key-field.html'); +const urlStringField = require('../config-field-templates/config-string-field.html'); + +const urlStringListField = require('../config-field-templates/config-string-list-field.html'); +const urlCertField = require('../config-field-templates/config-certificates-field.html'); + + +angular.module("quay-config") + .directive('configSetupTool', () => { + var directiveDefinitionObject = { + priority: 1, + templateUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'isActive': '=isActive', + 'configurationSaved': '&configurationSaved' + }, + controller: function($rootScope, $scope, $element, $timeout, ApiService) { + console.log('in the controller of the configSetupTool') + + var authPassword = null; + + $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9\.]+(:[0-9]+)?$'; + $scope.GITHOST_REGEX = '^https?://([a-zA-Z0-9]+\.?\/?)+$'; + + $scope.SERVICES = [ + {'id': 'redis', 'title': 'Redis'}, + + {'id': 'registry-storage', 'title': 'Registry Storage'}, + + {'id': 'time-machine', 'title': 'Time Machine'}, + + {'id': 'access', 'title': 'Access Settings'}, + + {'id': 'ssl', 'title': 'SSL certificate and key', 'condition': function(config) { + return config.PREFERRED_URL_SCHEME == 'https'; + }}, + + {'id': 'ldap', 'title': 'LDAP Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'LDAP'; + }, 'password': true}, + + {'id': 'jwt', 'title': 'JWT Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'JWT'; + }, 'password': true}, + + {'id': 'keystone', 'title': 'Keystone Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'Keystone'; + }, 'password': true}, + + {'id': 'apptoken-auth', 'title': 'App Token Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'AppToken'; + }}, + + {'id': 'signer', 'title': 'ACI Signing', 'condition': function(config) { + return config.FEATURE_ACI_CONVERSION; + }}, + + {'id': 'mail', 'title': 'E-mail Support', 'condition': function(config) { + return config.FEATURE_MAILING; + }}, + + {'id': 'github-login', 'title': 'Github (Enterprise) Authentication', 'condition': function(config) { + return config.FEATURE_GITHUB_LOGIN; + }}, + + {'id': 'google-login', 'title': 'Google Authentication', 'condition': function(config) { + return config.FEATURE_GOOGLE_LOGIN; + }}, + + {'id': 
'github-trigger', 'title': 'GitHub (Enterprise) Build Triggers', 'condition': function(config) { + return config.FEATURE_GITHUB_BUILD; + }}, + + {'id': 'bitbucket-trigger', 'title': 'BitBucket Build Triggers', 'condition': function(config) { + return config.FEATURE_BITBUCKET_BUILD; + }}, + + {'id': 'gitlab-trigger', 'title': 'GitLab Build Triggers', 'condition': function(config) { + return config.FEATURE_GITLAB_BUILD; + }}, + + {'id': 'security-scanner', 'title': 'Quay Security Scanner', 'condition': function(config) { + return config.FEATURE_SECURITY_SCANNER; + }}, + + {'id': 'bittorrent', 'title': 'BitTorrent downloads', 'condition': function(config) { + return config.FEATURE_BITTORRENT; + }}, + + {'id': 'oidc-login', 'title': 'OIDC Login(s)', 'condition': function(config) { + return $scope.getOIDCProviders(config).length > 0; + }}, + + {'id': 'actionlogarchiving', 'title': 'Action Log Rotation', 'condition': function(config) { + return config.FEATURE_ACTION_LOG_ROTATION; + }}, + ]; + + $scope.STORAGE_CONFIG_FIELDS = { + 'LocalStorage': [ + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/some/directory', 'kind': 'text'} + ], + + 'S3Storage': [ + {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}, + {'name': 's3_access_key', 'title': 'AWS Access Key (optional if using IAM)', 'placeholder': 'accesskeyhere', 'kind': 'text', 'optional': true}, + {'name': 's3_secret_key', 'title': 'AWS Secret Key (optional if using IAM)', 'placeholder': 'secretkeyhere', 'kind': 'text', 'optional': true}, + {'name': 'host', 'title': 'S3 Host (optional)', 'placeholder': 's3.amazonaws.com', 'kind': 'text', 'optional': true}, + {'name': 'port', 'title': 'S3 Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true} + ], + + 'AzureStorage': [ + {'name': 'azure_container', 'title': 'Azure Storage Container', 'placeholder': 'container', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/container', 'kind': 'text'}, + {'name': 'azure_account_name', 'title': 'Azure Account Name', 'placeholder': 'accountnamehere', 'kind': 'text'}, + {'name': 'azure_account_key', 'title': 'Azure Account Key', 'placeholder': 'accountkeyhere', 'kind': 'text', 'optional': true}, + {'name': 'sas_token', 'title': 'Azure SAS Token', 'placeholder': 'sastokenhere', 'kind': 'text', 'optional': true}, + ], + + 'GoogleCloudStorage': [ + {'name': 'access_key', 'title': 'Cloud Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text'}, + {'name': 'secret_key', 'title': 'Cloud Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, + {'name': 'bucket_name', 'title': 'GCS Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ], + + 'RadosGWStorage': [ + {'name': 'hostname', 'title': 'Rados Server Hostname', 'placeholder': 'my.rados.hostname', 'kind': 'text'}, + {'name': 'port', 'title': 'Custom Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true}, + {'name': 'is_secure', 'title': 'Is Secure', 'placeholder': 'Require SSL', 'kind': 'bool'}, + {'name': 'access_key', 'title': 'Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text', 'help_url': 'http://ceph.com/docs/master/radosgw/admin/'}, + {'name': 'secret_key', 
'title': 'Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, + {'name': 'bucket_name', 'title': 'Bucket Name', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ], + + 'SwiftStorage': [ + {'name': 'auth_version', 'title': 'Swift Auth Version', 'kind': 'option', 'values': [1, 2, 3]}, + {'name': 'auth_url', 'title': 'Swift Auth URL', 'placeholder': 'http://swiftdomain/auth/v1.0', 'kind': 'text'}, + {'name': 'swift_container', 'title': 'Swift Container Name', 'placeholder': 'mycontainer', 'kind': 'text', + 'help_text': 'The swift container for all objects. Must already exist inside Swift.'}, + + {'name': 'storage_path', 'title': 'Storage Path', 'placeholder': '/path/inside/container', 'kind': 'text'}, + + {'name': 'swift_user', 'title': 'Username', 'placeholder': 'accesskeyhere', 'kind': 'text', + 'help_text': 'Note: For Swift V1, this is "username:password" (-U on the CLI).'}, + {'name': 'swift_password', 'title': 'Key/Password', 'placeholder': 'secretkeyhere', 'kind': 'text', + 'help_text': 'Note: For Swift V1, this is the API token (-K on the CLI).'}, + + {'name': 'ca_cert_path', 'title': 'CA Cert Filename', 'placeholder': 'conf/stack/swift.cert', 'kind': 'text', 'optional': true}, + + {'name': 'temp_url_key', 'title': 'Temp URL Key (optional)', 'placholder': 'key-here', 'kind': 'text', 'optional': true, + 'help_url': 'https://coreos.com/products/enterprise-registry/docs/latest/swift-temp-url.html', + 'help_text': 'If enabled, will allow for faster pulls directly from Swift.'}, + + {'name': 'os_options', 'title': 'OS Options', 'kind': 'map', + 'keys': ['tenant_id', 'auth_token', 'service_type', 'endpoint_type', 'tenant_name', 'object_storage_url', 'region_name', + 'project_id', 'project_name', 'project_domain_name', 'user_domain_name', 'user_domain_id']} + ], + + 'CloudFrontedS3Storage': [ + {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}, + {'name': 's3_access_key', 'title': 'AWS Access Key (optional if using IAM)', 'placeholder': 'accesskeyhere', 'kind': 'text', 'optional': true}, + {'name': 's3_secret_key', 'title': 'AWS Secret Key (optional if using IAM)', 'placeholder': 'secretkeyhere', 'kind': 'text', 'optional': true}, + {'name': 'host', 'title': 'S3 Host (optional)', 'placeholder': 's3.amazonaws.com', 'kind': 'text', 'optional': true}, + {'name': 'port', 'title': 'S3 Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true}, + + {'name': 'cloudfront_distribution_domain', 'title': 'CloudFront Distribution Domain Name', 'placeholder': 'somesubdomain.cloudfront.net', 'pattern': '^([0-9a-zA-Z]+\\.)+[0-9a-zA-Z]+$', 'kind': 'text'}, + {'name': 'cloudfront_key_id', 'title': 'CloudFront Key ID', 'placeholder': 'APKATHISISAKEYID', 'kind': 'text'}, + {'name': 'cloudfront_privatekey_filename', 'title': 'CloudFront Private Key', 'filesuffix': 'cloudfront-signing-key.pem', 'kind': 'file'}, + ], + }; + + $scope.enableFeature = function(config, feature) { + config[feature] = true; + }; + + $scope.validateHostname = function(hostname) { + if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) { + return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.' 
+ } + + return null; + }; + + $scope.config = null; + $scope.mapped = { + '$hasChanges': false + }; + + $scope.hasfile = {}; + $scope.validating = null; + $scope.savingConfiguration = false; + + $scope.removeOIDCProvider = function(provider) { + delete $scope.config[provider]; + }; + + $scope.addOIDCProvider = () => { + bootbox.prompt('Enter an ID for the OIDC provider', function(result) { + if (!result) { + return; + } + + result = result.toUpperCase(); + + if (!result.match(/^[A-Z0-9]+$/)) { + bootbox.alert('Invalid ID for OIDC provider: must be alphanumeric'); + return; + } + + if (result == 'GITHUB' || result == 'GOOGLE') { + bootbox.alert('Invalid ID for OIDC provider: cannot be a reserved name'); + return; + } + + var key = result + '_LOGIN_CONFIG'; + if ($scope.config[key]) { + bootbox.alert('Invalid ID for OIDC provider: already exists'); + return; + } + + $scope.config[key] = {}; + }); + }; + + $scope.getOIDCProviderId = function(key) { + var index = key.indexOf('_LOGIN_CONFIG'); + if (index <= 0) { + return null; + } + + return key.substr(0, index).toLowerCase(); + }; + + $scope.getOIDCProviders = function(config) { + var keys = Object.keys(config || {}); + return keys.filter(function(key) { + if (key == 'GITHUB_LOGIN_CONFIG' || key == 'GOOGLE_LOGIN_CONFIG') { + // Has custom UI and config. + return false; + } + + return !!$scope.getOIDCProviderId(key); + }); + }; + + $scope.getServices = function(config) { + var services = []; + if (!config) { return services; } + + for (var i = 0; i < $scope.SERVICES.length; ++i) { + var service = $scope.SERVICES[i]; + if (!service.condition || service.condition(config)) { + services.push({ + 'service': service, + 'status': 'validating' + }); + } + } + + return services; + }; + + $scope.validationStatus = function(serviceInfos) { + if (!serviceInfos) { return 'validating'; } + + var hasError = false; + for (var i = 0; i < serviceInfos.length; ++i) { + if (serviceInfos[i].status == 'validating') { + return 'validating'; + } + if (serviceInfos[i].status == 'error') { + hasError = true; + } + } + + return hasError ? 'failed' : 'success'; + }; + + $scope.cancelValidation = function() { + $('#validateAndSaveModal').modal('hide'); + $scope.validating = null; + $scope.savingConfiguration = false; + }; + + $scope.validateService = function(serviceInfo, opt_password) { + var params = { + 'service': serviceInfo.service.id + }; + + var data = { + 'config': $scope.config, + 'password': opt_password || '' + }; + + var errorDisplay = ApiService.errorDisplay( + 'Could not validate configuration. Please report this error.', + function() { + authPassword = null; + }); + + ApiService.scValidateConfig(data, params).then(function(resp) { + serviceInfo.status = resp.status ? 
'success' : 'error'; + serviceInfo.errorMessage = $.trim(resp.reason || ''); + + if (!resp.status) { + authPassword = null; + } + + }, errorDisplay); + }; + + $scope.checkValidateAndSave = function() { + if ($scope.configform.$valid) { + saveStorageConfig(); + $scope.validateAndSave(); + return; + } + + var query = $element.find("input.ng-invalid:first"); + + if (query && query.length) { + query[0].scrollIntoView(); + query.focus(); + } + }; + + $scope.validateAndSave = function() { + $scope.validating = $scope.getServices($scope.config); + + var requirePassword = false; + for (var i = 0; i < $scope.validating.length; ++i) { + var serviceInfo = $scope.validating[i]; + if (serviceInfo.service.password) { + requirePassword = true; + break; + } + } + + if (!requirePassword) { + $scope.performValidateAndSave(); + return; + } + + var box = bootbox.dialog({ + "message": 'Please enter your superuser password to validate your auth configuration:' + + '
' + + '' + + '
', + "title": 'Enter Password', + "buttons": { + "success": { + "label": "Validate Config", + "className": "btn-success btn-continue", + "callback": function() { + $scope.performValidateAndSave($('#validatePassword').val()); + } + }, + "close": { + "label": "Cancel", + "className": "btn-default", + "callback": function() { + } + } + } + }); + + box.bind('shown.bs.modal', function(){ + box.find("input").focus(); + box.find("form").submit(function() { + if (!$('#validatePassword').val()) { return; } + box.modal('hide'); + }); + }); + }; + + $scope.performValidateAndSave = function(opt_password) { + $scope.savingConfiguration = false; + $scope.validating = $scope.getServices($scope.config); + + authPassword = opt_password; + + $('#validateAndSaveModal').modal({ + keyboard: false, + backdrop: 'static' + }); + + for (var i = 0; i < $scope.validating.length; ++i) { + var serviceInfo = $scope.validating[i]; + $scope.validateService(serviceInfo, opt_password); + } + }; + + $scope.saveConfiguration = function() { + $scope.savingConfiguration = true; + + // Make sure to note that fully verified setup is completed. We use this as a signal + // in the setup tool. + $scope.config['SETUP_COMPLETE'] = true; + + var data = { + 'config': $scope.config, + 'hostname': window.location.host, + 'password': authPassword || '' + }; + + var errorDisplay = ApiService.errorDisplay( + 'Could not save configuration. Please report this error.', + function() { + authPassword = null; + }); + + ApiService.scUpdateConfig(data).then(function(resp) { + authPassword = null; + + $scope.savingConfiguration = false; + $scope.mapped.$hasChanges = false; + + $('#validateAndSaveModal').modal('hide'); + + $scope.configurationSaved({'config': $scope.config}); + }, errorDisplay); + }; + + // Convert storage config to an array + var initializeStorageConfig = function($scope) { + var config = $scope.config.DISTRIBUTED_STORAGE_CONFIG || {}; + var defaultLocations = $scope.config.DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS || []; + var preference = $scope.config.DISTRIBUTED_STORAGE_PREFERENCE || []; + + $scope.serverStorageConfig = angular.copy(config); + $scope.storageConfig = []; + + Object.keys(config).forEach(function(location) { + $scope.storageConfig.push({ + location: location, + defaultLocation: defaultLocations.indexOf(location) >= 0, + data: angular.copy(config[location]), + error: {}, + }); + }); + + if (!$scope.storageConfig.length) { + $scope.addStorageConfig('default'); + return; + } + + // match DISTRIBUTED_STORAGE_PREFERENCE order first, remaining are + // ordered by unicode point value + $scope.storageConfig.sort(function(a, b) { + var indexA = preference.indexOf(a.location); + var indexB = preference.indexOf(b.location); + + if (indexA > -1 && indexB > -1) return indexA < indexB ? -1 : 1; + if (indexA > -1) return -1; + if (indexB > -1) return 1; + + return a.location < b.location ? 
-1 : 1; + }); + }; + + $scope.allowChangeLocationStorageConfig = function(location) { + if (!$scope.serverStorageConfig[location]) { return true }; + + // allow user to change location ID if another exists with the same ID + return $scope.storageConfig.filter(function(sc) { + return sc.location === location; + }).length >= 2; + }; + + $scope.allowRemoveStorageConfig = function(location) { + return $scope.storageConfig.length > 1 && $scope.allowChangeLocationStorageConfig(location); + }; + + $scope.canAddStorageConfig = function() { + return $scope.config && + $scope.config.FEATURE_STORAGE_REPLICATION && + $scope.storageConfig && + (!$scope.storageConfig.length || $scope.storageConfig.length < 10); + }; + + $scope.addStorageConfig = function(location) { + var storageType = 'LocalStorage'; + + // Use last storage type by default + if ($scope.storageConfig.length) { + storageType = $scope.storageConfig[$scope.storageConfig.length-1].data[0]; + } + + $scope.storageConfig.push({ + location: location || '', + defaultLocation: false, + data: [storageType, {}], + error: {}, + }); + }; + + $scope.removeStorageConfig = function(sc) { + $scope.storageConfig.splice($scope.storageConfig.indexOf(sc), 1); + }; + + var saveStorageConfig = function() { + var config = {}; + var defaultLocations = []; + var preference = []; + + $scope.storageConfig.forEach(function(sc) { + config[sc.location] = sc.data; + if (sc.defaultLocation) defaultLocations.push(sc.location); + preference.push(sc.location); + }); + + $scope.config.DISTRIBUTED_STORAGE_CONFIG = config; + $scope.config.DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = defaultLocations; + $scope.config.DISTRIBUTED_STORAGE_PREFERENCE = preference; + }; + + var gitlabSelector = function(key) { + return function(value) { + if (!value || !$scope.config) { return; } + + if (!$scope.config[key]) { + $scope.config[key] = {}; + } + + if (value == 'enterprise') { + if ($scope.config[key]['GITLAB_ENDPOINT'] == 'https://gitlab.com/') { + $scope.config[key]['GITLAB_ENDPOINT'] = ''; + } + } else if (value == 'hosted') { + $scope.config[key]['GITLAB_ENDPOINT'] = 'https://gitlab.com/'; + } + }; + }; + + var githubSelector = function(key) { + return function(value) { + if (!value || !$scope.config) { return; } + + if (!$scope.config[key]) { + $scope.config[key] = {}; + } + + if (value == 'enterprise') { + if ($scope.config[key]['GITHUB_ENDPOINT'] == 'https://github.com/') { + $scope.config[key]['GITHUB_ENDPOINT'] = ''; + } + delete $scope.config[key]['API_ENDPOINT']; + } else if (value == 'hosted') { + $scope.config[key]['GITHUB_ENDPOINT'] = 'https://github.com/'; + $scope.config[key]['API_ENDPOINT'] = 'https://api.github.com/'; + } + }; + }; + + var getKey = function(config, path) { + if (!config) { + return null; + } + + var parts = path.split('.'); + var current = config; + for (var i = 0; i < parts.length; ++i) { + var part = parts[i]; + if (!current[part]) { return null; } + current = current[part]; + } + return current; + }; + + var initializeMappedLogic = function(config) { + var gle = getKey(config, 'GITHUB_LOGIN_CONFIG.GITHUB_ENDPOINT'); + var gte = getKey(config, 'GITHUB_TRIGGER_CONFIG.GITHUB_ENDPOINT'); + + $scope.mapped['GITHUB_LOGIN_KIND'] = gle == 'https://github.com/' ? 'hosted' : 'enterprise'; + $scope.mapped['GITHUB_TRIGGER_KIND'] = gte == 'https://github.com/' ? 'hosted' : 'enterprise'; + + var glabe = getKey(config, 'GITLAB_TRIGGER_KIND.GITHUB_ENDPOINT'); + $scope.mapped['GITLAB_TRIGGER_KIND'] = glabe == 'https://gitlab.com/' ? 
'hosted' : 'enterprise'; + + $scope.mapped['redis'] = {}; + $scope.mapped['redis']['host'] = getKey(config, 'BUILDLOGS_REDIS.host') || getKey(config, 'USER_EVENTS_REDIS.host'); + $scope.mapped['redis']['port'] = getKey(config, 'BUILDLOGS_REDIS.port') || getKey(config, 'USER_EVENTS_REDIS.port'); + $scope.mapped['redis']['password'] = getKey(config, 'BUILDLOGS_REDIS.password') || getKey(config, 'USER_EVENTS_REDIS.password'); + + $scope.mapped['TLS_SETTING'] = 'none'; + if (config['PREFERRED_URL_SCHEME'] == 'https') { + if (config['EXTERNAL_TLS_TERMINATION'] === true) { + $scope.mapped['TLS_SETTING'] = 'external-tls'; + } else { + $scope.mapped['TLS_SETTING'] = 'internal-tls'; + } + } + }; + + var tlsSetter = function(value) { + if (value == null || !$scope.config) { return; } + + switch (value) { + case 'none': + $scope.config['PREFERRED_URL_SCHEME'] = 'http'; + delete $scope.config['EXTERNAL_TLS_TERMINATION']; + return; + + case 'external-tls': + $scope.config['PREFERRED_URL_SCHEME'] = 'https'; + $scope.config['EXTERNAL_TLS_TERMINATION'] = true; + return; + + case 'internal-tls': + $scope.config['PREFERRED_URL_SCHEME'] = 'https'; + delete $scope.config['EXTERNAL_TLS_TERMINATION']; + return; + } + }; + + var redisSetter = function(keyname) { + return function(value) { + if (value == null || !$scope.config) { return; } + + if (!$scope.config['BUILDLOGS_REDIS']) { + $scope.config['BUILDLOGS_REDIS'] = {}; + } + + if (!$scope.config['USER_EVENTS_REDIS']) { + $scope.config['USER_EVENTS_REDIS'] = {}; + } + + if (!value) { + delete $scope.config['BUILDLOGS_REDIS'][keyname]; + delete $scope.config['USER_EVENTS_REDIS'][keyname]; + return; + } + + $scope.config['BUILDLOGS_REDIS'][keyname] = value; + $scope.config['USER_EVENTS_REDIS'][keyname] = value; + }; + }; + + // Add mapped logic. + $scope.$watch('mapped.GITHUB_LOGIN_KIND', githubSelector('GITHUB_LOGIN_CONFIG')); + $scope.$watch('mapped.GITHUB_TRIGGER_KIND', githubSelector('GITHUB_TRIGGER_CONFIG')); + $scope.$watch('mapped.GITLAB_TRIGGER_KIND', gitlabSelector('GITLAB_TRIGGER_KIND')); + $scope.$watch('mapped.TLS_SETTING', tlsSetter); + + $scope.$watch('mapped.redis.host', redisSetter('host')); + $scope.$watch('mapped.redis.port', redisSetter('port')); + $scope.$watch('mapped.redis.password', redisSetter('password')); + + // Remove extra extra fields (which are not allowed) from storage config. + var updateFields = function(sc) { + var type = sc.data[0]; + var configObject = sc.data[1]; + var allowedFields = $scope.STORAGE_CONFIG_FIELDS[type]; + + // Remove any fields not allowed. + for (var fieldName in configObject) { + if (!configObject.hasOwnProperty(fieldName)) { + continue; + } + + var isValidField = $.grep(allowedFields, function(field) { + return field.name == fieldName; + }).length > 0; + + if (!isValidField) { + delete configObject[fieldName]; + } + } + + // Set any missing boolean fields to false. + for (var i = 0; i < allowedFields.length; ++i) { + if (allowedFields[i].kind == 'bool') { + configObject[allowedFields[i].name] = configObject[allowedFields[i].name] || false; + } + } + }; + + // Validate and update storage config on update. 
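+          // Prunes unknown fields from each entry, then flags duplicate location IDs and
+          // engines that cannot be used when storage replication is enabled.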
+ var refreshStorageConfig = function() { + if (!$scope.config || !$scope.storageConfig) return; + + var locationCounts = {}; + var errors = []; + var valid = true; + + $scope.storageConfig.forEach(function(sc) { + // remove extra fields from storage config + updateFields(sc); + + if (!locationCounts[sc.location]) locationCounts[sc.location] = 0; + locationCounts[sc.location]++; + }); + + // validate storage config + $scope.storageConfig.forEach(function(sc) { + var error = {}; + + if ($scope.config.FEATURE_STORAGE_REPLICATION && sc.data[0] === 'LocalStorage') { + error.engine = 'Replication to a locally mounted directory is unsupported as it is only accessible on a single machine.'; + valid = false; + } + + if (locationCounts[sc.location] > 1) { + error.location = 'Location ID must be unique.'; + valid = false; + } + + errors.push(error); + }); + + $scope.storageConfigError = errors; + $scope.configform.$setValidity('storageConfig', valid); + }; + + $scope.$watch('config.INTERNAL_OIDC_SERVICE_ID', function(service_id) { + if (service_id) { + $scope.config['FEATURE_DIRECT_LOGIN'] = false; + } + }); + + $scope.$watch('config.FEATURE_STORAGE_REPLICATION', function() { + refreshStorageConfig(); + }); + + $scope.$watch('storageConfig', function() { + refreshStorageConfig(); + }, true); + + $scope.$watch('config', function(value) { + $scope.mapped['$hasChanges'] = true; + }, true); + + $scope.$watch('isActive', function(value) { + if (!value) { return; } + + ApiService.scGetConfig().then(function(resp) { + $scope.config = resp['config'] || {}; + initializeMappedLogic($scope.config); + initializeStorageConfig($scope); + $scope.mapped['$hasChanges'] = false; + }, ApiService.errorDisplay('Could not load config')); + }); + } + }; + + return directiveDefinitionObject; + }) + + .directive('configParsedField', function ($timeout) { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlParsedField, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding', + 'parser': '&parser', + 'serializer': '&serializer' + }, + controller: function($scope, $element, $transclude) { + $scope.childScope = null; + + $transclude(function(clone, scope) { + $scope.childScope = scope; + $scope.childScope['fields'] = {}; + $element.append(clone); + }); + + $scope.childScope.$watch('fields', function(value) { + // Note: We need the timeout here because Angular starts the digest of the + // parent scope AFTER the child scope, which means it can end up one action + // behind. The timeout ensures that the parent scope will be fully digest-ed + // and then we update the binding. Yes, this is a hack :-/. 
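+            // (Without the $timeout, 'binding' could be serialized from a stale 'fields'.)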
+ $timeout(function() { + $scope.binding = $scope.serializer({'fields': value}); + }); + }, true); + + $scope.$watch('binding', function(value) { + var parsed = $scope.parser({'value': value}); + for (var key in parsed) { + if (parsed.hasOwnProperty(key)) { + $scope.childScope['fields'][key] = parsed[key]; + } + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configVariableField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlVarField, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + $scope.sections = {}; + $scope.currentSection = null; + + $scope.setSection = function(section) { + $scope.binding = section.value; + }; + + this.addSection = function(section, element) { + $scope.sections[section.value] = { + 'title': section.valueTitle, + 'value': section.value, + 'element': element + }; + + element.hide(); + + if (!$scope.binding) { + $scope.binding = section.value; + } + }; + + $scope.$watch('binding', function(binding) { + if (!binding) { return; } + + if ($scope.currentSection) { + $scope.currentSection.element.hide(); + } + + if ($scope.sections[binding]) { + $scope.sections[binding].element.show(); + $scope.currentSection = $scope.sections[binding]; + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('variableSection', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlVarField, + priority: 1, + require: '^configVariableField', + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'value': '@value', + 'valueTitle': '@valueTitle' + }, + controller: function($scope, $element) { + var parentCtrl = $element.parent().controller('configVariableField'); + parentCtrl.addSection($scope, $element); + } + }; + return directiveDefinitionObject; + }) + + .directive('configListField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlListField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'placeholder': '@placeholder', + 'defaultValue': '@defaultValue', + 'itemTitle': '@itemTitle', + 'itemPattern': '@itemPattern' + }, + controller: function($scope, $element) { + $scope.removeItem = function(item) { + var index = $scope.binding.indexOf(item); + if (index >= 0) { + $scope.binding.splice(index, 1); + } + }; + + $scope.addItem = function() { + if (!$scope.newItemName) { + return; + } + + if (!$scope.binding) { + $scope.binding = []; + } + + if ($scope.binding.indexOf($scope.newItemName) >= 0) { + return; + } + + $scope.binding.push($scope.newItemName); + $scope.newItemName = null; + }; + + $scope.patternMap = {}; + + $scope.getRegexp = function(pattern) { + if (!pattern) { + pattern = '.*'; + } + + if ($scope.patternMap[pattern]) { + return $scope.patternMap[pattern]; + } + + return $scope.patternMap[pattern] = new RegExp(pattern); + }; + + $scope.$watch('binding', function(binding) { + if (!binding && $scope.defaultValue) { + $scope.binding = eval($scope.defaultValue); + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configFileField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlFileField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'filename': '@filename', + 'skipCheckFile': '@skipCheckFile', + 'hasFile': '=hasFile', + 'binding': '=?binding' + }, + controller: function($scope, $element, Restangular, $upload) { + 
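// Tracks whether a file already exists server-side for this filename.
+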
$scope.hasFile = false; + + var setHasFile = function(hasFile) { + $scope.hasFile = hasFile; + $scope.binding = hasFile ? $scope.filename : null; + }; + + $scope.onFileSelect = function(files) { + if (files.length < 1) { + setHasFile(false); + return; + } + + $scope.uploadProgress = 0; + $scope.upload = $upload.upload({ + url: '/api/v1/superuser/config/file/' + $scope.filename, + method: 'POST', + data: {'_csrf_token': window.__token}, + file: files[0], + }).progress(function(evt) { + $scope.uploadProgress = parseInt(100.0 * evt.loaded / evt.total); + if ($scope.uploadProgress == 100) { + $scope.uploadProgress = null; + setHasFile(true); + } + }).success(function(data, status, headers, config) { + $scope.uploadProgress = null; + setHasFile(true); + }); + }; + + var loadStatus = function(filename) { + Restangular.one('superuser/config/file/' + filename).get().then(function(resp) { + setHasFile(false); + }); + }; + + if ($scope.filename && $scope.skipCheckFile != "true") { + loadStatus($scope.filename); + } + } + }; + return directiveDefinitionObject; + }) + + .directive('configBoolField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlBoolField, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('configNumericField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlNumericField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'placeholder': '@placeholder', + 'defaultValue': '@defaultValue', + }, + controller: function($scope, $element) { + $scope.bindinginternal = 0; + + $scope.$watch('binding', function(binding) { + if ($scope.binding == 0 && $scope.defaultValue) { + $scope.binding = $scope.defaultValue * 1; + } + + $scope.bindinginternal = $scope.binding; + }); + + $scope.$watch('bindinginternal', function(binding) { + var newValue = $scope.bindinginternal * 1; + if (isNaN(newValue)) { + newValue = 0; + } + $scope.binding = newValue; + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configContactsField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlContactsField, + priority: 1, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + var padItems = function(items) { + // Remove the last item if both it and the second to last items are empty. + if (items.length > 1 && !items[items.length - 2].value && !items[items.length - 1].value) { + items.splice(items.length - 1, 1); + return; + } + + // If the last item is non-empty, add a new item. 
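+          // i.e. the list always ends with exactly one empty row for new input.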
+ if (items.length == 0 || items[items.length - 1].value) { + items.push({'value': ''}); + return; + } + }; + + $scope.itemHash = null; + $scope.$watch('items', function(items) { + if (!items) { return; } + padItems(items); + + var itemHash = ''; + var binding = []; + for (var i = 0; i < items.length; ++i) { + var item = items[i]; + if (item.value && (URI(item.value).host() || URI(item.value).path())) { + binding.push(item.value); + itemHash += item.value; + } + } + + $scope.itemHash = itemHash; + $scope.binding = binding; + }, true); + + $scope.$watch('binding', function(binding) { + var current = binding || []; + var items = []; + var itemHash = ''; + for (var i = 0; i < current.length; ++i) { + items.push({'value': current[i]}) + itemHash += current[i]; + } + + if ($scope.itemHash != itemHash) { + $scope.items = items; + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configContactField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlContactsField, + priority: 1, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + $scope.kind = null; + $scope.value = null; + + var updateBinding = function() { + if ($scope.value == null) { return; } + var value = $scope.value || ''; + + switch ($scope.kind) { + case 'mailto': + $scope.binding = 'mailto:' + value; + return; + + case 'tel': + $scope.binding = 'tel:' + value; + return; + + case 'irc': + $scope.binding = 'irc://' + value; + return; + + default: + $scope.binding = value; + return; + } + }; + + $scope.$watch('kind', updateBinding); + $scope.$watch('value', updateBinding); + + $scope.$watch('binding', function(value) { + if (!value) { + $scope.kind = null; + $scope.value = null; + return; + } + + var uri = URI(value); + $scope.kind = uri.scheme(); + + switch ($scope.kind) { + case 'mailto': + case 'tel': + $scope.value = uri.path(); + break; + + case 'irc': + $scope.value = value.substr('irc://'.length); + break; + + default: + $scope.kind = 'http'; + $scope.value = value; + break; + } + }); + + $scope.getPlaceholder = function(kind) { + switch (kind) { + case 'mailto': + return 'some@example.com'; + + case 'tel': + return '555-555-5555'; + + case 'irc': + return 'myserver:port/somechannel'; + + default: + return 'http://some/url'; + } + }; + } + }; + return directiveDefinitionObject; + }) + + .directive('configMapField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlMapField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'keys': '=keys' + }, + controller: function($scope, $element) { + $scope.newKey = null; + $scope.newValue = null; + + $scope.hasValues = function(binding) { + return binding && Object.keys(binding).length; + }; + + $scope.removeKey = function(key) { + delete $scope.binding[key]; + }; + + $scope.addEntry = function() { + if (!$scope.newKey || !$scope.newValue) { return; } + + $scope.binding = $scope.binding || {}; + $scope.binding[$scope.newKey] = $scope.newValue; + $scope.newKey = null; + $scope.newValue = null; + } + } + }; + return directiveDefinitionObject; + }) + + .directive('configServiceKeyField', function (ApiService) { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlServiceKeyField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'serviceName': '@serviceName', + }, + controller: function($scope, $element) { + $scope.foundKeys = []; + 
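// True while the key list is being (re)loaded from the server.
+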
$scope.loading = false; + $scope.loadError = false; + $scope.hasValidKey = false; + $scope.hasValidKeyStr = null; + + $scope.updateKeys = function() { + $scope.foundKeys = []; + $scope.loading = true; + + ApiService.listServiceKeys().then(function(resp) { + $scope.loading = false; + $scope.loadError = false; + + resp['keys'].forEach(function(key) { + if (key['service'] == $scope.serviceName) { + $scope.foundKeys.push(key); + } + }); + + $scope.hasValidKey = checkValidKey($scope.foundKeys); + $scope.hasValidKeyStr = $scope.hasValidKey ? 'true' : ''; + }, function() { + $scope.loading = false; + $scope.loadError = true; + }); + }; + + // Perform initial loading of the keys. + $scope.updateKeys(); + + $scope.isKeyExpired = function(key) { + if (key.expiration_date != null) { + var expiration_date = moment(key.expiration_date); + return moment().isAfter(expiration_date); + } + return false; + }; + + $scope.showRequestServiceKey = function() { + $scope.requestKeyInfo = { + 'service': $scope.serviceName + }; + }; + + $scope.handleKeyCreated = function() { + $scope.updateKeys(); + }; + + var checkValidKey = function(keys) { + for (var i = 0; i < keys.length; ++i) { + var key = keys[i]; + if (!key.approval) { + continue; + } + + if ($scope.isKeyExpired(key)) { + continue; + } + + return true; + } + + return false; + }; + } + }; + return directiveDefinitionObject; + }) + + .directive('configStringField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlStringField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'placeholder': '@placeholder', + 'pattern': '@pattern', + 'defaultValue': '@defaultValue', + 'validator': '&validator', + 'isOptional': '=isOptional' + }, + controller: function($scope, $element) { + var firstSet = true; + + $scope.patternMap = {}; + + $scope.getRegexp = function(pattern) { + if (!pattern) { + pattern = '.*'; + } + + if ($scope.patternMap[pattern]) { + return $scope.patternMap[pattern]; + } + + return $scope.patternMap[pattern] = new RegExp(pattern); + }; + + $scope.$watch('binding', function(binding) { + if (firstSet && !binding && $scope.defaultValue) { + $scope.binding = $scope.defaultValue; + firstSet = false; + } + + $scope.errorMessage = $scope.validator({'value': binding || ''}); + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configStringListField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlStringListField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'itemTitle': '@itemTitle', + 'itemDelimiter': '@itemDelimiter', + 'placeholder': '@placeholder', + 'isOptional': '=isOptional' + }, + controller: function($scope, $element) { + $scope.$watch('internalBinding', function(value) { + if (value) { + $scope.binding = value.split($scope.itemDelimiter); + } + }); + + $scope.$watch('binding', function(value) { + if (value) { + $scope.internalBinding = value.join($scope.itemDelimiter); + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configCertificatesField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlCertField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + }, + controller: function($scope, $element, $upload, ApiService, UserService) { + $scope.resetUpload = 0; + $scope.certsUploading = false; + + var loadCertificates = function() { + $scope.certificatesResource = 
ApiService.getCustomCertificatesAsResource().get(function(resp) { + $scope.certInfo = resp; + $scope.certsUploading = false; + }); + }; + + UserService.updateUserIn($scope, function(user) { + if (!user.anonymous) { + loadCertificates(); + } + }); + + $scope.handleCertsSelected = function(files, callback) { + $scope.certsUploading = true; + $upload.upload({ + url: '/api/v1/superuser/customcerts/' + files[0].name, + method: 'POST', + data: {'_csrf_token': window.__token}, + file: files[0] + }).success(function() { + callback(true); + $scope.resetUpload++; + loadCertificates(); + }).error(function(r) { + bootbox.alert('Could not upload certificate') + callback(false); + $scope.resetUpload++; + loadCertificates(); + }); + }; + + $scope.deleteCert = function(path) { + var errorDisplay = ApiService.errorDisplay('Could not delete certificate'); + var params = { + 'certpath': path + }; + + ApiService.deleteCustomCertificate(null, params).then(loadCertificates, errorDisplay); + }; + } + }; + return directiveDefinitionObject; + }); diff --git a/config_app/js/main.ts b/config_app/js/main.ts new file mode 100644 index 000000000..9ca931a8d --- /dev/null +++ b/config_app/js/main.ts @@ -0,0 +1,36 @@ +// imports shims, etc +import 'core-js'; + +import '../static/css/core-ui.css'; + +import * as angular from 'angular'; +import { ConfigAppModule } from './config-app.module'; +import { bundle } from 'ng-metadata/core'; + +// load all app dependencies +require('../static/lib/angular-file-upload.min.js'); +require('../../static/js/tar'); + +const ng1QuayModule: string = bundle(ConfigAppModule, []).name; +angular.module('quay-config', [ng1QuayModule]) + .run(() => { + console.log(' init run was called') + }); + +console.log('Hello world! I\'m the config app'); + +declare var require: any; +function requireAll(r) { + r.keys().forEach(r); +} + +// load all services +// require('./services/api-service'); +requireAll(require.context('./services', true, /\.js$/)); + + +// load all the components after services +requireAll(require.context('./setup', true, /\.js$/)); +requireAll(require.context('./core-config-setup', true, /\.js$/)); + + diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js new file mode 100644 index 000000000..eaa16b746 --- /dev/null +++ b/config_app/js/services/api-service.js @@ -0,0 +1,332 @@ +/** + * Service which exposes the server-defined API as a nice set of helper methods and automatic + * callbacks. Any method defined on the server is exposed here as an equivalent method. Also + * defines some helper functions for working with API responses. 
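+ *
+ * Usage sketch (assumes window.__endpoints declares a GET operation whose
+ * operationId is 'scGetConfig'):
+ *
+ *   ApiService.scGetConfig().then(function(resp) {
+ *     console.log(resp['config']);
+ *   }, ApiService.errorDisplay('Could not load config'));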
+ */ +// console.log(angular.module('quay-config').requires); +angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilService', function(Restangular, $q, UtilService) { + var apiService = {}; + + // if (!window.__endpoints) { + // return apiService; + // } + + var getResource = function(getMethod, operation, opt_parameters, opt_background) { + var resource = {}; + resource.withOptions = function(options) { + this.options = options; + return this; + }; + + resource.get = function(processor, opt_errorHandler) { + var options = this.options; + var result = { + 'loading': true, + 'value': null, + 'hasError': false + }; + + getMethod(options, opt_parameters, opt_background, true).then(function(resp) { + result.value = processor(resp); + result.loading = false; + }, function(resp) { + result.hasError = true; + result.loading = false; + if (opt_errorHandler) { + opt_errorHandler(resp); + } + }); + + return result; + }; + + return resource; + }; + + var buildUrl = function(path, parameters) { + // We already have /api/v1/ on the URLs, so remove them from the paths. + path = path.substr('/api/v1/'.length, path.length); + + // Build the path, adjusted with the inline parameters. + var used = {}; + var url = ''; + for (var i = 0; i < path.length; ++i) { + var c = path[i]; + if (c == '{') { + var end = path.indexOf('}', i); + var varName = path.substr(i + 1, end - i - 1); + + if (!parameters[varName]) { + throw new Error('Missing parameter: ' + varName); + } + + used[varName] = true; + url += parameters[varName]; + i = end; + continue; + } + + url += c; + } + + // Append any query parameters. + var isFirst = true; + for (var paramName in parameters) { + if (!parameters.hasOwnProperty(paramName)) { continue; } + if (used[paramName]) { continue; } + + var value = parameters[paramName]; + if (value) { + url += isFirst ? '?' : '&'; + url += paramName + '=' + encodeURIComponent(value) + isFirst = false; + } + } + + return url; + }; + + var getGenericOperationName = function(userOperationName) { + return userOperationName.replace('User', ''); + }; + + var getMatchingUserOperationName = function(orgOperationName, method, userRelatedResource) { + if (userRelatedResource) { + if (userRelatedResource[method.toLowerCase()]) { + return userRelatedResource[method.toLowerCase()]['operationId']; + } + } + + throw new Error('Could not find user operation matching org operation: ' + orgOperationName); + }; + + var freshLoginInProgress = []; + var reject = function(msg) { + for (var i = 0; i < freshLoginInProgress.length; ++i) { + freshLoginInProgress[i].deferred.reject({'data': {'message': msg}}); + } + freshLoginInProgress = []; + }; + + var retry = function() { + for (var i = 0; i < freshLoginInProgress.length; ++i) { + freshLoginInProgress[i].retry(); + } + freshLoginInProgress = []; + }; + + var freshLoginFailCheck = function(opName, opArgs) { + return function(resp) { + var deferred = $q.defer(); + + // If the error is a fresh login required, show the dialog. 
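+      // All concurrent fresh-login failures share a single dialog; each caller's
+      // original request is queued and retried (or rejected) after verification.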
+ // TODO: remove error_type (old style error) + var fresh_login_required = resp.data['title'] == 'fresh_login_required' || resp.data['error_type'] == 'fresh_login_required'; + if (resp.status == 401 && fresh_login_required) { + var retryOperation = function() { + apiService[opName].apply(apiService, opArgs).then(function(resp) { + deferred.resolve(resp); + }, function(resp) { + deferred.reject(resp); + }); + }; + + var verifyNow = function() { + if (!$('#freshPassword').val()) { + return; + } + + var info = { + 'password': $('#freshPassword').val() + }; + + $('#freshPassword').val(''); + + // Conduct the sign in of the user. + apiService.verifyUser(info).then(function() { + // On success, retry the operations. if it succeeds, then resolve the + // deferred promise with the result. Otherwise, reject the same. + retry(); + }, function(resp) { + // Reject with the sign in error. + reject('Invalid verification credentials'); + }); + }; + + // Add the retry call to the in progress list. If there is more than a single + // in progress call, we skip showing the dialog (since it has already been + // shown). + freshLoginInProgress.push({ + 'deferred': deferred, + 'retry': retryOperation + }) + + if (freshLoginInProgress.length > 1) { + return deferred.promise; + } + + var box = bootbox.dialog({ + "message": 'It has been more than a few minutes since you last logged in, ' + + 'so please verify your password to perform this sensitive operation:' + + '
' + + '' + + '
', + "title": 'Please Verify', + "buttons": { + "verify": { + "label": "Verify", + "className": "btn-success btn-continue", + "callback": verifyNow + }, + "close": { + "label": "Cancel", + "className": "btn-default", + "callback": function() { + reject('Verification canceled') + } + } + } + }); + + box.bind('shown.bs.modal', function(){ + box.find("input").focus(); + box.find("form").submit(function() { + if (!$('#freshPassword').val()) { return; } + + box.modal('hide'); + verifyNow(); + }); + }); + + // Return a new promise. We'll accept or reject it based on the result + // of the login. + return deferred.promise; + } + + // Otherwise, we just 'raise' the error via the reject method on the promise. + return $q.reject(resp); + }; + }; + + var buildMethodsForOperation = function(operation, method, path, resourceMap) { + var operationName = operation['operationId']; + var urlPath = path['x-path']; + + // Add the operation itself. + apiService[operationName] = function(opt_options, opt_parameters, opt_background, opt_forceget) { + var one = Restangular.one(buildUrl(urlPath, opt_parameters)); + if (opt_background) { + one.withHttpConfig({ + 'ignoreLoadingBar': true + }); + } + + var opObj = one[opt_forceget ? 'get' : 'custom' + method.toUpperCase()](opt_options); + + // If the operation requires_fresh_login, then add a specialized error handler that + // will defer the operation's result if sudo is requested. + if (operation['x-requires-fresh-login']) { + opObj = opObj.catch(freshLoginFailCheck(operationName, arguments)); + } + return opObj; + }; + + // If the method for the operation is a GET, add an operationAsResource method. + if (method == 'get') { + apiService[operationName + 'AsResource'] = function(opt_parameters, opt_background) { + var getMethod = apiService[operationName]; + return getResource(getMethod, operation, opt_parameters, opt_background); + }; + } + + // If the operation has a user-related operation, then make a generic operation for this operation + // that can call both the user and the organization versions of the operation, depending on the + // parameters given. + if (path['x-user-related']) { + var userOperationName = getMatchingUserOperationName(operationName, method, resourceMap[path['x-user-related']]); + var genericOperationName = getGenericOperationName(userOperationName); + apiService[genericOperationName] = function(orgname, opt_options, opt_parameters, opt_background) { + if (orgname) { + if (orgname.name) { + orgname = orgname.name; + } + + var params = jQuery.extend({'orgname' : orgname}, opt_parameters || {}, opt_background); + return apiService[operationName](opt_options, params); + } else { + return apiService[userOperationName](opt_options, opt_parameters, opt_background); + } + }; + } + }; + + + var allowedMethods = ['get', 'post', 'put', 'delete']; + var resourceMap = {}; + var forEachOperation = function(callback) { + for (var path in window.__endpoints) { + if (!window.__endpoints.hasOwnProperty(path)) { + continue; + } + + for (var method in window.__endpoints[path]) { + if (!window.__endpoints[path].hasOwnProperty(method)) { + continue; + } + + if (allowedMethods.indexOf(method.toLowerCase()) < 0) { continue; } + callback(window.__endpoints[path][method], method, window.__endpoints[path]); + } + } + }; + + // Build the map of resource names to their objects. + forEachOperation(function(operation, method, path) { + resourceMap[path['x-name']] = path; + }); + + // Construct the methods for each API endpoint. 
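+  // e.g. a GET operation whose operationId is 'scGetConfig' yields
+  // ApiService.scGetConfig() and (for GETs only) ApiService.scGetConfigAsResource().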
+ forEachOperation(function(operation, method, path) { + buildMethodsForOperation(operation, method, path, resourceMap); + }); + + apiService.getErrorMessage = function(resp, defaultMessage) { + var message = defaultMessage; + if (resp && resp['data']) { + //TODO: remove error_message and error_description (old style error) + message = resp['data']['detail'] || resp['data']['error_message'] || resp['data']['message'] || resp['data']['error_description'] || message; + } + + return message; + }; + + apiService.errorDisplay = function(defaultMessage, opt_handler) { + return function(resp) { + var message = apiService.getErrorMessage(resp, defaultMessage); + if (opt_handler) { + var handlerMessage = opt_handler(resp); + if (handlerMessage) { + message = handlerMessage; + } + } + + message = UtilService.stringToHTML(message); + bootbox.dialog({ + "message": message, + "title": defaultMessage || 'Request Failure', + "buttons": { + "close": { + "label": "Close", + "className": "btn-primary" + } + } + }); + }; + }; + + // todo: remove hacks + apiService.scGetConfig = () => new Promise(() => { hello: true }); + apiService.scRegistryStatus = () => new Promise(() => { hello: true }); + + return apiService; +}]); diff --git a/config_app/js/services/container-service.js b/config_app/js/services/container-service.js new file mode 100644 index 000000000..c20cfc162 --- /dev/null +++ b/config_app/js/services/container-service.js @@ -0,0 +1,45 @@ +/** + * Helper service for working with the registry's container. Only works in enterprise. + */ +angular.module('quay-config') + .factory('ContainerService', ['ApiService', '$timeout', 'Restangular', + function(ApiService, $timeout, Restangular) { + var containerService = {}; + containerService.restartContainer = function(callback) { + ApiService.scShutdownContainer(null, null).then(function(resp) { + $timeout(callback, 2000); + }, ApiService.errorDisplay('Cannot restart container. Please report this to support.')) + }; + + containerService.scheduleStatusCheck = function(callback, opt_config) { + $timeout(function() { + containerService.checkStatus(callback, opt_config); + }, 2000); + }; + + containerService.checkStatus = function(callback, opt_config) { + var errorHandler = function(resp) { + if (resp.status == 404 || resp.status == 502 || resp.status == -1) { + // Container has not yet come back up, so we schedule another check. + containerService.scheduleStatusCheck(callback, opt_config); + return; + } + + return ApiService.errorDisplay('Cannot load status. Please report this to support')(resp); + }; + + // If config is specified, override the API base URL from this point onward. + // TODO(jschorr): Find a better way than this. This is safe, since this will only be called + // for a restart, but it is still ugly. + if (opt_config && opt_config['SERVER_HOSTNAME']) { + var scheme = opt_config['PREFERRED_URL_SCHEME'] || 'http'; + var baseUrl = scheme + '://' + opt_config['SERVER_HOSTNAME'] + '/api/v1/'; + Restangular.setBaseUrl(baseUrl); + } + + ApiService.scRegistryStatus(null, null, /* background */true) + .then(callback, errorHandler); + }; + + return containerService; + }]); diff --git a/config_app/js/services/cookie-service.js b/config_app/js/services/cookie-service.js new file mode 100644 index 000000000..af904124a --- /dev/null +++ b/config_app/js/services/cookie-service.js @@ -0,0 +1,23 @@ +/** + * Helper service for working with cookies. 
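+ *
+ * Usage sketch ('quay.loggedin' is the flag UserService maintains):
+ *
+ *   CookieService.putPermanent('quay.loggedin', 'true');  // far-future expiry
+ *   CookieService.get('quay.loggedin');                   // => 'true'
+ *   CookieService.clear('quay.loggedin');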
+ */ +angular.module('quay-config').factory('CookieService', ['$cookies', function($cookies) { + var cookieService = {}; + cookieService.putPermanent = function(name, value) { + document.cookie = escape(name) + "=" + escape(value) + "; expires=Fri, 31 Dec 9999 23:59:59 GMT; path=/"; + }; + + cookieService.putSession = function(name, value) { + $cookies.put(name, value); + }; + + cookieService.clear = function(name) { + $cookies.remove(name); + }; + + cookieService.get = function(name) { + return $cookies.get(name); + }; + + return cookieService; +}]); diff --git a/config_app/js/services/features-config.js b/config_app/js/services/features-config.js new file mode 100644 index 000000000..e655f32bf --- /dev/null +++ b/config_app/js/services/features-config.js @@ -0,0 +1,91 @@ +/** + * Feature flags. + */ +angular.module('quay-config').factory('Features', [function() { + if (!window.__features) { + return {}; + } + + var features = window.__features; + features.getFeature = function(name, opt_defaultValue) { + var value = features[name]; + if (value == null) { + return opt_defaultValue; + } + return value; + }; + + features.hasFeature = function(name) { + return !!features.getFeature(name); + }; + + features.matchesFeatures = function(list) { + for (var i = 0; i < list.length; ++i) { + var value = features.getFeature(list[i]); + if (!value) { + return false; + } + } + return true; + }; + + return features; +}]); + +/** + * Application configuration. + */ +angular.module('quay-config').factory('Config', ['Features', function(Features) { + if (!window.__config) { + return {}; + } + + var config = window.__config; + config.getDomain = function() { + return config['SERVER_HOSTNAME']; + }; + + config.getHost = function(opt_auth) { + var auth = opt_auth; + if (auth) { + auth = auth + '@'; + } + + return config['PREFERRED_URL_SCHEME'] + '://' + auth + config['SERVER_HOSTNAME']; + }; + + config.getHttp = function() { + return config['PREFERRED_URL_SCHEME']; + }; + + config.getUrl = function(opt_path) { + var path = opt_path || ''; + return config['PREFERRED_URL_SCHEME'] + '://' + config['SERVER_HOSTNAME'] + path; + }; + + config.getValue = function(name, opt_defaultValue) { + var value = config[name]; + if (value == null) { + return opt_defaultValue; + } + return value; + }; + + config.getEnterpriseLogo = function(opt_defaultValue) { + if (!config.ENTERPRISE_LOGO_URL) { + if (opt_defaultValue) { + return opt_defaultValue; + } + + if (Features.BILLING) { + return '/static/img/quay-horizontal-color.svg'; + } else { + return '/static/img/QuayEnterprise_horizontal_color.svg'; + } + } + + return config.ENTERPRISE_LOGO_URL; + }; + + return config; +}]); \ No newline at end of file diff --git a/config_app/js/services/user-service.js b/config_app/js/services/user-service.js new file mode 100644 index 000000000..918ad9adb --- /dev/null +++ b/config_app/js/services/user-service.js @@ -0,0 +1,217 @@ +import * as Raven from 'raven-js'; + + +/** + * Service which monitors the current user session and provides methods for returning information + * about the user. 
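+ *
+ * Usage sketch (mirrors how the certificates field elsewhere in this patch
+ * consumes it):
+ *
+ *   UserService.updateUserIn($scope, function(user) {
+ *     if (!user.anonymous) { loadCertificates(); }
+ *   });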
+ */ +angular.module('quay-config') + .factory('UserService', ['ApiService', 'CookieService', '$rootScope', 'Config', '$location', '$timeout', + +function(ApiService, CookieService, $rootScope, Config, $location, $timeout) { + var userResponse = { + verified: false, + anonymous: true, + username: null, + email: null, + organizations: [], + logins: [], + beforeload: true + }; + + var userService = {}; + var _EXTERNAL_SERVICES = ['ldap', 'jwtauthn', 'keystone', 'dex']; + + userService.hasEverLoggedIn = function() { + return CookieService.get('quay.loggedin') == 'true'; + }; + + userService.updateUserIn = function(scope, opt_callback) { + scope.$watch(function () { return userService.currentUser(); }, function (currentUser) { + if (currentUser) { + $timeout(function(){ + scope.user = currentUser; + if (opt_callback) { + opt_callback(currentUser); + } + }, 0, false); + }; + }, true); + }; + + userService.load = function(opt_callback) { + var handleUserResponse = function(loadedUser) { + userResponse = loadedUser; + + if (!userResponse.anonymous) { + if (Config.MIXPANEL_KEY) { + try { + mixpanel.identify(userResponse.username); + mixpanel.people.set({ + '$email': userResponse.email, + '$username': userResponse.username, + 'verified': userResponse.verified + }); + mixpanel.people.set_once({ + '$created': new Date() + }) + } catch (e) { + window.console.log(e); + } + } + + if (Config.MARKETO_MUNCHKIN_ID && userResponse['marketo_user_hash']) { + var associateLeadBody = {'Email': userResponse.email}; + if (window.Munchkin !== undefined) { + try { + Munchkin.munchkinFunction( + 'associateLead', + associateLeadBody, + userResponse['marketo_user_hash'] + ); + } catch (e) { + } + } else { + window.__quay_munchkin_queue.push([ + 'associateLead', + associateLeadBody, + userResponse['marketo_user_hash'] + ]); + } + } + + if (window.Raven !== undefined) { + try { + Raven.setUser({ + email: userResponse.email, + id: userResponse.username + }); + } catch (e) { + window.console.log(e); + } + } + + CookieService.putPermanent('quay.loggedin', 'true'); + } else { + if (window.Raven !== undefined) { + Raven.setUser(); + } + } + + // If the loaded user has a prompt, redirect them to the update page. 
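+      // A pending prompt takes precedence over invoking opt_callback.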
+ if (loadedUser.prompts && loadedUser.prompts.length) { + $location.path('/updateuser'); + return; + } + + if (opt_callback) { + opt_callback(loadedUser); + } + }; + + ApiService.getLoggedInUser().then(function(loadedUser) { + handleUserResponse(loadedUser); + }, function() { + handleUserResponse({'anonymous': true}); + }); + }; + + userService.isOrganization = function(name) { + return !!userService.getOrganization(name); + }; + + userService.getOrganization = function(name) { + if (!userResponse || !userResponse.organizations) { return null; } + for (var i = 0; i < userResponse.organizations.length; ++i) { + var org = userResponse.organizations[i]; + if (org.name == name) { + return org; + } + } + + return null; + }; + + userService.isNamespaceAdmin = function(namespace) { + if (namespace == userResponse.username) { + return true; + } + + var org = userService.getOrganization(namespace); + if (!org) { + return false; + } + + return org.is_org_admin; + }; + + userService.isKnownNamespace = function(namespace) { + if (namespace == userResponse.username) { + return true; + } + + var org = userService.getOrganization(namespace); + return !!org; + }; + + userService.getNamespace = function(namespace) { + var org = userService.getOrganization(namespace); + if (org) { + return org; + } + + if (namespace == userResponse.username) { + return userResponse; + } + + return null; + }; + + userService.getCLIUsername = function() { + if (!userResponse) { + return null; + } + + var externalUsername = null; + userResponse.logins.forEach(function(login) { + if (_EXTERNAL_SERVICES.indexOf(login.service) >= 0) { + externalUsername = login.service_identifier; + } + }); + + return externalUsername || userResponse.username; + }; + + userService.deleteNamespace = function(info, callback) { + var namespace = info.user ? info.user.username : info.organization.name; + if (!namespace) { + return; + } + + var errorDisplay = ApiService.errorDisplay('Could not delete namespace', callback); + var cb = function(resp) { + userService.load(function(currentUser) { + callback(true); + $location.path('/'); + }); + } + + if (info.user) { + ApiService.deleteCurrentUser().then(cb, errorDisplay) + } else { + var delParams = { + 'orgname': info.organization.name + }; + ApiService.deleteAdminedOrganization(null, delParams).then(cb, errorDisplay); + } + }; + + userService.currentUser = function() { + return userResponse; + }; + + // Update the user in the root scope. + userService.updateUserIn($rootScope); + + return userService; +}]); diff --git a/config_app/js/services/util-service.js b/config_app/js/services/util-service.js new file mode 100644 index 000000000..34f0a4191 --- /dev/null +++ b/config_app/js/services/util-service.js @@ -0,0 +1,83 @@ +/** + * Service which exposes various utility methods. 
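+ *
+ * Escaping sketch:
+ *
+ *   UtilService.escapeHtmlString('a <b> & "c"');
+ *   // => 'a &lt;b&gt; &amp; &quot;c&quot;'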
+ */
+angular.module('quay-config').factory('UtilService', ['$sanitize',
+  function($sanitize) {
+    var utilService = {};
+
+    var adBlockEnabled = null;
+
+    utilService.isAdBlockEnabled = function(callback) {
+      if (adBlockEnabled !== null) {
+        callback(adBlockEnabled);
+        return;
+      }
+
+      if (typeof blockAdBlock === 'undefined') {
+        callback(true);
+        return;
+      }
+
+      var bab = new BlockAdBlock({
+        checkOnLoad: false,
+        resetOnEnd: true
+      });
+
+      bab.onDetected(function() { adBlockEnabled = true; callback(true); });
+      bab.onNotDetected(function() { adBlockEnabled = false; callback(false); });
+      bab.check();
+    };
+
+    utilService.isEmailAddress = function(val) {
+      var emailRegex = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
+      return emailRegex.test(val);
+    };
+
+    utilService.escapeHtmlString = function(text) {
+      var textStr = (text || '').toString();
+      var adjusted = textStr.replace(/&/g, "&amp;")
+        .replace(/</g, "&lt;")
+        .replace(/>/g, "&gt;")
+        .replace(/"/g, "&quot;")
+        .replace(/'/g, "&#39;");
+
+      return adjusted;
+    };
+
+    utilService.stringToHTML = function(text) {
+      text = utilService.escapeHtmlString(text);
+      text = text.replace(/\n/g, '<br>');
+      return text;
+    };
+
+    utilService.getRestUrl = function(args) {
+      var url = '';
+      for (var i = 0; i < arguments.length; ++i) {
+        if (i > 0) {
+          url += '/';
+        }
+        url += encodeURI(arguments[i]);
+      }
+      return url;
+    };
+
+    utilService.textToSafeHtml = function(text) {
+      return $sanitize(utilService.escapeHtmlString(text));
+    };
+
+    return utilService;
+  }])
+  .factory('CoreDialog', [() => {
+    var service = {};
+    service['fatal'] = function(title, message) {
+      bootbox.dialog({
+        "title": title,
+        "message": "<div class='alert-icon-container-container'><div class='alert-icon-container'><div class='alert-icon'></div></div></div>
" + message, + "buttons": {}, + "className": "co-dialog fatal-error", + "closeButton": false + }); + }; + + return service; + }]); diff --git a/config_app/js/setup/setup.component.js b/config_app/js/setup/setup.component.js new file mode 100644 index 000000000..3e828214c --- /dev/null +++ b/config_app/js/setup/setup.component.js @@ -0,0 +1,332 @@ +import * as URI from 'urijs'; +const templateUrl = require('./setup.html'); + +(function() { + /** + * The Setup page provides a nice GUI walkthrough experience for setting up Quay Enterprise. + */ + + angular.module('quay-config').directive('setup', () => { + const directiveDefinitionObject = { + priority: 1, + templateUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'isActive': '=isActive', + 'configurationSaved': '&configurationSaved' + }, + controller: SetupCtrl, + }; + + return directiveDefinitionObject; + }) + + function SetupCtrl($scope, $timeout, ApiService, Features, UserService, ContainerService, CoreDialog) { + // if (!Features.SUPER_USERS) { + // return; + // } + + $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9_\.\-]+(:[0-9]+)?$'; + + $scope.validateHostname = function(hostname) { + if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) { + return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.' + } + + return null; + }; + + // Note: The values of the enumeration are important for isStepFamily. For example, + // *all* states under the "configuring db" family must start with "config-db". + $scope.States = { + // Loading the state of the product. + 'LOADING': 'loading', + + // The configuration directory is missing. + 'MISSING_CONFIG_DIR': 'missing-config-dir', + + // The config.yaml exists but it is invalid. + 'INVALID_CONFIG': 'config-invalid', + + // DB is being configured. + 'CONFIG_DB': 'config-db', + + // DB information is being validated. + 'VALIDATING_DB': 'config-db-validating', + + // DB information is being saved to the config. + 'SAVING_DB': 'config-db-saving', + + // A validation error occurred with the database. + 'DB_ERROR': 'config-db-error', + + // Database is being setup. + 'DB_SETUP': 'setup-db', + + // Database setup has succeeded. + 'DB_SETUP_SUCCESS': 'setup-db-success', + + // An error occurred when setting up the database. + 'DB_SETUP_ERROR': 'setup-db-error', + + // The container is being restarted for the database changes. + 'DB_RESTARTING': 'setup-db-restarting', + + // A superuser is being configured. + 'CREATE_SUPERUSER': 'create-superuser', + + // The superuser is being created. + 'CREATING_SUPERUSER': 'create-superuser-creating', + + // An error occurred when setting up the superuser. + 'SUPERUSER_ERROR': 'create-superuser-error', + + // The superuser was created successfully. + 'SUPERUSER_CREATED': 'create-superuser-created', + + // General configuration is being setup. + 'CONFIG': 'config', + + // The configuration is fully valid. + 'VALID_CONFIG': 'valid-config', + + // The container is being restarted for the configuration changes. + 'CONFIG_RESTARTING': 'config-restarting', + + // The product is ready for use. 
+ 'READY': 'ready' + } + + $scope.csrf_token = window.__token; + $scope.currentStep = $scope.States.LOADING; + $scope.errors = {}; + $scope.stepProgress = []; + $scope.hasSSL = false; + $scope.hostname = null; + $scope.currentConfig = null; + + $scope.currentState = { + 'hasDatabaseSSLCert': false + }; + + $scope.$watch('currentStep', function(currentStep) { + $scope.stepProgress = $scope.getProgress(currentStep); + + switch (currentStep) { + case $scope.States.CONFIG: + $('#setupModal').modal('hide'); + break; + + case $scope.States.MISSING_CONFIG_DIR: + $scope.showMissingConfigDialog(); + break; + + case $scope.States.INVALID_CONFIG: + $scope.showInvalidConfigDialog(); + break; + + case $scope.States.DB_SETUP: + $scope.performDatabaseSetup(); + // Fall-through. + + case $scope.States.CREATE_SUPERUSER: + case $scope.States.DB_RESTARTING: + case $scope.States.CONFIG_DB: + case $scope.States.VALID_CONFIG: + case $scope.States.READY: + $('#setupModal').modal({ + keyboard: false, + backdrop: 'static' + }); + break; + } + }); + + $scope.restartContainer = function(state) { + $scope.currentStep = state; + ContainerService.restartContainer(function() { + $scope.checkStatus() + }); + }; + + $scope.showSuperuserPanel = function() { + $('#setupModal').modal('hide'); + var prefix = $scope.hasSSL ? 'https' : 'http'; + var hostname = $scope.hostname; + if (!hostname) { + hostname = document.location.hostname; + if (document.location.port) { + hostname = hostname + ':' + document.location.port; + } + } + + window.location = prefix + '://' + hostname + '/superuser'; + }; + + $scope.configurationSaved = function(config) { + $scope.hasSSL = config['PREFERRED_URL_SCHEME'] == 'https'; + $scope.hostname = config['SERVER_HOSTNAME']; + $scope.currentConfig = config; + + $scope.currentStep = $scope.States.VALID_CONFIG; + }; + + $scope.getProgress = function(step) { + var isStep = $scope.isStep; + var isStepFamily = $scope.isStepFamily; + var States = $scope.States; + + return [ + isStepFamily(step, States.CONFIG_DB), + isStepFamily(step, States.DB_SETUP), + isStep(step, States.DB_RESTARTING), + isStepFamily(step, States.CREATE_SUPERUSER), + isStep(step, States.CONFIG), + isStep(step, States.VALID_CONFIG), + isStep(step, States.CONFIG_RESTARTING), + isStep(step, States.READY) + ]; + }; + + $scope.isStepFamily = function(step, family) { + if (!step) { return false; } + return step.indexOf(family) == 0; + }; + + $scope.isStep = function(step) { + for (var i = 1; i < arguments.length; ++i) { + if (arguments[i] == step) { + return true; + } + } + return false; + }; + + $scope.beginSetup = function() { + $scope.currentStep = $scope.States.CONFIG_DB; + }; + + $scope.showInvalidConfigDialog = function() { + var message = "The config.yaml file found in conf/stack could not be parsed." + var title = "Invalid configuration file"; + CoreDialog.fatal(title, message); + }; + + + $scope.showMissingConfigDialog = function() { + var message = "A volume should be mounted into the container at /conf/stack: " + + "

<br><code>docker run -v /path/to/config:/conf/stack</code>" +
+        "<br>
Once fixed, restart the container. For more information, " + + "" + + "Read the Setup Guide" + + var title = "Missing configuration volume"; + CoreDialog.fatal(title, message); + }; + + $scope.parseDbUri = function(value) { + if (!value) { return null; } + + // Format: mysql+pymysql://:@/ + var uri = URI(value); + return { + 'kind': uri.protocol(), + 'username': uri.username(), + 'password': uri.password(), + 'server': uri.host(), + 'database': uri.path() ? uri.path().substr(1) : '' + }; + }; + + $scope.serializeDbUri = function(fields) { + if (!fields['server']) { return ''; } + if (!fields['database']) { return ''; } + + var uri = URI(); + try { + uri = uri && uri.host(fields['server']); + uri = uri && uri.protocol(fields['kind']); + uri = uri && uri.username(fields['username']); + uri = uri && uri.password(fields['password']); + uri = uri && uri.path('/' + (fields['database'] || '')); + uri = uri && uri.toString(); + } catch (ex) { + return ''; + } + + return uri; + }; + + $scope.createSuperUser = function() { + $scope.currentStep = $scope.States.CREATING_SUPERUSER; + ApiService.scCreateInitialSuperuser($scope.superUser, null).then(function(resp) { + UserService.load(); + $scope.checkStatus(); + }, function(resp) { + $scope.currentStep = $scope.States.SUPERUSER_ERROR; + $scope.errors.SuperuserCreationError = ApiService.getErrorMessage(resp, 'Could not create superuser'); + }); + }; + + $scope.performDatabaseSetup = function() { + $scope.currentStep = $scope.States.DB_SETUP; + ApiService.scSetupDatabase(null, null).then(function(resp) { + if (resp['error']) { + $scope.currentStep = $scope.States.DB_SETUP_ERROR; + $scope.errors.DatabaseSetupError = resp['error']; + } else { + $scope.currentStep = $scope.States.DB_SETUP_SUCCESS; + } + }, ApiService.errorDisplay('Could not setup database. Please report this to support.')) + }; + + $scope.validateDatabase = function() { + $scope.currentStep = $scope.States.VALIDATING_DB; + $scope.databaseInvalid = null; + + var data = { + 'config': { + 'DB_URI': $scope.databaseUri + }, + 'hostname': window.location.host + }; + + if ($scope.currentState.hasDatabaseSSLCert) { + data['config']['DB_CONNECTION_ARGS'] = { + 'ssl': { + 'ca': 'conf/stack/database.pem' + } + }; + } + + var params = { + 'service': 'database' + }; + + ApiService.scValidateConfig(data, params).then(function(resp) { + var status = resp.status; + + if (status) { + $scope.currentStep = $scope.States.SAVING_DB; + ApiService.scUpdateConfig(data, null).then(function(resp) { + $scope.checkStatus(); + }, ApiService.errorDisplay('Cannot update config. Please report this to support')); + } else { + $scope.currentStep = $scope.States.DB_ERROR; + $scope.errors.DatabaseValidationError = resp.reason; + } + }, ApiService.errorDisplay('Cannot validate database. Please report this to support')); + }; + + $scope.checkStatus = function() { + ContainerService.checkStatus(function(resp) { + $scope.currentStep = resp['status']; + }, $scope.currentConfig); + }; + + // Load the initial status. + $scope.checkStatus(); + }; +})(); diff --git a/config_app/js/setup/setup.html b/config_app/js/setup/setup.html new file mode 100644 index 000000000..b40e1d78d --- /dev/null +++ b/config_app/js/setup/setup.html @@ -0,0 +1,311 @@ +
+[The 311 lines of template markup for setup.html were lost in extraction. The recoverable
+ text fragments are the page header "Quay Enterprise Setup", the banner "Almost done!",
+ and the prompt "Configure your Redis database and other settings below".]
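A note for reviewers on the state machine in setup.component.js above: the wizard stores no explicit parent/child links between states. As the comment over the States enum says, a state belongs to a "family" exactly when its string value starts with that family's value, and getProgress() is assembled from those prefix tests. A minimal sketch of the convention in Python (the state names are illustrative, not the full enum):

# Illustration only, not part of the patch: the prefix-family check behind isStepFamily().
STATES = {
    'CONFIG_DB': 'config-db',                 # family root
    'VALIDATING_DB': 'config-db-validating',  # same family: shares the prefix
    'SAVING_DB': 'config-db-saving',
    'DB_SETUP': 'setup-db',                   # a different family
}

def is_step_family(step, family):
    # Membership is a plain string-prefix test, which is why every state in a
    # family must start with the family's value.
    return bool(step) and step.startswith(family)

assert is_step_family(STATES['SAVING_DB'], STATES['CONFIG_DB'])
assert not is_step_family(STATES['DB_SETUP'], STATES['CONFIG_DB'])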
diff --git a/config_app/static/css/core-ui.css b/config_app/static/css/core-ui.css new file mode 100644 index 000000000..2a7fdaf15 --- /dev/null +++ b/config_app/static/css/core-ui.css @@ -0,0 +1,1500 @@ +/* Global Brand Bar */ +.co-m-global-nav { + background: white; + height: 30px; + line-height: 36px; + position: relative; + z-index: 90; +} + +.co-m-global-nav svg { + width: auto !important; +} + +.co-m-global-nav .co-m-global-nav-left { + text-align: left; + padding-left: 28px; +} + +.co-m-global-nav .co-m-global-nav-right { + text-align: right; + font-size: 16px; + line-height: 30px; + padding-right: 25px; +} + +.co-m-global-nav .co-m-global-nav-item { + padding: 0 20px 0 15px; + border-right: 1px solid #eee; + display: inline-block; + height: 16px; + line-height: 16px; +} + +.co-m-global-nav .co-m-global-nav-item:first-of-type { + padding-left: 0; +} + +.co-m-global-nav .co-m-global-nav-item:last-of-type { + padding-right: 0; + border-right: 0; +} + +/* Tweaks for small screens */ +@media screen and (max-width: 767px) { + .co-m-global-nav { + display: none; /* hide the whole thing */ + } +} + +a:active { + outline: none !important; +} + +a:focus { + outline: none !important; +} + +.co-form-table label { + white-space: nowrap; +} + +.co-form-table td { + padding: 8px; +} + +.co-form-table td:first-child { + vertical-align: top; + padding-top: 14px; +} + +.co-form-table td .co-help-text { + margin-top: 10px; + margin-bottom: 4px; +} + +.co-help-text { + margin-top: 6px; + color: #aaa; + display: inline-block; +} + +.co-options-menu .fa-gear { + color: #999; + cursor: pointer; +} + +.co-options-menu .dropdown.open .fa-gear { + color: #428BCA; +} + +.co-img-bg-network { + background: url('/static/img/network-tile.png') left top repeat, linear-gradient(30deg, #2277ad, #144768) no-repeat left top fixed; + background-color: #2277ad; + background-size: auto, 100% 100%; +} + +.co-m-navbar { + background-color: white; + margin: 0; + padding-left: 10px; +} + +.co-fx-box-shadow { + -webkit-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); + -moz-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); + -ms-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); + -o-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); + box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); +} + +.co-fx-box-shadow-heavy { + -webkit-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -moz-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -ms-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -o-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); +} + +.co-fx-text-shadow { + text-shadow: rgba(0, 0, 0, 1) 1px 1px 2px; +} + +.co-nav-title { + margin-top: -22px; + height: 70px; +} + +.co-nav-title .co-nav-title-content { + color: white; + text-align: center; + white-space: nowrap; +} + +.co-nav-title .co-nav-title-action { + display: block; + color: white; + text-align: center; + line-height: 70px; + font-size: 18px; +} + +.co-nav-title .co-nav-title-action a { + color: white; +} + +.co-nav-title .co-nav-title-action .fa { + margin-right: 6px; +} + + +@media (max-width: 767px) { + .co-nav-title { + height: auto; + min-height: 70px; + } + + .co-nav-title .co-nav-title-content { + height: 34px; + overflow: hidden; + text-overflow: ellipsis; + font-size: 22px; + } +} + +.co-main-content-panel { + margin-bottom: 20px; + background-color: #fff; + border: 1px solid transparent; + padding: 10px; + + -webkit-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -moz-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + -ms-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + 
-o-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); + box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); +} + +.cor-log-box { + width: 100%; + height: 550px; + position: relative; +} + +.co-log-viewer { + position: absolute; + top: 20px; + left: 20px; + right: 20px; + height: 500px; + + padding: 20px; + + background: rgb(55, 55, 55); + border: 1px solid black; + color: white; + + overflow: scroll; +} + +.co-log-viewer .co-log-content { + font-family: Consolas, "Lucida Console", Monaco, monospace; + font-size: 12px; + white-space: pre; +} + +.cor-log-box .co-log-viewer-new-logs i { + margin-left: 10px; + display: inline-block; +} + +.cor-log-box .co-log-viewer-new-logs { + cursor: pointer; + position: absolute; + bottom: 40px; + right: 30px; + padding: 10px; + color: white; + border-radius: 10px; + background: rgba(72, 158, 72, 0.8); +} + +.co-panel { + margin-bottom: 40px; + + /*border: 1px solid #eee;*/ +} + +.co-panel .co-panel-heading img { + margin-right: 6px; + width: 24px; +} + +.co-panel .co-panel-heading > i.fa { + margin-right: 6px; + width: 24px; + text-align: center; +} + +.co-panel .co-panel-heading { + padding: 6px; + /*background: #eee;*/ + border-bottom: 1px solid #eee; + + margin-bottom: 4px; + font-size: 135%; + padding-left: 10px; +} + +.co-panel .co-panel-body { + padding: 10px; +} + +@media (max-width: 767px) { + .co-panel > .co-panel-body { + padding: 0px; + padding-top: 10px; + padding-bottom: 10px; + } + + .co-panel > .panel-body { + padding: 0px; + padding-top: 10px; + padding-bottom: 10px; + } +} + + +.co-panel .co-panel-button-bar { + margin-top: 10px; + padding-top: 10px; + border-top: 1px solid #eee; +} + +.co-panel-body .co-panel-heading { + font-size: 120%; + border-bottom: 0px; + margin: 0px; + margin-bottom: -6px; +} + +.co-panel-body .co-panel-body { + padding-left: 38px; +} + + +.config-bool-field-element input { + margin-right: 6px; + font-size: 24px; +} + +.config-setup-tool-element .help-text { + margin-top: 6px; + color: #aaa; +} + +.config-setup-tool-element .description { + padding: 6px; +} + +.config-setup-tool-element .config-table > tbody > tr > td:first-child { + padding-top: 14px; + font-weight: bold; +} + +.config-setup-tool-element .config-table > tbody > tr > td.non-input { + padding-top: 8px; +} + +.config-setup-tool-element .config-table > tbody > tr > td { + padding: 8px; + vertical-align: top; +} + +.config-setup-tool-element .config-table > tbody > tr > td .config-numeric-field-element { + width: 100px; +} + +.config-setup-tool-element .config-table > tbody > tr > td .config-string-field-element { + width: 400px; +} + +.config-setup-tool-element .config-table > tbody > tr > td .config-string-list-field-element { + width: 400px; +} + +.config-map-field-element table { + margin-bottom: 10px; +} + +.config-map-field-element .form-control-container { + border-top: 1px solid #eee; + padding-top: 10px; +} + +.config-map-field-element .form-control-container select, .config-map-field-element .form-control-container input { + margin-bottom: 10px; +} + +.config-map-field-element .empty { + color: #ccc; + margin-bottom: 10px; + display: block; +} + +.config-map-field-element .item-title { + font-weight: bold; +} + +.config-contact-field { + margin-bottom: 4px; +} + +.config-contact-field .dropdown button { + width: 100px; + text-align: left; +} + +.config-contact-field .dropdown button .caret { + float: right; + margin-top: 9px; +} + +.config-contact-field .dropdown button i.fa { + margin-right: 6px; + width: 14px; + text-align: center; + display: 
inline-block; +} + +.config-contact-field .form-control { + width: 350px; +} + +.config-certificates-field-element .dns-name { + display: inline-block; + margin-right: 10px; +} + +.config-certificates-field-element .cert-status .fa { + margin-right: 4px; +} + +.config-certificates-field-element .cert-status .green { + color: #2FC98E; +} + +.config-certificates-field-element .cert-status .orange { + color: #FCA657; +} + +.config-certificates-field-element .cert-status .red { + color: #D64456; +} + +.config-certificates-field-element .file-upload-box-element .file-input-container { + padding: 0px; + text-align: left; +} + +.config-certificates-field-element .file-upload-box-element .file-drop + label { + margin-top: 0px; + margin-bottom: 4px; +} + +.config-list-field-element .empty { + color: #ccc; + margin-bottom: 10px; + display: block; +} + +.config-list-field-element input { + vertical-align: middle; +} + +.config-list-field-element .item-delete { + display: inline-block; + margin-left: 20px; +} + +.config-list-field-element input { + width: 350px; +} + +.config-setup-tool-element .inner-table { + margin-left: 10px; +} + +.config-setup-tool-element .inner-table tr td:first-child { + font-weight: bold; +} + +.config-setup-tool-element .inner-table td { + padding: 6px; +} + +.config-file-field-element input { + display: inline-block; + margin-left: 10px; +} + +.config-service-key-field-element { + position: relative; +} + +.config-service-key-field-element .co-modify-link { + margin-left: 10px; +} + +.config-service-key-field-element .fa-check { + margin-right: 4px; +} + +.co-checkbox { + position: relative; +} + +.co-checkbox input { + display: none; +} + +.co-checkbox label { + position: relative; + padding-left: 28px; + cursor: pointer; +} + +.co-checkbox label:before { + content: ''; + cursor: pointer; + position: absolute; + width: 20px; + height: 20px; + top: 0; + left: 0; + border-radius: 4px; + + -webkit-box-shadow: inset 0px 1px 1px rgba(0,0,0,0.5), 0px 1px 0px rgba(255,255,255,.4); + -moz-box-shadow: inset 0px 1px 1px rgba(0,0,0,0.5), 0px 1px 0px rgba(255,255,255,.4); + box-shadow: inset 0px 1px 1px rgba(0,0,0,0.5), 0px 1px 0px rgba(255,255,255,.4); + + background: -webkit-linear-gradient(top, #222 0%, #45484d 100%); + background: -moz-linear-gradient(top, #222 0%, #45484d 100%); + background: -o-linear-gradient(top, #222 0%, #45484d 100%); + background: -ms-linear-gradient(top, #222 0%, #45484d 100%); + background: linear-gradient(top, #222 0%, #45484d 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#222', endColorstr='#45484d',GradientType=0 ); +} + +.co-checkbox label:after { + -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=0)"; + filter: alpha(opacity=0); + opacity: 0; + content: ''; + position: absolute; + width: 11px; + height: 7px; + background: transparent; + top: 5px; + left: 4px; + border: 3px solid #fcfff4; + border-top: none; + border-right: none; + + -webkit-transform: rotate(-45deg); + -moz-transform: rotate(-45deg); + -o-transform: rotate(-45deg); + -ms-transform: rotate(-45deg); + transform: rotate(-45deg); +} + +.co-checkbox label:hover::after { + -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=30)"; + filter: alpha(opacity=30); + opacity: 0.3; +} + +.co-checkbox input[type=checkbox]:checked + label:after { + -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=100)"; + filter: alpha(opacity=100); + opacity: 1; + border: 3px solid rgb(26, 255, 26); + border-top: none; + border-right: none; +} + 
+.co-floating-bottom-bar { + height: 50px; +} + +.co-floating-bottom-bar.floating { + position: fixed; + bottom: 0px; +} + +.config-setup-tool .cor-floating-bottom-bar button i.fa { + margin-right: 6px; +} + +.config-setup-tool .service-verification { + padding: 20px; + background: #343434; + color: white; + margin-bottom: -14px; +} + +.config-setup-tool .service-verification-row { + margin-bottom: 6px; +} + +.config-setup-tool .service-verification-row .service-title { + font-variant: small-caps; + font-size: 145%; + vertical-align: middle; +} + +#validateAndSaveModal .fa-warning { + font-size: 22px; + margin-right: 10px; + vertical-align: middle; + color: rgb(255, 186, 53); +} + +#validateAndSaveModal .fa-check-circle { + font-size: 22px; + margin-right: 10px; + vertical-align: middle; + color: rgb(53, 186, 53); +} + +.config-setup-tool .service-verification-error { + white-space: pre-wrap; + margin-top: 10px; + margin-left: 36px; + margin-bottom: 20px; + max-height: 250px; + overflow: auto; + border: 1px solid #797979; + background: black; + padding: 6px; + font-family: Consolas, "Lucida Console", Monaco, monospace; + font-size: 12px; +} + +.co-m-loader, .co-m-inline-loader { + min-width: 28px; } + +.co-m-loader { + display: block; + position: absolute; + left: 50%; + top: 50%; + margin: -11px 0 0 -13px; } + +.co-m-inline-loader { + display: inline-block; + cursor: default; } + .co-m-inline-loader:hover { + text-decoration: none; } + +.co-m-loader-dot__one, .co-m-loader-dot__two, .co-m-loader-dot__three { + -webkit-border-radius: 3px; + -moz-border-radius: 3px; + -ms-border-radius: 3px; + -o-border-radius: 3px; + border-radius: 3px; + animation-fill-mode: both; + -webkit-animation-fill-mode: both; + -moz-animation-fill-mode: both; + -ms-animation-fill-mode: both; + -o-animation-fill-mode: both; + animation-name: bouncedelay; + animation-duration: 1s; + animation-timing-function: ease-in-out; + animation-delay: 0; + animation-direction: normal; + animation-iteration-count: infinite; + animation-fill-mode: forwards; + animation-play-state: running; + -webkit-animation-name: bouncedelay; + -webkit-animation-duration: 1s; + -webkit-animation-timing-function: ease-in-out; + -webkit-animation-delay: 0; + -webkit-animation-direction: normal; + -webkit-animation-iteration-count: infinite; + -webkit-animation-fill-mode: forwards; + -webkit-animation-play-state: running; + -moz-animation-name: bouncedelay; + -moz-animation-duration: 1s; + -moz-animation-timing-function: ease-in-out; + -moz-animation-delay: 0; + -moz-animation-direction: normal; + -moz-animation-iteration-count: infinite; + -moz-animation-fill-mode: forwards; + -moz-animation-play-state: running; + display: inline-block; + height: 6px; + width: 6px; + background: #419eda; + border-radius: 100%; + display: inline-block; } + +.co-m-loader-dot__one { + animation-delay: -0.32s; + -webkit-animation-delay: -0.32s; + -moz-animation-delay: -0.32s; + -ms-animation-delay: -0.32s; + -o-animation-delay: -0.32s; } + +.co-m-loader-dot__two { + animation-delay: -0.16s; + -webkit-animation-delay: -0.16s; + -moz-animation-delay: -0.16s; + -ms-animation-delay: -0.16s; + -o-animation-delay: -0.16s; } + +@-webkit-keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + 
-o-transform: scale(1, 1); + transform: scale(1, 1); } } + +@-moz-keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: scale(1, 1); + transform: scale(1, 1); } } + +@-ms-keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: scale(1, 1); + transform: scale(1, 1); } } + +@keyframes bouncedelay { + 0%, 80%, 100% { + -webkit-transform: scale(0.25, 0.25); + -moz-transform: scale(0.25, 0.25); + -ms-transform: scale(0.25, 0.25); + -o-transform: scale(0.25, 0.25); + transform: scale(0.25, 0.25); } + + 40% { + -webkit-transform: scale(1, 1); + -moz-transform: scale(1, 1); + -ms-transform: scale(1, 1); + -o-transform: scale(1, 1); + transform: scale(1, 1); } } + +.co-dialog .modal-body { + padding: 10px; + min-height: 100px; +} + +.co-dialog .modal-body h4 { + margin-bottom: 20px; +} + +.co-dialog .modal-content { + border-radius: 0px; +} + +.co-dialog.fatal-error .modal-content { + padding-left: 175px; +} + +.co-dialog.fatal-error .alert-icon-container-container { + position: absolute; + top: -36px; + left: -175px; + bottom: 20px; +} + +.co-dialog.fatal-error .alert-icon-container { + height: 100%; + display: table; +} + +.co-dialog.fatal-error .alert-icon { + display: table-cell; + vertical-align: middle; + border-right: 1px solid #eee; + margin-right: 20px; +} + +.co-dialog.fatal-error .alert-icon:before { + content: "\f071"; + font-family: FontAwesome; + font-size: 60px; + padding-left: 50px; + padding-right: 50px; + color: #c53c3f; + text-align: center; +} + + +.co-dialog .modal-header .cor-step-bar { + float: right; +} + +.co-dialog .modal-footer.working { + text-align: left; +} + +.co-dialog .modal-footer.working .btn { + float: right; +} + +.co-dialog .modal-footer.working .cor-loader-inline { + margin-right: 10px; +} + +.co-dialog .modal-footer .left-align { + float: left; + vertical-align: middle; + font-size: 16px; + margin-top: 8px; +} + +.co-dialog .modal-footer .left-align i.fa-warning { + color: #ffba35; + display: inline-block; + margin-right: 6px; +} + +.co-dialog .modal-footer .left-align i.fa-check { + color: green; + display: inline-block; + margin-right: 6px; +} + +.co-dialog .co-single-field-dialog { + padding: 10px; +} + +.co-dialog .co-single-field-dialog input { + margin-top: 10px; +} + +.co-step-bar .co-step-element { + cursor: default; + display: inline-block; + width: 28px; + height: 28px; + + position: relative; + color: #ddd; + + text-align: center; + line-height: 24px; + font-size: 16px; +} + +.co-step-bar .co-step-element.text { + margin-left: 24px; + background: white; +} + +.co-step-bar .co-step-element.icon { + margin-left: 22px; +} + +.co-step-bar .co-step-element:first-child { + margin-left: 0px; +} + +.co-step-bar .co-step-element.active { + color: #53a3d9; +} + +.co-step-bar .co-step-element:first-child:before { + display: none; +} + +.co-step-bar .co-step-element:before { + content: ""; + position: absolute; + top: 12px; + width: 14px; + border-top: 2px solid #ddd; +} + +.co-step-bar 
.co-step-element.icon:before { + left: -20px; +} + +.co-step-bar .co-step-element.text:before { + left: -22px; +} + +.co-step-bar .co-step-element.active:before { + border-top: 2px solid #53a3d9; +} + + +.co-step-bar .co-step-element.text { + border-radius: 100%; + border: 2px solid #ddd; +} + +.co-step-bar .co-step-element.text.active { + border: 2px solid #53a3d9; +} + +@media screen and (min-width: 900px) { + .co-dialog .modal-dialog { + width: 800px; + } +} + +@media screen and (min-width: 1200px) { + .co-dialog.wider .modal-dialog { + width: 1000px; + } +} + +.co-alert .co-step-bar { + float: right; + margin-top: 6px; +} + +.cor-container { + padding-left: 15px; + padding-right: 15px; +} + +.cor-title-link { + font-weight: 300; + line-height: 30px; + margin-top: 22px; + margin-bottom: 10px; + font-size: 16px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + color: white; +} + +.cor-title-link a { + color: white; + text-decoration: none !important; +} + +.cor-title-link a.back-link .avatar { + margin-right: 6px; +} + +.cor-title-link a.back-link:before { + content: "\f060"; + color: white; + display: inline-block; + margin-right: 10px; + vertical-align: middle; + font-family: FontAwesome; +} + +.co-table { + width: 100%; +} + +.co-fixed-table { + table-layout: fixed; +} + +.co-fixed-table .co-flowing-col { + overflow: hidden; + text-overflow: ellipsis; + padding-left: 16px; + vertical-align: middle; +} + +.co-fixed-table .nowrap-col { + white-space: nowrap; + overflow: hidden; +} + +.co-table td { + border-bottom: 1px solid #eee; + padding: 10px; +} + +.co-table.no-lines td { + border-bottom: 0px; + padding: 6px; +} + +.co-table thead td { + color: #999; + font-size: 90%; + text-transform: uppercase; + font-weight: 300; + padding-top: 0px !important; +} + +.co-table thead td a { + color: #666; +} + +.co-table thead td:after { + content: "\f175"; + font-family: FontAwesome; + font-size: 12px; + margin-left: 10px; + visibility: hidden; +} + +.co-table thead td.unorderable-col:after { + display: none; +} + +.co-table thead td.current:after { + content: "\f175"; + visibility: visible; +} + +.co-table thead td.current.reversed:after { + content: "\f176"; + visibility: visible; +} + +.co-table thead td.current a { + color: #337ab7; +} + +.co-table .checkbox-col { + width: 24px; + text-align: center; +} + +.co-table .checkbox-col .co-checkable-menu a { + color: black; + text-transform: none; +} + +.co-table thead td.checkbox-menu-col:after { + display: none; +} + +.co-table .offset-check-col { + padding-left: 22px; +} + +.co-table td.options-col { + width: 36px; + text-align: center; +} + +.co-table td.caret-col { + width: 10px; + padding-left: 6px; + padding-right: 0px; + color: #aaa; + text-align: center; + max-width: 20px; +} + +.co-table td.caret-col i.fa { + cursor: pointer; +} + +.co-table td.caret-col i.fa.fa-caret-down { + color: black; +} + +.co-table .add-row-spacer td { + padding: 5px; +} + +.co-table .add-row td { + padding-top: 10px; + border-top: 2px solid #eee; + border-bottom: none; +} + +.co-table tr.co-table-header-row td { + font-size: 12px; + text-transform: uppercase; + color: #ccc; + border-bottom: none; + padding-left: 10px; + padding-top: 10px; + padding-bottom: 4px; +} + +.co-table tr.co-table-header-row td i.fa { + margin-right: 4px; +} + +.co-table tr.indented-row td:first-child { + padding-left: 28px; +} + +@media (max-width: 767px) { + .co-table tr.indented-row td:first-child { + padding-left: 10px; + } +} + +.co-table .mobile-row { + 
border-bottom: 2px solid #eee; + padding-bottom: 10px; + margin-bottom: 10px; + + position: relative; +} + +.co-table .mobile-row:last-child { + border-bottom: 0px solid #eee; + padding-bottom: 0px; + margin-bottom: 0px; +} + +.co-table .mobile-row .mobile-col-header { + font-weight: bold; + color: #444; +} + +.co-table .mobile-row .mobile-col-value { + padding: 6px; +} + +.co-table .mobile-row .options-col { + position: absolute; + top: -6px; + right: 0px; +} + + +.cor-checkable-menu { + display: inline-block; +} + +.co-checkable-menu .co-checkable-menu-state { + display: inline-block; + margin-left: -1px; + margin-right: 4px; +} + +.co-checkable-menu .dropdown { + display: inline-block; +} + +.co-checkable-item, .co-checkable-menu-state { + width: 18px; + height: 18px; + cursor: pointer; + border: 1px solid #ddd; + display: inline-block; + vertical-align: middle; + + position: relative +} + +.co-checkable-item:after, .co-checkable-menu-state:after { + content: "\f00c"; + font-family: FontAwesome; + color: #ccc; + + position: absolute; + top: -1px; + left: 1px; + + visibility: hidden; +} + +.co-checkable-menu-state.some:after { + content: "-"; + font-size: 24px; + top: -10px; + left: 4px; +} + +@media (min-width: 768px) { + .co-checkable-item:hover:after { + visibility: visible; + } +} + +.co-checkable-item.checked:after, .co-checkable-menu-state.all:after, .co-checkable-menu-state.some:after { + visibility: visible; + color: #428bca; +} + +.co-table .co-checkable-row.checked { + background: #F6FCFF; +} + +.co-filter-box { + position: relative;; +} + +.co-filter-box input { + display: inline-block; + width: auto !important; +} + +.co-filter-box .filter-message { + display: inline-block; + position: absolute; + left: -220px; + top: 7px; + color: #ccc; +} + +.co-filter-box .filter-options { + margin-top: 4px; + font-size: 14px; + text-align: right; + display: inline-block; +} + +.co-filter-box .filter-options label input { + margin-right: 4px; +} + + +.co-filter-box.with-options > input { + display: inline-block; + width: 200px; + margin-right: 4px; +} + +.co-check-bar { + margin-bottom: 10px; +} + +.co-check-bar .co-checked-actions { + display: inline-block; + border-left: 1px solid #eee; + margin-left: 10px; + padding-left: 4px; +} + +.co-top-bar { + height: 50px; + padding-bottom: 40px; +} + +.co-check-bar .co-checked-actions .btn { + margin-left: 6px; +} + +.co-check-bar .co-checked-actions .btn .fa { + margin-right: 4px; +} + +.co-check-bar .co-filter-box, .co-top-bar .co-filter-box { + float: right; +} + +.co-check-bar .co-filter-box .page-controls, .co-top-bar .co-filter-box .page-controls { + margin-right: 6px; + margin-bottom: 6px; +} + +.co-check-bar .co-filter-box input, .co-top-bar .co-filter-box input[type="text"] { + width: 300px; + display: inline-block; + vertical-align: middle; +} + +.co-check-bar .co-filter-box input, .co-top-bar .co-filter-box label { + margin-left: 6px; +} + +.co-top-bar .co-filter-box input { + vertical-align: top; +} + +@media screen and (max-width: 640px) { + .co-top-bar .page-controls { + margin-bottom: 10px; + text-align: right; + } + + .co-top-bar .co-filter-box { + display: block; + margin-bottom: 10px; + } + + .co-top-bar .filter-options { + display: block; + margin-bottom: 10px; + } + + .co-filter-box input { + display: block !important; + } +} + +.empty { + border-bottom: none !important; +} + +.empty-icon { + color: #aaa; + font-size: 60px; + margin-bottom: 0px; + text-align: center; +} + +.empty-primary-msg { + font-size: 18px; + 
margin-bottom: 10px; + text-align: center; +} + +.empty-secondary-msg { + font-size: 14px; + color: #999; + text-align: center; + margin-bottom: 10px; +} + +.co-alert { + padding: 16px; + padding-left: 46px; + position: relative; + margin-bottom: 20px; + position: relative; + border: 1px solid #eee; +} + +.co-alert.co-alert-success { + background: #F0FFF4; +} + +.co-alert.co-alert-success:before { + font-family: FontAwesome; + content: "\f058"; + position: absolute; + top: 11px; + left: 12px; + font-size: 22px; + color: #83D29C; +} + +.co-alert.co-alert-info { + background: #F0FAFF; +} + +.co-alert.co-alert-info:before { + font-family: FontAwesome; + content: "\f05a"; + position: absolute; + top: 11px; + left: 12px; + font-size: 22px; + color: #83B7D2; +} + +.co-alert.co-alert-warning { + background: #FFFBF0; +} + +.co-alert.co-alert-warning:before { + font-family: FontAwesome; + content: "\f071"; + position: absolute; + top: 11px; + left: 12px; + font-size: 22px; + color: #FCA657; +} + +.co-alert.co-alert-danger { + background: #FFF0F0; +} + +.co-alert.co-alert-danger:before { + font-family: core-icons; + content: "\f109"; + position: absolute; + top: 11px; + left: 12px; + font-size: 22px; + color: red; +} + +.co-alert.co-alert-danger:after { + font-family: FontAwesome; + content: "\f12a"; + position: absolute; + top: 16px; + left: 20px; + font-size: 16px; + color: white; + z-index: 2; +} + +.co-alert.thin { + padding: 6px; + padding-left: 38px; + margin-bottom: 0px; +} + +.co-alert.thin:before { + top: 5px; + font-size: 18px; +} + +.co-alert.thin:after { + top: 9px; + font-size: 13px; + left: 19px; +} + +.co-alert-inline:before { + position: relative !important; + top: auto !important; + left: auto !important; + vertical-align: middle; + margin-right: 10px; +} + +.co-alert-popin-warning { + margin-left: 10px; +} + +@media screen and (max-width: 767px) { + .co-alert-popin-warning { + display: block; + margin: 0px; + margin-top: 10px; + float: none; + } +} + +.co-alert-inline { + border: 0px; + display: inline-block; + background-color: transparent !important; + margin: 0px; + padding: 4px; +} + +.co-list-table tr td:first-child { + font-weight: bold; + padding-right: 10px; + vertical-align: top; + width: 120px; + padding-left: 0px; +} + +.co-list-table tr td { + padding: 10px; + font-size: 15px; +} + +.co-list-table .help-text { + margin-top: 6px; + font-size: 14px; + color: #aaa; +} + +.co-modify-link:after { + font-family: FontAwesome; + content: "\f054"; + color: #ccc; + vertical-align: middle; + display: inline-block; + margin-left: 10px; + font-size: 10px; + line-height: 16px; +} + +.co-option-table tr td:first-child { + padding-left: 16px; + padding-right: 16px; + padding-top: 0px; + vertical-align: top; +} + +.co-option-table tr td:last-child { + padding-bottom: 10px; +} + +.co-option-table .help-text { + margin-top: 4px; + margin-bottom: 10px; + font-size: 14px; + color: #aaa; +} + +.co-modal-body-scrollable { + overflow-y: auto; + overflow-x: hidden; + max-height: 400px; +} + +.cor-confirm-dialog-element .modal-body { + padding: 20px; +} + +.cor-confirm-dialog-element .progress-message { + margin-bottom: 10px; + font-size: 16px; +} + +.co-top-tab-bar { + padding: 0px; + margin: 0px; + padding-left: 10px; + + margin-bottom: 10px; + border-bottom: 1px solid #eee; +} + +.co-top-tab-bar li { + display: inline-block; + list-style: none; + text-align: center; + padding: 6px; + padding-left: 10px; + padding-right: 10px; + border-bottom: 1px solid #eee; + font-size: 15px; + cursor: 
pointer;
+  color: #666;
+
+  bottom: -2px;
+  position: relative;
+}
+
+.co-top-tab-bar li.active {
+  color: #51a3d9;
+  border-bottom: 2px solid #51a3d9;
+  top: 2px;
+}
+
+.modal-header.ahead-of-tabs {
+  border-bottom: 0px;
+  padding-bottom: 4px;
+}
diff --git a/config_app/static/lib/angular-file-upload.min.js b/config_app/static/lib/angular-file-upload.min.js
new file mode 100644
index 000000000..b9d0196f7
--- /dev/null
+++ b/config_app/static/lib/angular-file-upload.min.js
@@ -0,0 +1,2 @@
+/*! 1.4.0 */
+[The vendored, minified angular-file-upload 1.4.0 bundle was mangled in extraction (its "<"
+ characters were swallowed as markup) and is omitted here.]
diff --git a/config_app/templates/config.html b/config_app/templates/config.html
[The hunk header and markup were lost in extraction. The hunk removes the placeholder body
 from config.html, the "What is my purpose" / "You make tarballs" lines, which move into the
 new index.html below.]
diff --git a/config_app/templates/index.html b/config_app/templates/index.html
new file mode 100644
index 000000000..7a40e3d30
--- /dev/null
+++ b/config_app/templates/index.html
@@ -0,0 +1,51 @@
+[The 51 lines of template markup were lost in extraction. The recoverable structure: the
+ page is titled "Config app", the head loads each built bundle inside
+ {% for script_path in main_scripts %} ... {% endfor %}, and the body carries the
+ "What is my purpose" / "You make tarballs" placeholder markup.]
+ + diff --git a/config_app/util/__init__.py b/config_app/util/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/util/baseprovider.py b/config_app/util/baseprovider.py new file mode 100644 index 000000000..5a616895f --- /dev/null +++ b/config_app/util/baseprovider.py @@ -0,0 +1,128 @@ +import logging +import yaml + +from abc import ABCMeta, abstractmethod +from six import add_metaclass + +from jsonschema import validate, ValidationError + +from util.config.schema import CONFIG_SCHEMA + +logger = logging.getLogger(__name__) + + +class CannotWriteConfigException(Exception): + """ Exception raised when the config cannot be written. """ + pass + + +class SetupIncompleteException(Exception): + """ Exception raised when attempting to verify config that has not yet been setup. """ + pass + + +def import_yaml(config_obj, config_file): + with open(config_file) as f: + c = yaml.safe_load(f) + if not c: + logger.debug('Empty YAML config file') + return + + if isinstance(c, str): + raise Exception('Invalid YAML config file: ' + str(c)) + + for key in c.iterkeys(): + if key.isupper(): + config_obj[key] = c[key] + + if config_obj.get('SETUP_COMPLETE', True): + try: + validate(config_obj, CONFIG_SCHEMA) + except ValidationError: + # TODO: Change this into a real error + logger.exception('Could not validate config schema') + else: + logger.debug('Skipping config schema validation because setup is not complete') + + return config_obj + + +def get_yaml(config_obj): + return yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True) + + +def export_yaml(config_obj, config_file): + try: + with open(config_file, 'w') as f: + f.write(get_yaml(config_obj)) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + +@add_metaclass(ABCMeta) +class BaseProvider(object): + """ A configuration provider helps to load, save, and handle config override in the application. + """ + + @property + def provider_id(self): + raise NotImplementedError + + @abstractmethod + def update_app_config(self, app_config): + """ Updates the given application config object with the loaded override config. """ + + @abstractmethod + def get_config(self): + """ Returns the contents of the config override file, or None if none. """ + + @abstractmethod + def save_config(self, config_object): + """ Updates the contents of the config override file to those given. """ + + @abstractmethod + def config_exists(self): + """ Returns true if a config override file exists in the config volume. """ + + @abstractmethod + def volume_exists(self): + """ Returns whether the config override volume exists. """ + + @abstractmethod + def volume_file_exists(self, filename): + """ Returns whether the file with the given name exists under the config override volume. """ + + @abstractmethod + def get_volume_file(self, filename, mode='r'): + """ Returns a Python file referring to the given name under the config override volume. """ + + @abstractmethod + def write_volume_file(self, filename, contents): + """ Writes the given contents to the config override volumne, with the given filename. """ + + @abstractmethod + def remove_volume_file(self, filename): + """ Removes the config override volume file with the given filename. """ + + @abstractmethod + def list_volume_directory(self, path): + """ Returns a list of strings representing the names of the files found in the config override + directory under the given path. If the path doesn't exist, returns None. 
+ """ + + @abstractmethod + def save_volume_file(self, filename, flask_file): + """ Saves the given flask file to the config override volume, with the given + filename. + """ + + @abstractmethod + def requires_restart(self, app_config): + """ If true, the configuration loaded into memory for the app does not match that on disk, + indicating that this container requires a restart. + """ + + @abstractmethod + def get_volume_path(self, directory, filename): + """ Helper for constructing file paths, which may differ between providers. For example, + kubernetes can't have subfolders in configmaps """ diff --git a/config_app/util/config.py b/config_app/util/config.py new file mode 100644 index 000000000..f01e2565d --- /dev/null +++ b/config_app/util/config.py @@ -0,0 +1,21 @@ +import os +from util.config.provider import TestConfigProvider, KubernetesConfigProvider, FileConfigProvider + +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) +OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') + + +def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False): + """ Loads and returns the config provider for the current environment. """ + if testing: + return TestConfigProvider() + + if kubernetes: + return KubernetesConfigProvider(config_volume, yaml_filename, py_filename) + + return FileConfigProvider(config_volume, yaml_filename, py_filename) + + +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', + testing=False, kubernetes=False) \ No newline at end of file diff --git a/config_app/util/fileprovider.py b/config_app/util/fileprovider.py new file mode 100644 index 000000000..ac2ceec40 --- /dev/null +++ b/config_app/util/fileprovider.py @@ -0,0 +1,60 @@ +import os +import logging + +from util.config.provider.baseprovider import export_yaml, CannotWriteConfigException +from util.config.provider.basefileprovider import BaseFileProvider + + +logger = logging.getLogger(__name__) + + +def _ensure_parent_dir(filepath): + """ Ensures that the parent directory of the given file path exists. """ + try: + parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) + if not os.path.isdir(parentpath): + os.makedirs(parentpath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + +class FileConfigProvider(BaseFileProvider): + """ Implementation of the config provider that reads and writes the data + from/to the file system. """ + def __init__(self, config_volume, yaml_filename, py_filename): + super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) + + @property + def provider_id(self): + return 'file' + + def save_config(self, config_obj): + export_yaml(config_obj, self.yaml_path) + + def write_volume_file(self, filename, contents): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) + + try: + with open(filepath, mode='w') as f: + f.write(contents) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + return filepath + + def remove_volume_file(self, filename): + filepath = os.path.join(self.config_volume, filename) + os.remove(filepath) + + def save_volume_file(self, filename, flask_file): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) + + # Write the file. 
+ try: + flask_file.save(filepath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + return filepath diff --git a/config_app/web.py b/config_app/web.py index c98239f38..967a4a11a 100644 --- a/config_app/web.py +++ b/config_app/web.py @@ -1,6 +1,8 @@ from app import app as application from config_endpoints.setup_web import setup_web +from config_endpoints.api import api_bp -application.register_blueprint(setup_web) +# application.register_blueprint(setup_web) +application.register_blueprint(api_bp, url_prefix='/api') diff --git a/config_app/webpack.config.js b/config_app/webpack.config.js new file mode 100644 index 000000000..4b52f243e --- /dev/null +++ b/config_app/webpack.config.js @@ -0,0 +1,60 @@ +const webpack = require('webpack'); +const path = require('path'); + +let config = { + entry: { + configapp: "./js/main.ts" + }, + output: { + path: path.resolve(__dirname, "static/build"), + filename: '[name]-quay-frontend.bundle.js', + }, + resolve: { + extensions: [".ts", ".js"], + modules: [ + // Allows us to use the top-level node modules + path.resolve(__dirname, '../node_modules'), + ] + }, + externals: { + angular: "angular", + jquery: "$", + // moment: "moment", + // "raven-js": "Raven", + }, + module: { + rules: [ + { + test: /\.ts$/, + use: ["ts-loader"], + exclude: /node_modules/ + }, + { + test: /\.css$/, + use: [ + "style-loader", + "css-loader?minimize=true", + ], + }, + { + test: /\.html$/, + use: [ + 'ngtemplate-loader?relativeTo=' + (path.resolve(__dirname)), + 'html-loader', + ] + }, + ] + }, + plugins: [ + // Replace references to global variables with associated modules + new webpack.ProvidePlugin({ + FileSaver: 'file-saver', + angular: "angular", + $: "jquery", + // moment: "moment", + }), + ], + devtool: "cheap-module-source-map", +}; + +module.exports = config; diff --git a/endpoints/common.py b/endpoints/common.py index a5113ca79..82040bb06 100644 --- a/endpoints/common.py +++ b/endpoints/common.py @@ -24,7 +24,7 @@ from _init import __version__ logger = logging.getLogger(__name__) -JS_BUNDLE_NAME = 'main' +JS_BUNDLE_NAME = 'bundle' def common_login(user_uuid, permanent_session=True): @@ -73,9 +73,9 @@ def _list_files(path, extension, contains=""): return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] -def render_page_template(name, route_data=None, js_bundle_name=JS_BUNDLE_NAME, **kwargs): +def render_page_template(name, route_data=None, **kwargs): """ Renders the page template with the given name as the response and returns its contents. """ - main_scripts = _list_files('build', 'js', js_bundle_name) + main_scripts = _list_files('build', 'js', JS_BUNDLE_NAME) use_cdn = app.config.get('USE_CDN', True) if request.args.get('use_cdn') is not None: diff --git a/local-config-app.sh b/local-config-app.sh index 6dc723670..e2d63562e 100755 --- a/local-config-app.sh +++ b/local-config-app.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash cat << "EOF" __ __ - / \ / \ ______ _ _ __ __ __ - / /\ / /\ \ / __ \ | | | | / \ \ \ / / -/ / / / \ \ | | | | | | | | / /\ \ \ / -\ \ \ \ / / | |__| | | |__| | / ____ \ | | - \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| + / \ / \ ______ _ _ __ __ __ _____ ____ _ _ _____ _____ _____ + / /\ / /\ \ / __ \ | | | | / \ \ \ / / / ____| / __ \ | \ | | | ___| |_ _| / ____| +/ / / / \ \ | | | | | | | | / /\ \ \ / | | | | | | | \| | | |__ | | | | _ +\ \ \ \ / / | |__| | | |__| | / ____ \ | | | |____ | |__| | | . 
` | | __| _| |_ | |__| | + \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| \_____| \____/ |_| \_| |_| |_____| \_____| \__/ \__/ \ \__ \___\ by CoreOS diff --git a/package.json b/package.json index 5ed245cb3..1a084ab37 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,10 @@ "watch": "npm run clean && webpack --watch", "lint": "tslint --type-check -p tsconfig.json -e **/*.spec.ts", "analyze": "NODE_ENV=production webpack --profile --json | awk '{if(NR>1)print}' > static/build/stats.json && webpack-bundle-analyzer --mode static -r static/build/report.html static/build/stats.json", - "clean": "rm -f static/build/*" + "clean": "rm -f static/build/*", + + "clean-config-app": "rm -f config_app/static/build/*", + "watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch" }, "repository": { "type": "git", diff --git a/static/configappjs/index.js b/static/configappjs/index.js deleted file mode 100644 index 9bac6fe97..000000000 --- a/static/configappjs/index.js +++ /dev/null @@ -1,5 +0,0 @@ - - -const setupPage = ''; -angular.module('quay', [setupPage]); -console.log('Hello world! I\'m the config app'); \ No newline at end of file diff --git a/web.py b/web.py index ed2ef24b6..fd3b1768e 100644 --- a/web.py +++ b/web.py @@ -11,15 +11,7 @@ from endpoints.webhooks import webhooks from endpoints.wellknown import wellknown -import os -is_config_mode = 'FLAGGED_CONFIG_MODE' in os.environ -print('\n\n\nAre we in config mode?') -print(is_config_mode) - - application.register_blueprint(web) - - application.register_blueprint(githubtrigger, url_prefix='/oauth2') application.register_blueprint(gitlabtrigger, url_prefix='/oauth2') application.register_blueprint(oauthlogin, url_prefix='/oauth2') diff --git a/webpack.config.js b/webpack.config.js index 51df57480..6944a3a64 100644 --- a/webpack.config.js +++ b/webpack.config.js @@ -3,10 +3,7 @@ const path = require('path'); let config = { - entry: { - main: "./static/js/main.ts", - configapp: "./static/configappjs/index.js" - }, + entry: "./static/js/main.ts", output: { path: path.resolve(__dirname, "static/build"), publicPath: "/static/build/", From c378e408efe43b86baf6ff275d0dcc901471458e Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Mon, 21 May 2018 17:02:38 -0400 Subject: [PATCH 06/14] Add some basic endpoints to the config app backend rename files to avoid overlap with quay app --- config_app/Procfile | 2 +- config_app/_init_config.py | 39 + config_app/app.py | 4 - config_app/conf/gunicorn_local.py | 6 +- config_app/conf/logging.conf | 36 + config_app/conf/logging_debug.conf | 41 + config_app/conf/logging_debug_json.conf | 41 + config_app/conf/logging_json.conf | 36 + config_app/config_app.py | 31 + config_app/config_app_config.py | 544 +++++++++++ .../{application.py => config_application.py} | 4 +- config_app/config_endpoints/api/__init__.py | 90 +- config_app/config_endpoints/api/discovery.py | 252 +++++ config_app/config_endpoints/api/suconfig.py | 87 ++ config_app/config_endpoints/api/superuser.py | 151 +++ config_app/config_endpoints/api/user.py | 18 + config_app/config_endpoints/common.py | 249 ----- config_app/config_endpoints/exception.py | 123 +++ config_app/config_endpoints/setup_web.py | 16 +- config_app/config_test/testconfig.py | 108 +++ config_app/{util => config_util}/__init__.py | 0 config_app/config_util/config/__init__.py | 16 + .../config_util/config/basefileprovider.py | 71 ++ .../config}/baseprovider.py | 2 +- .../config}/fileprovider.py | 4 +- config_app/config_util/config/k8sprovider.py | 170 ++++ 
config_app/config_util/config/schema.py | 914 ++++++++++++++++++ config_app/config_util/config/testprovider.py | 81 ++ config_app/config_util/log.py | 47 + config_app/config_util/ssl.py | 81 ++ config_app/config_util/workers.py | 32 + config_app/{web.py => config_web.py} | 6 +- config_app/js/config-app.module.ts | 2 - .../js/core-config-setup/core-config-setup.js | 12 +- config_app/js/main.ts | 3 - config_app/js/services/api-service.js | 1 - config_app/js/services/user-service.js | 24 - config_app/loghandler_config.py | 114 +++ config_app/util/config.py | 21 - 39 files changed, 3095 insertions(+), 384 deletions(-) create mode 100644 config_app/_init_config.py delete mode 100644 config_app/app.py create mode 100644 config_app/conf/logging.conf create mode 100644 config_app/conf/logging_debug.conf create mode 100644 config_app/conf/logging_debug_json.conf create mode 100644 config_app/conf/logging_json.conf create mode 100644 config_app/config_app.py create mode 100644 config_app/config_app_config.py rename config_app/{application.py => config_application.py} (78%) create mode 100644 config_app/config_endpoints/api/discovery.py create mode 100644 config_app/config_endpoints/api/suconfig.py create mode 100644 config_app/config_endpoints/api/superuser.py create mode 100644 config_app/config_endpoints/api/user.py create mode 100644 config_app/config_endpoints/exception.py create mode 100644 config_app/config_test/testconfig.py rename config_app/{util => config_util}/__init__.py (100%) create mode 100644 config_app/config_util/config/__init__.py create mode 100644 config_app/config_util/config/basefileprovider.py rename config_app/{util => config_util/config}/baseprovider.py (98%) rename config_app/{util => config_util/config}/fileprovider.py (91%) create mode 100644 config_app/config_util/config/k8sprovider.py create mode 100644 config_app/config_util/config/schema.py create mode 100644 config_app/config_util/config/testprovider.py create mode 100644 config_app/config_util/log.py create mode 100644 config_app/config_util/ssl.py create mode 100644 config_app/config_util/workers.py rename config_app/{web.py => config_web.py} (64%) create mode 100755 config_app/loghandler_config.py delete mode 100644 config_app/util/config.py diff --git a/config_app/Procfile b/config_app/Procfile index 0ea2ba9c6..ac20de89b 100644 --- a/config_app/Procfile +++ b/config_app/Procfile @@ -1,3 +1,3 @@ -app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py application:application +app: PYTHONPATH="./" gunicorn -c conf/gunicorn_local.py config_application:application # webpack: npm run watch-config-app diff --git a/config_app/_init_config.py b/config_app/_init_config.py new file mode 100644 index 000000000..494edad57 --- /dev/null +++ b/config_app/_init_config.py @@ -0,0 +1,39 @@ +import os +import re +import subprocess + + +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) +STATIC_DIR = os.path.join(ROOT_DIR, 'static/') +STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') +STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/') +TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/') + + +# TODO(config): Remove this external folder dependency +EXTERNAL_REPO_REQUIRE_PATH = os.path.dirname(ROOT_DIR) + + +def _get_version_number_changelog(): + try: + with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f: + return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0) + except IOError: + return '' + + +def _get_git_sha(): + if 
os.path.exists("GIT_HEAD"): + with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f: + return f.read() + else: + try: + return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8] + except (OSError, subprocess.CalledProcessError): + pass + return "unknown" + + +__version__ = _get_version_number_changelog() +__gitrev__ = _get_git_sha() diff --git a/config_app/app.py b/config_app/app.py deleted file mode 100644 index 811f51457..000000000 --- a/config_app/app.py +++ /dev/null @@ -1,4 +0,0 @@ -from flask import Flask - -app = Flask(__name__) - diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py index b33558ef2..96cd7d19c 100644 --- a/config_app/conf/gunicorn_local.py +++ b/config_app/conf/gunicorn_local.py @@ -5,12 +5,12 @@ sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging from Crypto import Random -from util.log import logfile_path -from util.workers import get_worker_count +from config_util.log import logfile_path +from config_util.workers import get_worker_count logconfig = logfile_path(debug=True) -bind = '0.0.0.0:5000' +bind = '127.0.0.1:5000' workers = get_worker_count('local', 2, minimum=2, maximum=8) worker_class = 'gevent' daemon = False diff --git a/config_app/conf/logging.conf b/config_app/conf/logging.conf new file mode 100644 index 000000000..885678395 --- /dev/null +++ b/config_app/conf/logging.conf @@ -0,0 +1,36 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=generic +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler_config.JsonFormatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/config_app/conf/logging_debug.conf b/config_app/conf/logging_debug.conf new file mode 100644 index 000000000..1f1bb2c63 --- /dev/null +++ b/config_app/conf/logging_debug.conf @@ -0,0 +1,41 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=generic +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler_config.JsonFormatter diff --git a/config_app/conf/logging_debug_json.conf b/config_app/conf/logging_debug_json.conf new file mode 100644 index 000000000..382f882d1 --- /dev/null +++ b/config_app/conf/logging_debug_json.conf @@ -0,0 +1,41 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=json 
+args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler_config.JsonFormatter diff --git a/config_app/conf/logging_json.conf b/config_app/conf/logging_json.conf new file mode 100644 index 000000000..cccdcf832 --- /dev/null +++ b/config_app/conf/logging_json.conf @@ -0,0 +1,36 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=json,generic + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler_config.JsonFormatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/config_app/config_app.py b/config_app/config_app.py new file mode 100644 index 000000000..616ecc157 --- /dev/null +++ b/config_app/config_app.py @@ -0,0 +1,31 @@ +import os +import logging +from flask import Flask +from _init_config import CONF_DIR +from config_util.config import get_config_provider + +app = Flask(__name__) + +logger = logging.getLogger(__name__) + +OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') + + +is_testing = 'TEST' in os.environ +is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ + +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config_app_config.py', + testing=is_testing, kubernetes=is_kubernetes) + +if is_testing: + from config_test.testconfig import TestConfig + logger.debug('Loading test config.') + app.config.from_object(TestConfig()) +else: + from config_app_config import DefaultConfig + logger.debug('Loading default config.') + app.config.from_object(DefaultConfig()) + # app.teardown_request(database.close_db_filter) + +# Load the override config via the provider. +config_provider.update_app_config(app.config) diff --git a/config_app/config_app_config.py b/config_app/config_app_config.py new file mode 100644 index 000000000..2b8204115 --- /dev/null +++ b/config_app/config_app_config.py @@ -0,0 +1,544 @@ +from uuid import uuid4 + +import os.path +import requests + +from _init_config import ROOT_DIR, CONF_DIR, EXTERNAL_REPO_REQUIRE_PATH + + +def build_requests_session(): + sess = requests.Session() + adapter = requests.adapters.HTTPAdapter(pool_connections=100, + pool_maxsize=100) + sess.mount('http://', adapter) + sess.mount('https://', adapter) + return sess + + +# The set of configuration key names that will be accessible in the client. Since these +# values are sent to the frontend, DO NOT PLACE ANY SECRETS OR KEYS in this list. 
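+# Illustrative example: with the whitelist below,
+#   frontend_visible_config({'SERVER_HOSTNAME': 'quay.local', 'SECRET_KEY': 'shh'})
+# returns only {'SERVER_HOSTNAME': 'quay.local'}; non-whitelisted keys are dropped,
+# and whitelisting any name containing 'secret' raises an Exception (see below).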
+CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'MIXPANEL_KEY', + 'STRIPE_PUBLISHABLE_KEY', 'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN', + 'AUTHENTICATION_TYPE', 'REGISTRY_TITLE', 'REGISTRY_TITLE_SHORT', + 'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', 'DOCUMENTATION_LOCATION', + 'DOCUMENTATION_METADATA', 'SETUP_COMPLETE', 'DEBUG', 'MARKETO_MUNCHKIN_ID', + 'STATIC_SITE_BUCKET', 'RECAPTCHA_SITE_KEY', 'CHANNEL_COLORS', + 'TAG_EXPIRATION_OPTIONS', 'INTERNAL_OIDC_SERVICE_ID', + 'SEARCH_RESULTS_PER_PAGE', 'SEARCH_MAX_RESULT_PAGE_COUNT'] + + +def frontend_visible_config(config_dict): + visible_dict = {} + for name in CLIENT_WHITELIST: + if name.lower().find('secret') >= 0: + raise Exception('Cannot whitelist secrets: %s' % name) + + if name in config_dict: + visible_dict[name] = config_dict.get(name, None) + + return visible_dict + + +# Configuration that should not be changed by end users +class ImmutableConfig(object): + + # Requests based HTTP client with a large request pool + HTTPCLIENT = build_requests_session() + + # Status tag config + STATUS_TAGS = {} + for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']: + tag_path = os.path.join(EXTERNAL_REPO_REQUIRE_PATH, 'buildstatus', tag_name + '.svg') + with open(tag_path) as tag_svg: + STATUS_TAGS[tag_name] = tag_svg.read() + + # Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable + # to be set via the API. + DEFAULT_LABEL_KEY_RESERVED_PREFIXES = ['com.docker.', 'io.docker.', 'org.dockerproject.', + 'org.opencontainers.', 'io.cncf.', + 'io.kubernetes.', 'io.k8s.', + 'io.quay', 'com.coreos', 'com.tectonic', + 'internal', 'quay'] + + # Colors for local avatars. + AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', + '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', + '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', + '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', + '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] + + # Colors for channels. 
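+  # (Note: currently the same palette as AVATAR_COLORS above.)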
+ CHANNEL_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', + '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', + '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', + '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', + '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] + + PROPAGATE_EXCEPTIONS = True + + +class DefaultConfig(ImmutableConfig): + # Flask config + JSONIFY_PRETTYPRINT_REGULAR = False + SESSION_COOKIE_SECURE = False + + LOGGING_LEVEL = 'DEBUG' + SEND_FILE_MAX_AGE_DEFAULT = 0 + PREFERRED_URL_SCHEME = 'http' + SERVER_HOSTNAME = 'localhost:5000' + + REGISTRY_TITLE = 'Quay Enterprise' + REGISTRY_TITLE_SHORT = 'Quay Enterprise' + + CONTACT_INFO = [ + 'mailto:support@quay.io', + 'irc://chat.freenode.net:6665/quay', + 'tel:+1-888-930-3475', + 'https://twitter.com/quayio', + ] + + # Mail config + MAIL_SERVER = '' + MAIL_USE_TLS = True + MAIL_PORT = 587 + MAIL_USERNAME = None + MAIL_PASSWORD = None + MAIL_DEFAULT_SENDER = 'support@quay.io' + MAIL_FAIL_SILENTLY = False + TESTING = True + + # DB config + DB_URI = 'sqlite:///test/data/test.db' + DB_CONNECTION_ARGS = { + 'threadlocals': True, + 'autorollback': True, + } + + @staticmethod + def create_transaction(db): + return db.transaction() + + DB_TRANSACTION_FACTORY = create_transaction + + # If set to true, TLS is used, but is terminated by an external service (such as a load balancer). + # Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined + # behavior. + EXTERNAL_TLS_TERMINATION = False + + # If true, CDN URLs will be used for our external dependencies, rather than the local + # copies. + USE_CDN = False + + # Authentication + AUTHENTICATION_TYPE = 'Database' + + # Build logs + BUILDLOGS_REDIS = {'host': 'localhost'} + BUILDLOGS_OPTIONS = [] + + # Real-time user events + USER_EVENTS_REDIS = {'host': 'localhost'} + + # Stripe config + BILLING_TYPE = 'FakeStripe' + + # Analytics + ANALYTICS_TYPE = 'FakeAnalytics' + + # Build Queue Metrics + QUEUE_METRICS_TYPE = 'Null' + QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300 + + # Exception logging + EXCEPTION_LOG_TYPE = 'FakeSentry' + SENTRY_DSN = None + SENTRY_PUBLIC_DSN = None + + # Github Config + GITHUB_LOGIN_CONFIG = None + GITHUB_TRIGGER_CONFIG = None + + # Google Config. + GOOGLE_LOGIN_CONFIG = None + + # Bitbucket Config. + BITBUCKET_TRIGGER_CONFIG = None + + # Gitlab Config. + GITLAB_TRIGGER_CONFIG = None + + NOTIFICATION_QUEUE_NAME = 'notification' + DOCKERFILE_BUILD_QUEUE_NAME = 'dockerfilebuild' + REPLICATION_QUEUE_NAME = 'imagestoragereplication' + SECSCAN_NOTIFICATION_QUEUE_NAME = 'security_notification' + CHUNK_CLEANUP_QUEUE_NAME = 'chunk_cleanup' + NAMESPACE_GC_QUEUE_NAME = 'namespacegc' + + # Super user config. Note: This MUST BE an empty list for the default config. + SUPER_USERS = [] + + # Feature Flag: Whether sessions are permanent. + FEATURE_PERMANENT_SESSIONS = True + + # Feature Flag: Whether super users are supported. + FEATURE_SUPER_USERS = True + + # Feature Flag: Whether to allow anonymous users to browse and pull public repositories. + FEATURE_ANONYMOUS_ACCESS = True + + # Feature Flag: Whether billing is required. + FEATURE_BILLING = False + + # Feature Flag: Whether user accounts automatically have usage log access. + FEATURE_USER_LOG_ACCESS = False + + # Feature Flag: Whether GitHub login is supported. + FEATURE_GITHUB_LOGIN = False + + # Feature Flag: Whether Google login is supported. 
+ FEATURE_GOOGLE_LOGIN = False + + # Feature Flag: whether to enable support chat + FEATURE_SUPPORT_CHAT = False + + # Feature Flag: Whether to support GitHub build triggers. + FEATURE_GITHUB_BUILD = False + + # Feature Flag: Whether to support Bitbucket build triggers. + FEATURE_BITBUCKET_BUILD = False + + # Feature Flag: Whether to support GitLab build triggers. + FEATURE_GITLAB_BUILD = False + + # Feature Flag: Dockerfile build support. + FEATURE_BUILD_SUPPORT = True + + # Feature Flag: Whether emails are enabled. + FEATURE_MAILING = True + + # Feature Flag: Whether users can be created (by non-super users). + FEATURE_USER_CREATION = True + + # Feature Flag: Whether users being created must be invited by another user. If FEATURE_USER_CREATION is off, + # this flag has no effect. + FEATURE_INVITE_ONLY_USER_CREATION = False + + # Feature Flag: Whether users can be renamed + FEATURE_USER_RENAME = False + + # Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for + # basic auth. + FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False + + # Feature Flag: Whether to automatically replicate between storage engines. + FEATURE_STORAGE_REPLICATION = False + + # Feature Flag: Whether users can directly login to the UI. + FEATURE_DIRECT_LOGIN = True + + # Feature Flag: Whether the v2/ endpoint is visible + FEATURE_ADVERTISE_V2 = True + + # Semver spec for which Docker versions we will blacklist + # Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec + BLACKLIST_V2_SPEC = '<1.6.0' + + # Feature Flag: Whether or not to rotate old action logs to storage. + FEATURE_ACTION_LOG_ROTATION = False + + # Feature Flag: Whether to enable conversion to ACIs. + FEATURE_ACI_CONVERSION = False + + # Feature Flag: Whether to allow for "namespace-less" repositories when pulling and pushing from + # Docker. + FEATURE_LIBRARY_SUPPORT = True + + # Feature Flag: Whether to require invitations when adding a user to a team. + FEATURE_REQUIRE_TEAM_INVITE = True + + # Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx. + FEATURE_PROXY_STORAGE = False + + # Feature Flag: Whether to collect and support user metadata. + FEATURE_USER_METADATA = False + + # Feature Flag: Whether to support signing + FEATURE_SIGNING = False + + # Feature Flag: Whether to enable support for App repositories. + FEATURE_APP_REGISTRY = False + + # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise, + # only private repositories can be returned. + FEATURE_PUBLIC_CATALOG = False + + # Feature Flag: If set to true, build logs may be read by those with read access to the repo, + # rather than only write access or admin access. + FEATURE_READER_BUILD_LOGS = False + + # Feature Flag: If set to true, autocompletion will apply to partial usernames. + FEATURE_PARTIAL_USER_AUTOCOMPLETE = True + + # If a namespace is defined in the public namespace list, then it will appear on *all* + # user's repository list pages, regardless of whether that user is a member of the namespace. + # Typically, this is used by an enterprise customer in configuring a set of "well-known" + # namespaces. + PUBLIC_NAMESPACES = [] + + # The namespace to use for library repositories. + # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries. 
+  # See: https://github.com/docker/docker/blob/master/registry/session.go#L320
+  LIBRARY_NAMESPACE = 'library'
+
+  BUILD_MANAGER = ('enterprise', {})
+
+  DISTRIBUTED_STORAGE_CONFIG = {
+    'local_eu': ['LocalStorage', {'storage_path': 'test/data/registry/eu'}],
+    'local_us': ['LocalStorage', {'storage_path': 'test/data/registry/us'}],
+  }
+
+  DISTRIBUTED_STORAGE_PREFERENCE = ['local_us']
+  DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ['local_us']
+
+  # Health checker.
+  HEALTH_CHECKER = ('LocalHealthCheck', {})
+
+  # Userfiles
+  USERFILES_LOCATION = 'local_us'
+  USERFILES_PATH = 'userfiles/'
+
+  # Build logs archive
+  LOG_ARCHIVE_LOCATION = 'local_us'
+  LOG_ARCHIVE_PATH = 'logarchive/'
+
+  # Action logs archive
+  ACTION_LOG_ARCHIVE_LOCATION = 'local_us'
+  ACTION_LOG_ARCHIVE_PATH = 'actionlogarchive/'
+
+  # System logs.
+  SYSTEM_LOGS_PATH = "/var/log/"
+  SYSTEM_LOGS_FILE = "/var/log/syslog"
+  SYSTEM_SERVICES_PATH = os.path.join(CONF_DIR, "init/service/")
+
+  # Allow registry pulls when unable to write to the audit log
+  ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False
+
+  # Services that should not be shown in the logs view.
+  SYSTEM_SERVICE_BLACKLIST = []
+
+  # Temporary tag expiration in seconds; this may actually be longer, based on GC policy
+  PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60  # One hour per layer
+
+  # Signed registry grant token expiration in seconds
+  SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24  # One day to complete a push/pull
+
+  # Registry v2 JWT Auth config
+  REGISTRY_JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60  # Signed for at most one hour, accounting for clock skew
+
+  # The URL endpoint to which we redirect OAuth when generating a token locally.
+  LOCAL_OAUTH_HANDLER = '/oauth/localapp'
+
+  # The various avatar background colors.
+  AVATAR_KIND = 'local'
+
+  # The location of the Quay documentation.
+  DOCUMENTATION_LOCATION = 'http://docs.quay.io'
+  DOCUMENTATION_METADATA = 'https://coreos.github.io/quay-docs/search.json'
+
+  # How often the Garbage Collection worker runs.
+  GARBAGE_COLLECTION_FREQUENCY = 30  # seconds
+
+  # How long notifications will try to send before timing out.
+  NOTIFICATION_SEND_TIMEOUT = 10
+
+  # Security scanner
+  FEATURE_SECURITY_SCANNER = False
+  FEATURE_SECURITY_NOTIFICATIONS = False
+
+  # The endpoint for the security scanner.
+  SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060'
+
+  # The number of seconds between indexing intervals in the security scanner
+  SECURITY_SCANNER_INDEXING_INTERVAL = 30
+
+  # If specified, the security scanner will only index images newer than the provided ID.
+  SECURITY_SCANNER_INDEXING_MIN_ID = None
+
+  # If specified, the endpoint to be used for all POST calls to the security scanner.
+  SECURITY_SCANNER_ENDPOINT_BATCH = None
+
+  # If specified, GET requests that return non-200 will be retried against the following endpoints.
+  SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = []
+
+  # The indexing engine version running inside the security scanner.
+  SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3
+
+  # The version of the API to use for the security scanner.
+  SECURITY_SCANNER_API_VERSION = 'v1'
+
+  # API call timeout for the security scanner.
+  SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10
+
+  # POST call timeout for the security scanner.
+  SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS = 480
+
+  # The issuer name for the security scanner.
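+  # (Presumably used as the JWT issuer ('iss') claim on requests to the scanner;
+  # this note is an assumption, not stated in the patch.)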
+  SECURITY_SCANNER_ISSUER_NAME = 'security_scanner'
+
+  # JWTProxy Settings
+  # The address (sans scheme) to proxy outgoing requests through the jwtproxy
+  # to be signed
+  JWTPROXY_SIGNER = 'localhost:8080'
+
+  # The audience that jwtproxy should verify on incoming requests
+  # If None, will be calculated off of the SERVER_HOSTNAME (default)
+  JWTPROXY_AUDIENCE = None
+
+  # Torrent management flags
+  FEATURE_BITTORRENT = False
+  BITTORRENT_PIECE_SIZE = 512 * 1024
+  BITTORRENT_ANNOUNCE_URL = 'https://localhost:6881/announce'
+  BITTORRENT_FILENAME_PEPPER = str(uuid4())
+  BITTORRENT_WEBSEED_LIFETIME = 3600
+
+  # "Secret" key for generating encrypted paging tokens. Only needed to be secret to
+  # hide the ID range for production (in which this value is overridden). Should *not*
+  # be relied upon for secure encryption otherwise.
+  # This value is a Fernet key and should be 32 bytes, URL-safe base64 encoded.
+  PAGE_TOKEN_KEY = '0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58='
+
+  # The timeout for service key approval.
+  UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24  # One day
+
+  # How long to wait before GCing an expired service key.
+  EXPIRED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 * 7  # One week
+
+  # The ID of the user account in the database to be used for service audit logs. If none, the
+  # lowest user in the database will be used.
+  SERVICE_LOG_ACCOUNT_ID = None
+
+  # The service key ID for the instance service.
+  # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
+  INSTANCE_SERVICE_KEY_SERVICE = 'quay'
+
+  # The location of the key ID file generated for this instance.
+  INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid')
+
+  # The location of the private key generated for this instance.
+  # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
+  INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem')
+
+  # This instance's service key expiration in minutes.
+  INSTANCE_SERVICE_KEY_EXPIRATION = 120
+
+  # Number of minutes between expiration refreshes. Should be the expiration / 2 minus
+  # some additional window time.
+  INSTANCE_SERVICE_KEY_REFRESH = 55
+
+  # The whitelist of client IDs for OAuth applications that allow for direct login.
+  DIRECT_OAUTH_CLIENTID_WHITELIST = []
+
+  # URL that specifies the location of the prometheus stats aggregator.
+  PROMETHEUS_AGGREGATOR_URL = 'http://localhost:9092'
+
+  # Namespace prefix for all prometheus metrics.
+  PROMETHEUS_NAMESPACE = 'quay'
+
+  # Overridable list of reverse DNS prefixes that are reserved for internal use on labels.
+  LABEL_KEY_RESERVED_PREFIXES = []
+
+  # Delays workers from starting until a random point in time between 0 and their regular interval.
+  STAGGER_WORKERS = True
+
+  # Location of the static marketing site.
+  STATIC_SITE_BUCKET = None
+
+  # Site key and secret key for using recaptcha.
+  FEATURE_RECAPTCHA = False
+  RECAPTCHA_SITE_KEY = None
+  RECAPTCHA_SECRET_KEY = None
+
+  # Server where TUF metadata can be found
+  TUF_SERVER = None
+
+  # Prefix to add to metadata e.g. //
+  TUF_GUN_PREFIX = None
+
+  # Maximum size allowed for layers in the registry.
+  MAXIMUM_LAYER_SIZE = '20G'
+
+  # Feature Flag: Whether team syncing from the backing auth is enabled.
+  FEATURE_TEAM_SYNCING = False
+  TEAM_RESYNC_STALE_TIME = '30m'
+  TEAM_SYNC_WORKER_FREQUENCY = 60  # seconds
+
+  # Feature Flag: If enabled, non-superusers can set up team syncing.
+  FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False
+
+  # The default configurable tag expiration time for time machine.
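+  # (Duration shorthand: 's' = seconds, 'm' = minutes, 'd' = days, 'w' = weeks,
+  # so '2w' means two weeks.)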
+ DEFAULT_TAG_EXPIRATION = '2w' + + # The options to present in namespace settings for the tag expiration. If empty, no option + # will be given and the default will be displayed read-only. + TAG_EXPIRATION_OPTIONS = ['0s', '1d', '1w', '2w', '4w'] + + # Feature Flag: Whether users can view and change their tag expiration. + FEATURE_CHANGE_TAG_EXPIRATION = True + + # Defines a secret for enabling the health-check endpoint's debug information. + ENABLE_HEALTH_DEBUG_SECRET = None + + # The lifetime for a user recovery token before it becomes invalid. + USER_RECOVERY_TOKEN_LIFETIME = '30m' + + # If specified, when app specific passwords expire by default. + APP_SPECIFIC_TOKEN_EXPIRATION = None + + # Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI. + FEATURE_APP_SPECIFIC_TOKENS = True + + # How long expired app specific tokens should remain visible to users before being automatically + # deleted. Set to None to turn off garbage collection. + EXPIRED_APP_SPECIFIC_TOKEN_GC = '1d' + + # The size of pages returned by the Docker V2 API. + V2_PAGINATION_SIZE = 50 + + # If enabled, ensures that API calls are made with the X-Requested-With header + # when called from a browser. + BROWSER_API_CALLS_XHR_ONLY = True + + # If set to a non-None integer value, the default number of maximum builds for a namespace. + DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + # If set to a non-None integer value, the default number of maximum builds for a namespace whose + # creator IP is deemed a threat. + THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + # For Billing Support Only: The number of allowed builds on a namespace that has been billed + # successfully. + BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + # Configuration for the data model cache. + DATA_MODEL_CACHE_CONFIG = { + 'engine': 'memcached', + 'endpoint': ('127.0.0.1', 18080), + } + + # Defines the number of successive failures of a build trigger's build before the trigger is + # automatically disabled. + SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100 + + # Defines the number of successive internal errors of a build trigger's build before the + # trigger is automatically disabled. + SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5 + + # Defines the delay required (in seconds) before the last_accessed field of a user/robot or access + # token will be updated after the previous update. 
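+  # (With the 60-second threshold below, each entity's last_accessed is written
+  # at most once per minute.)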
+ LAST_ACCESSED_UPDATE_THRESHOLD_S = 60 + + # Defines the number of results per page used to show search results + SEARCH_RESULTS_PER_PAGE = 10 + + # Defines the maximum number of pages the user can paginate before they are limited + SEARCH_MAX_RESULT_PAGE_COUNT = 10 diff --git a/config_app/application.py b/config_app/config_application.py similarity index 78% rename from config_app/application.py rename to config_app/config_application.py index 9f4249e00..b408a7984 100644 --- a/config_app/application.py +++ b/config_app/config_application.py @@ -1,7 +1,7 @@ -from app import app as application +from config_app import app as application # Bind all of the blueprints -import web +import config_web if __name__ == '__main__': diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py index 67580475a..e220e27ed 100644 --- a/config_app/config_endpoints/api/__init__.py +++ b/config_app/config_endpoints/api/__init__.py @@ -1,14 +1,12 @@ import logging -from config_app import app -from config_app.util.config import config_provider - -from flask import Blueprint, request, session -from flask_restful import Resource, abort, Api, reqparse +from flask import Blueprint +from flask_restful import Resource, Api from flask_restful.utils.cors import crossdomain - +from config_app import app from functools import partial, wraps - +from jsonschema import validate, ValidationError +from config_endpoints.exception import InvalidResponse logger = logging.getLogger(__name__) api_bp = Blueprint('api', __name__) @@ -17,6 +15,8 @@ CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With'] class ApiExceptionHandlingApi(Api): + pass + @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS) def handle_error(self, error): print('HANDLING ERROR IN API') @@ -25,19 +25,12 @@ class ApiExceptionHandlingApi(Api): api = ApiExceptionHandlingApi() - -class HelloWorld(Resource): - def get(self): - print("hit the dummy endpoint") - return {'hello': 'world'} - - -api.add_resource(HelloWorld, '/') - +api.init_app(api_bp) def verify_not_prod(func): @add_method_metadata('enterprise_only', True) + @wraps(func) def wrapped(*args, **kwargs): # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail. 
# This should never happen (because of the feature-flag on SUPER_USERS), but we want to be @@ -58,6 +51,7 @@ def resource(*urls, **kwargs): if not api_resource: return None + print('registering resource: ', urls) api_resource.registered = True api.add_resource(api_resource, *urls, **kwargs) return api_resource @@ -105,47 +99,31 @@ def no_cache(f): return add_no_cache +def define_json_response(schema_name): + def wrapper(func): + @add_method_metadata('response_schema', schema_name) + @wraps(func) + def wrapped(self, *args, **kwargs): + schema = self.schemas[schema_name] + resp = func(self, *args, **kwargs) + + if app.config['TESTING']: + try: + validate(resp, schema) + except ValidationError as ex: + raise InvalidResponse(ex.message) + + return resp + return wrapped + return wrapper + + nickname = partial(add_method_metadata, 'nickname') -api.init_app(api_bp) -# api.decorators = [csrf_protect(), -# crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS), -# process_oauth, time_decorator(api_bp.name, metric_queue), -# require_xhr_from_browser] +import config_endpoints.api +import config_endpoints.api.discovery +import config_endpoints.api.suconfig +import config_endpoints.api.superuser +import config_endpoints.api.user - - -@resource('/v1/superuser/config') -class SuperUserConfig(ApiResource): - """ Resource for fetching and updating the current configuration, if any. """ - schemas = { - 'UpdateConfig': { - 'type': 'object', - 'description': 'Updates the YAML config file', - 'required': [ - 'config', - 'hostname' - ], - 'properties': { - 'config': { - 'type': 'object' - }, - 'hostname': { - 'type': 'string' - }, - 'password': { - 'type': 'string' - }, - }, - }, - } - - @verify_not_prod - @nickname('scGetConfig') - def get(self): - """ Returns the currently defined configuration, if any. """ - config_object = config_provider.get_config() - return { - 'config': config_object - } diff --git a/config_app/config_endpoints/api/discovery.py b/config_app/config_endpoints/api/discovery.py new file mode 100644 index 000000000..dda178c62 --- /dev/null +++ b/config_app/config_endpoints/api/discovery.py @@ -0,0 +1,252 @@ +import logging +import sys +from collections import OrderedDict + +from config_app import app +from config_endpoints.api import method_metadata +from config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER + + +logger = logging.getLogger(__name__) + + +def generate_route_data(): + include_internal = True + compact = True + + def swagger_parameter(name, description, kind='path', param_type='string', required=True, + enum=None, schema=None): + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject + parameter_info = { + 'name': name, + 'in': kind, + 'required': required + } + + if schema: + parameter_info['schema'] = { + '$ref': '#/definitions/%s' % schema + } + else: + parameter_info['type'] = param_type + + if enum is not None and len(list(enum)) > 0: + parameter_info['enum'] = list(enum) + + return parameter_info + + paths = {} + models = {} + tags = [] + tags_added = set() + operation_ids = set() + + for rule in app.url_map.iter_rules(): + endpoint_method = app.view_functions[rule.endpoint] + + # Verify that we have a view class for this API method. + if not 'view_class' in dir(endpoint_method): + continue + + view_class = endpoint_method.view_class + + # Hide the class if it is internal. + internal = method_metadata(view_class, 'internal') + if not include_internal and internal: + continue + + # Build the tag. 
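+    # For example, a view class at 'config_endpoints.api.suconfig.SuperUserConfig'
+    # yields parts[-2] == 'suconfig', which becomes its Swagger tag.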
+ parts = fully_qualified_name(view_class).split('.') + tag_name = parts[-2] + if not tag_name in tags_added: + tags_added.add(tag_name) + tags.append({ + 'name': tag_name, + 'description': (sys.modules[view_class.__module__].__doc__ or '').strip() + }) + + # Build the Swagger data for the path. + swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule) + full_name = fully_qualified_name(view_class) + path_swagger = { + 'x-name': full_name, + 'x-path': swagger_path, + 'x-tag': tag_name + } + + related_user_res = method_metadata(view_class, 'related_user_resource') + if related_user_res is not None: + path_swagger['x-user-related'] = fully_qualified_name(related_user_res) + + paths[swagger_path] = path_swagger + + # Add any global path parameters. + param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {} + if param_data_map: + path_parameters_swagger = [] + for path_parameter in param_data_map: + description = param_data_map[path_parameter].get('description') + path_parameters_swagger.append(swagger_parameter(path_parameter, description)) + + path_swagger['parameters'] = path_parameters_swagger + + # Add the individual HTTP operations. + method_names = list(rule.methods.difference(['HEAD', 'OPTIONS'])) + for method_name in method_names: + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object + method = getattr(view_class, method_name.lower(), None) + if method is None: + logger.debug('Unable to find method for %s in class %s', method_name, view_class) + continue + + operationId = method_metadata(method, 'nickname') + operation_swagger = { + 'operationId': operationId, + 'parameters': [], + } + + if operationId is None: + continue + + if operationId in operation_ids: + raise Exception('Duplicate operation Id: %s' % operationId) + + operation_ids.add(operationId) + + # Mark the method as internal. + internal = method_metadata(method, 'internal') + if internal is not None: + operation_swagger['x-internal'] = True + + if include_internal: + requires_fresh_login = method_metadata(method, 'requires_fresh_login') + if requires_fresh_login is not None: + operation_swagger['x-requires-fresh-login'] = True + + # Add the path parameters. + if rule.arguments: + for path_parameter in rule.arguments: + description = param_data_map.get(path_parameter, {}).get('description') + operation_swagger['parameters'].append(swagger_parameter(path_parameter, description)) + + # Add the query parameters. + if '__api_query_params' in dir(method): + for query_parameter_info in method.__api_query_params: + name = query_parameter_info['name'] + description = query_parameter_info['help'] + param_type = TYPE_CONVERTER[query_parameter_info['type']] + required = query_parameter_info['required'] + + operation_swagger['parameters'].append( + swagger_parameter(name, description, kind='query', + param_type=param_type, + required=required, + enum=query_parameter_info['choices'])) + + # Add the OAuth security block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject + scope = method_metadata(method, 'oauth2_scope') + if scope and not compact: + operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}] + + # Add the responses block. 
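+      # Every operation gets the standard 4xx error responses plus a single success
+      # response keyed off the HTTP method: 204 for DELETE, 201 for POST, 200 otherwise.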
+ # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject + response_schema_name = method_metadata(method, 'response_schema') + if not compact: + if response_schema_name: + models[response_schema_name] = view_class.schemas[response_schema_name] + + models['ApiError'] = { + 'type': 'object', + 'properties': { + 'status': { + 'type': 'integer', + 'description': 'Status code of the response.' + }, + 'type': { + 'type': 'string', + 'description': 'Reference to the type of the error.' + }, + 'detail': { + 'type': 'string', + 'description': 'Details about the specific instance of the error.' + }, + 'title': { + 'type': 'string', + 'description': 'Unique error code to identify the type of error.' + }, + 'error_message': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + }, + 'error_type': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + } + }, + 'required': [ + 'status', + 'type', + 'title', + ] + } + + responses = { + '400': { + 'description': 'Bad Request', + }, + + '401': { + 'description': 'Session required', + }, + + '403': { + 'description': 'Unauthorized access', + }, + + '404': { + 'description': 'Not found', + }, + } + + for _, body in responses.items(): + body['schema'] = {'$ref': '#/definitions/ApiError'} + + if method_name == 'DELETE': + responses['204'] = { + 'description': 'Deleted' + } + elif method_name == 'POST': + responses['201'] = { + 'description': 'Successful creation' + } + else: + responses['200'] = { + 'description': 'Successful invocation' + } + + if response_schema_name: + responses['200']['schema'] = { + '$ref': '#/definitions/%s' % response_schema_name + } + + operation_swagger['responses'] = responses + + # Add the request block. + request_schema_name = method_metadata(method, 'request_schema') + if request_schema_name and not compact: + models[request_schema_name] = view_class.schemas[request_schema_name] + + operation_swagger['parameters'].append( + swagger_parameter('body', 'Request body contents.', kind='body', + schema=request_schema_name)) + + # Add the operation to the parent path. + if not internal or (internal and include_internal): + path_swagger[method_name.lower()] = operation_swagger + + tags.sort(key=lambda t: t['name']) + paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag'])) + + if compact: + return {'paths': paths} diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py new file mode 100644 index 000000000..02a3cb2d4 --- /dev/null +++ b/config_app/config_endpoints/api/suconfig.py @@ -0,0 +1,87 @@ +import logging + +from config_endpoints.api import resource, ApiResource, verify_not_prod, nickname +from config_app import app, config_provider + +logger = logging.getLogger(__name__) + + +@resource('/v1/superuser/config') +class SuperUserConfig(ApiResource): + """ Resource for fetching and updating the current configuration, if any. """ + schemas = { + 'UpdateConfig': { + 'type': 'object', + 'description': 'Updates the YAML config file', + 'required': [ + 'config', + 'hostname' + ], + 'properties': { + 'config': { + 'type': 'object' + }, + 'hostname': { + 'type': 'string' + }, + 'password': { + 'type': 'string' + }, + }, + }, + } + + @verify_not_prod + @nickname('scGetConfig') + def get(self): + """ Returns the currently defined configuration, if any. """ + config_object = config_provider.get_config() + logger.debug(config_object) + logger.debug(config_provider) + # Todo: do we even need this endpoint? 
Since we'll be loading the config in browser + return { + 'config': config_object + } + + +@resource('/v1/superuser/registrystatus') +class SuperUserRegistryStatus(ApiResource): + """ Resource for determining the status of the registry, such as if config exists, + if a database is configured, and if it has any defined users. + """ + @nickname('scRegistryStatus') + @verify_not_prod + def get(self): + """ Returns the status of the registry. """ + + # If we have SETUP_COMPLETE, then we're ready to go! + if app.config.get('SETUP_COMPLETE', False): + return { + 'provider_id': config_provider.provider_id, + 'requires_restart': config_provider.requires_restart(app.config), + 'status': 'ready' + } + + # If there is no conf/stack volume, then report that status. + if not config_provider.volume_exists(): + return { + 'status': 'missing-config-dir' + } + + # If there is no config file, we need to setup the database. + if not config_provider.config_exists(): + return { + 'status': 'config-db' + } + + # If the database isn't yet valid, then we need to set it up. + # if not database_is_valid(): + # return { + # 'status': 'setup-db' + # } + # + # return { + # 'status': 'create-superuser' if not database_has_users() else 'config' + # } + + return {} diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py new file mode 100644 index 000000000..227257a56 --- /dev/null +++ b/config_app/config_endpoints/api/superuser.py @@ -0,0 +1,151 @@ +import os +import logging +import pathvalidate +from flask import request + +from config_endpoints.exception import InvalidRequest +from config_endpoints.api import resource, ApiResource, verify_not_prod, nickname +from config_util.ssl import load_certificate, CertInvalidException +from config_app import app, config_provider + +logger = logging.getLogger(__name__) +EXTRA_CA_DIRECTORY = 'extra_ca_certs' + + +@resource('/v1/superuser/customcerts/') +class SuperUserCustomCertificate(ApiResource): + """ Resource for managing a custom certificate. """ + + @nickname('uploadCustomCertificate') + @verify_not_prod + def post(self, certpath): + uploaded_file = request.files['file'] + if not uploaded_file: + raise InvalidRequest('Missing certificate file') + + # Save the certificate. + certpath = pathvalidate.sanitize_filename(certpath) + if not certpath.endswith('.crt'): + raise InvalidRequest('Invalid certificate file: must have suffix `.crt`') + + logger.debug('Saving custom certificate %s', certpath) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.save_volume_file(cert_full_path, uploaded_file) + logger.debug('Saved custom certificate %s', certpath) + + # Validate the certificate. + try: + logger.debug('Loading custom certificate %s', certpath) + with config_provider.get_volume_file(cert_full_path) as f: + load_certificate(f.read()) + except CertInvalidException: + logger.exception('Got certificate invalid error for cert %s', certpath) + return '', 204 + except IOError: + logger.exception('Got IO error for cert %s', certpath) + return '', 204 + + # Call the update script to install the certificate immediately. 
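+    # (os.system returns the shell's exit status; a nonzero status below is treated
+    # as a failed certificate install.)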
+ if not app.config['TESTING']: + logger.debug('Calling certs_install.sh') + if os.system('/conf/init/certs_install.sh') != 0: + raise Exception('Could not install certificates') + + logger.debug('certs_install.sh completed') + + return '', 204 + + @nickname('deleteCustomCertificate') + @verify_not_prod + def delete(self, certpath): + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.remove_volume_file(cert_full_path) + return '', 204 + + +@resource('/v1/superuser/customcerts') +class SuperUserCustomCertificates(ApiResource): + """ Resource for managing custom certificates. """ + + @nickname('getCustomCertificates') + @verify_not_prod + def get(self): + has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY) + extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY) + if extra_certs_found is None: + return { + 'status': 'file' if has_extra_certs_path else 'none', + } + + cert_views = [] + for extra_cert_path in extra_certs_found: + try: + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path) + with config_provider.get_volume_file(cert_full_path) as f: + certificate = load_certificate(f.read()) + cert_views.append({ + 'path': extra_cert_path, + 'names': list(certificate.names), + 'expired': certificate.expired, + }) + except CertInvalidException as cie: + cert_views.append({ + 'path': extra_cert_path, + 'error': cie.message, + }) + except IOError as ioe: + cert_views.append({ + 'path': extra_cert_path, + 'error': ioe.message, + }) + + return { + 'status': 'directory', + 'certs': cert_views, + } + +# TODO(config) port this endpoint when (https://github.com/quay/quay/pull/3055) merged to ensure no conflicts +# @resource('/v1/superuser/keys') +# class SuperUserServiceKeyManagement(ApiResource): +# """ Resource for managing service keys.""" +# schemas = { +# 'CreateServiceKey': { +# 'id': 'CreateServiceKey', +# 'type': 'object', +# 'description': 'Description of creation of a service key', +# 'required': ['service', 'expiration'], +# 'properties': { +# 'service': { +# 'type': 'string', +# 'description': 'The service authenticating with this key', +# }, +# 'name': { +# 'type': 'string', +# 'description': 'The friendly name of a service key', +# }, +# 'metadata': { +# 'type': 'object', +# 'description': 'The key/value pairs of this key\'s metadata', +# }, +# 'notes': { +# 'type': 'string', +# 'description': 'If specified, the extra notes for the key', +# }, +# 'expiration': { +# 'description': 'The expiration date as a unix timestamp', +# 'anyOf': [{'type': 'number'}, {'type': 'null'}], +# }, +# }, +# }, +# } +# +# @verify_not_prod +# @nickname('listServiceKeys') +# def get(self): +# keys = pre_oci_model.list_all_service_keys() +# +# return jsonify({ +# 'keys': [key.to_dict() for key in keys], +# }) +# + diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py new file mode 100644 index 000000000..b7ff870cf --- /dev/null +++ b/config_app/config_endpoints/api/user.py @@ -0,0 +1,18 @@ +from config_endpoints.api import resource, ApiResource, nickname + + +@resource('/v1/user/') +class User(ApiResource): + """ Operations related to users. """ + + @nickname('getLoggedInUser') + def get(self): + """ Get user information for the authenticated user. 
""" + # user = get_authenticated_user() + + # return user_view(user) + return { + 'anonymous': False, + # 'username': user.username, + } + diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py index 1378f0209..160cf7068 100644 --- a/config_app/config_endpoints/common.py +++ b/config_app/config_endpoints/common.py @@ -1,16 +1,10 @@ import logging import os import re -import sys -from collections import OrderedDict -from cachetools import lru_cache from flask import make_response, render_template from flask_restful import reqparse -from config_app.config_endpoints.api import method_metadata -from config_app.app import app - def truthy_bool(param): return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'} @@ -60,246 +54,3 @@ def fully_qualified_name(method_view_class): return '%s.%s' % (method_view_class.__module__, method_view_class.__name__) -# @lru_cache(maxsize=1) -def generate_route_data(): - include_internal = True - compact = True - - def swagger_parameter(name, description, kind='path', param_type='string', required=True, - enum=None, schema=None): - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject - parameter_info = { - 'name': name, - 'in': kind, - 'required': required - } - - if schema: - parameter_info['schema'] = { - '$ref': '#/definitions/%s' % schema - } - else: - parameter_info['type'] = param_type - - if enum is not None and len(list(enum)) > 0: - parameter_info['enum'] = list(enum) - - return parameter_info - - paths = {} - models = {} - tags = [] - tags_added = set() - operation_ids = set() - - print('APP URL MAp:') - print(app.url_map) - for rule in app.url_map.iter_rules(): - endpoint_method = app.view_functions[rule.endpoint] - - # Verify that we have a view class for this API method. - if not 'view_class' in dir(endpoint_method): - continue - - view_class = endpoint_method.view_class - - # Hide the class if it is internal. - internal = method_metadata(view_class, 'internal') - if not include_internal and internal: - continue - - # Build the tag. - parts = fully_qualified_name(view_class).split('.') - tag_name = parts[-2] - if not tag_name in tags_added: - tags_added.add(tag_name) - tags.append({ - 'name': tag_name, - 'description': (sys.modules[view_class.__module__].__doc__ or '').strip() - }) - - # Build the Swagger data for the path. - swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule) - full_name = fully_qualified_name(view_class) - path_swagger = { - 'x-name': full_name, - 'x-path': swagger_path, - 'x-tag': tag_name - } - - related_user_res = method_metadata(view_class, 'related_user_resource') - if related_user_res is not None: - path_swagger['x-user-related'] = fully_qualified_name(related_user_res) - - paths[swagger_path] = path_swagger - - # Add any global path parameters. - param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {} - if param_data_map: - path_parameters_swagger = [] - for path_parameter in param_data_map: - description = param_data_map[path_parameter].get('description') - path_parameters_swagger.append(swagger_parameter(path_parameter, description)) - - path_swagger['parameters'] = path_parameters_swagger - - # Add the individual HTTP operations. 
- method_names = list(rule.methods.difference(['HEAD', 'OPTIONS'])) - for method_name in method_names: - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object - method = getattr(view_class, method_name.lower(), None) - if method is None: - logger.debug('Unable to find method for %s in class %s', method_name, view_class) - continue - - operationId = method_metadata(method, 'nickname') - operation_swagger = { - 'operationId': operationId, - 'parameters': [], - } - - if operationId is None: - continue - - if operationId in operation_ids: - raise Exception('Duplicate operation Id: %s' % operationId) - - operation_ids.add(operationId) - - # Mark the method as internal. - internal = method_metadata(method, 'internal') - if internal is not None: - operation_swagger['x-internal'] = True - - if include_internal: - requires_fresh_login = method_metadata(method, 'requires_fresh_login') - if requires_fresh_login is not None: - operation_swagger['x-requires-fresh-login'] = True - - # Add the path parameters. - if rule.arguments: - for path_parameter in rule.arguments: - description = param_data_map.get(path_parameter, {}).get('description') - operation_swagger['parameters'].append(swagger_parameter(path_parameter, description)) - - # Add the query parameters. - if '__api_query_params' in dir(method): - for query_parameter_info in method.__api_query_params: - name = query_parameter_info['name'] - description = query_parameter_info['help'] - param_type = TYPE_CONVERTER[query_parameter_info['type']] - required = query_parameter_info['required'] - - operation_swagger['parameters'].append( - swagger_parameter(name, description, kind='query', - param_type=param_type, - required=required, - enum=query_parameter_info['choices'])) - - # Add the OAuth security block. - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject - scope = method_metadata(method, 'oauth2_scope') - if scope and not compact: - operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}] - - # Add the responses block. - # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject - response_schema_name = method_metadata(method, 'response_schema') - if not compact: - if response_schema_name: - models[response_schema_name] = view_class.schemas[response_schema_name] - - models['ApiError'] = { - 'type': 'object', - 'properties': { - 'status': { - 'type': 'integer', - 'description': 'Status code of the response.' - }, - 'type': { - 'type': 'string', - 'description': 'Reference to the type of the error.' - }, - 'detail': { - 'type': 'string', - 'description': 'Details about the specific instance of the error.' - }, - 'title': { - 'type': 'string', - 'description': 'Unique error code to identify the type of error.' 
- }, - 'error_message': { - 'type': 'string', - 'description': 'Deprecated; alias for detail' - }, - 'error_type': { - 'type': 'string', - 'description': 'Deprecated; alias for detail' - } - }, - 'required': [ - 'status', - 'type', - 'title', - ] - } - - responses = { - '400': { - 'description': 'Bad Request', - }, - - '401': { - 'description': 'Session required', - }, - - '403': { - 'description': 'Unauthorized access', - }, - - '404': { - 'description': 'Not found', - }, - } - - for _, body in responses.items(): - body['schema'] = {'$ref': '#/definitions/ApiError'} - - if method_name == 'DELETE': - responses['204'] = { - 'description': 'Deleted' - } - elif method_name == 'POST': - responses['201'] = { - 'description': 'Successful creation' - } - else: - responses['200'] = { - 'description': 'Successful invocation' - } - - if response_schema_name: - responses['200']['schema'] = { - '$ref': '#/definitions/%s' % response_schema_name - } - - operation_swagger['responses'] = responses - - # Add the request block. - request_schema_name = method_metadata(method, 'request_schema') - if request_schema_name and not compact: - models[request_schema_name] = view_class.schemas[request_schema_name] - - operation_swagger['parameters'].append( - swagger_parameter('body', 'Request body contents.', kind='body', - schema=request_schema_name)) - - # Add the operation to the parent path. - if not internal or (internal and include_internal): - path_swagger[method_name.lower()] = operation_swagger - - tags.sort(key=lambda t: t['name']) - paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag'])) - - if compact: - return {'paths': paths} diff --git a/config_app/config_endpoints/exception.py b/config_app/config_endpoints/exception.py new file mode 100644 index 000000000..20b0976b3 --- /dev/null +++ b/config_app/config_endpoints/exception.py @@ -0,0 +1,123 @@ +from enum import Enum + +from flask import url_for +from werkzeug.exceptions import HTTPException + + +class ApiErrorType(Enum): + external_service_timeout = 'external_service_timeout' + invalid_request = 'invalid_request' + invalid_response = 'invalid_response' + invalid_token = 'invalid_token' + expired_token = 'expired_token' + insufficient_scope = 'insufficient_scope' + fresh_login_required = 'fresh_login_required' + exceeds_license = 'exceeds_license' + not_found = 'not_found' + downstream_issue = 'downstream_issue' + + +ERROR_DESCRIPTION = { + ApiErrorType.external_service_timeout.value: "An external service timed out. Retrying the request may resolve the issue.", + ApiErrorType.invalid_request.value: "The request was invalid. It may have contained invalid values or was improperly formatted.", + ApiErrorType.invalid_response.value: "The response was invalid.", + ApiErrorType.invalid_token.value: "The access token provided was invalid.", + ApiErrorType.expired_token.value: "The access token provided has expired.", + ApiErrorType.insufficient_scope.value: "The access token did not have sufficient scope to access the requested resource.", + ApiErrorType.fresh_login_required.value: "The action requires a fresh login to succeed.", + ApiErrorType.exceeds_license.value: "The action was refused because the current license does not allow it.", + ApiErrorType.not_found.value: "The resource was not found.", + ApiErrorType.downstream_issue.value: "An error occurred in a downstream service.", +} + + +class ApiException(HTTPException): + """ + Represents an error in the application/problem+json format. 
+ + See: https://tools.ietf.org/html/rfc7807 + + - "type" (string) - A URI reference that identifies the + problem type. + + - "title" (string) - A short, human-readable summary of the problem + type. It SHOULD NOT change from occurrence to occurrence of the + problem, except for purposes of localization + + - "status" (number) - The HTTP status code + + - "detail" (string) - A human-readable explanation specific to this + occurrence of the problem. + + - "instance" (string) - A URI reference that identifies the specific + occurrence of the problem. It may or may not yield further + information if dereferenced. + """ + + def __init__(self, error_type, status_code, error_description, payload=None): + Exception.__init__(self) + self.error_description = error_description + self.code = status_code + self.payload = payload + self.error_type = error_type + self.data = self.to_dict() + + super(ApiException, self).__init__(error_description, None) + + def to_dict(self): + rv = dict(self.payload or ()) + + if self.error_description is not None: + rv['detail'] = self.error_description + rv['error_message'] = self.error_description # TODO: deprecate + + rv['error_type'] = self.error_type.value # TODO: deprecate + rv['title'] = self.error_type.value + rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True) + rv['status'] = self.code + + return rv + + +class ExternalServiceError(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.external_service_timeout, 520, error_description, payload) + + +class InvalidRequest(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload) + + +class InvalidResponse(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload) + + +class InvalidToken(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_token, 401, error_description, payload) + +class ExpiredToken(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.expired_token, 401, error_description, payload) + + +class FreshLoginRequired(ApiException): + def __init__(self, payload=None): + ApiException.__init__(self, ApiErrorType.fresh_login_required, 401, "Requires fresh login", payload) + + +class ExceedsLicenseException(ApiException): + def __init__(self, payload=None): + ApiException.__init__(self, ApiErrorType.exceeds_license, 402, 'Payment Required', payload) + + +class NotFound(ApiException): + def __init__(self, payload=None): + ApiException.__init__(self, ApiErrorType.not_found, 404, 'Not Found', payload) + + +class DownstreamIssue(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.downstream_issue, 520, error_description, payload) diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py index 90693d037..44a8f5cbd 100644 --- a/config_app/config_endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -1,17 +1,23 @@ from flask import Blueprint -from common import render_page_template -from config_app.config_endpoints.common import generate_route_data -from util.cache import no_cache +from config_endpoints.common import render_page_template +from config_endpoints.api.discovery 
import generate_route_data +# from config_util.cache import no_cache + setup_web = Blueprint('setup_web', __name__, template_folder='templates') +# @lru_cache(maxsize=1) +def _get_route_data(): + return generate_route_data() + + def render_page_template_with_routedata(name, *args, **kwargs): - return render_page_template(name, generate_route_data(), *args, **kwargs) + return render_page_template(name, _get_route_data(), *args, **kwargs) +# @no_cache @setup_web.route('/', methods=['GET'], defaults={'path': ''}) -@no_cache def index(path, **kwargs): return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs) diff --git a/config_app/config_test/testconfig.py b/config_app/config_test/testconfig.py new file mode 100644 index 000000000..2ba731589 --- /dev/null +++ b/config_app/config_test/testconfig.py @@ -0,0 +1,108 @@ +import os + +from datetime import datetime, timedelta +from tempfile import NamedTemporaryFile + +from config import DefaultConfig + + +class FakeTransaction(object): + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + pass + + +TEST_DB_FILE = NamedTemporaryFile(delete=True) + + +class TestConfig(DefaultConfig): + TESTING = True + SECRET_KEY = 'a36c9d7d-25a9-4d3f-a586-3d2f8dc40a83' + BILLING_TYPE = 'FakeStripe' + + TEST_DB_FILE = TEST_DB_FILE + DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///{0}'.format(TEST_DB_FILE.name)) + DB_CONNECTION_ARGS = { + 'threadlocals': True, + 'autorollback': True, + } + + @staticmethod + def create_transaction(db): + return FakeTransaction() + + DB_TRANSACTION_FACTORY = create_transaction + + DISTRIBUTED_STORAGE_CONFIG = {'local_us': ['FakeStorage', {}], 'local_eu': ['FakeStorage', {}]} + DISTRIBUTED_STORAGE_PREFERENCE = ['local_us'] + + BUILDLOGS_MODULE_AND_CLASS = ('test.testlogs', 'testlogs.TestBuildLogs') + BUILDLOGS_OPTIONS = ['devtable', 'building', 'deadbeef-dead-beef-dead-beefdeadbeef', False] + + USERFILES_LOCATION = 'local_us' + + FEATURE_SUPER_USERS = True + FEATURE_BILLING = True + FEATURE_MAILING = True + SUPER_USERS = ['devtable'] + + LICENSE_USER_LIMIT = 500 + LICENSE_EXPIRATION = datetime.now() + timedelta(weeks=520) + LICENSE_EXPIRATION_WARNING = datetime.now() + timedelta(weeks=520) + + FEATURE_GITHUB_BUILD = True + FEATURE_BITTORRENT = True + FEATURE_ACI_CONVERSION = True + + CLOUDWATCH_NAMESPACE = None + + FEATURE_SECURITY_SCANNER = True + FEATURE_SECURITY_NOTIFICATIONS = True + SECURITY_SCANNER_ENDPOINT = 'http://fakesecurityscanner/' + SECURITY_SCANNER_API_VERSION = 'v1' + SECURITY_SCANNER_ENGINE_VERSION_TARGET = 1 + SECURITY_SCANNER_API_TIMEOUT_SECONDS = 1 + + FEATURE_SIGNING = True + + SIGNING_ENGINE = 'gpg2' + + GPG2_PRIVATE_KEY_NAME = 'EEB32221' + GPG2_PRIVATE_KEY_FILENAME = 'test/data/signing-private.gpg' + GPG2_PUBLIC_KEY_FILENAME = 'test/data/signing-public.gpg' + + INSTANCE_SERVICE_KEY_KID_LOCATION = 'test/data/test.kid' + INSTANCE_SERVICE_KEY_LOCATION = 'test/data/test.pem' + + PROMETHEUS_AGGREGATOR_URL = None + + GITHUB_LOGIN_CONFIG = {} + GOOGLE_LOGIN_CONFIG = {} + + FEATURE_GITHUB_LOGIN = True + FEATURE_GOOGLE_LOGIN = True + + TESTOIDC_LOGIN_CONFIG = { + 'CLIENT_ID': 'foo', + 'CLIENT_SECRET': 'bar', + 'OIDC_SERVER': 'http://fakeoidc', + 'DEBUGGING': True, + 'LOGIN_BINDING_FIELD': 'sub', + } + + RECAPTCHA_SITE_KEY = 'somekey' + RECAPTCHA_SECRET_KEY = 'somesecretkey' + + FEATURE_APP_REGISTRY = True + FEATURE_TEAM_SYNCING = True + FEATURE_CHANGE_TAG_EXPIRATION = True + + TAG_EXPIRATION_OPTIONS = ['0s', '1s', '1d', '1w', '2w', '4w'] + + 
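
The call site for the transaction factory is not shown in this patch; a sketch of the assumed consumption pattern, under which tests transparently receive the no-op FakeTransaction above instead of a real database transaction:

    def update_rows(db, app_config):
      # In tests this enters FakeTransaction, so no transaction is actually opened.
      with app_config['DB_TRANSACTION_FACTORY'](db):
        pass  # perform model writes here
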
DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + DATA_MODEL_CACHE_CONFIG = { + 'engine': 'inmemory', + } diff --git a/config_app/util/__init__.py b/config_app/config_util/__init__.py similarity index 100% rename from config_app/util/__init__.py rename to config_app/config_util/__init__.py diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py new file mode 100644 index 000000000..b7b940d4d --- /dev/null +++ b/config_app/config_util/config/__init__.py @@ -0,0 +1,16 @@ +from config_util.config.fileprovider import FileConfigProvider +from config_util.config.testprovider import TestConfigProvider +from config_util.config.k8sprovider import KubernetesConfigProvider + + +def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False): + """ Loads and returns the config provider for the current environment. """ + if testing: + return TestConfigProvider() + + if kubernetes: + return KubernetesConfigProvider(config_volume, yaml_filename, py_filename) + + return FileConfigProvider(config_volume, yaml_filename, py_filename) + + diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py new file mode 100644 index 000000000..1bcf497a2 --- /dev/null +++ b/config_app/config_util/config/basefileprovider.py @@ -0,0 +1,71 @@ +import os +import logging + +from config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, + CannotWriteConfigException) + +logger = logging.getLogger(__name__) + + +class BaseFileProvider(BaseProvider): + """ Base implementation of the config provider that reads the data from the file system. """ + def __init__(self, config_volume, yaml_filename, py_filename): + self.config_volume = config_volume + self.yaml_filename = yaml_filename + self.py_filename = py_filename + + self.yaml_path = os.path.join(config_volume, yaml_filename) + self.py_path = os.path.join(config_volume, py_filename) + + def update_app_config(self, app_config): + if os.path.exists(self.py_path): + logger.debug('Applying config file: %s', self.py_path) + app_config.from_pyfile(self.py_path) + + if os.path.exists(self.yaml_path): + logger.debug('Applying config file: %s', self.yaml_path) + import_yaml(app_config, self.yaml_path) + + def get_config(self): + if not self.config_exists(): + return None + + config_obj = {} + import_yaml(config_obj, self.yaml_path) + return config_obj + + def config_exists(self): + return self.volume_file_exists(self.yaml_filename) + + def volume_exists(self): + return os.path.exists(self.config_volume) + + def volume_file_exists(self, filename): + return os.path.exists(os.path.join(self.config_volume, filename)) + + def get_volume_file(self, filename, mode='r'): + return open(os.path.join(self.config_volume, filename), mode=mode) + + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) + + def list_volume_directory(self, path): + dirpath = os.path.join(self.config_volume, path) + if not os.path.exists(dirpath): + return None + + if not os.path.isdir(dirpath): + return None + + return os.listdir(dirpath) + + def requires_restart(self, app_config): + file_config = self.get_config() + if not file_config: + return False + + for key in file_config: + if app_config.get(key) != file_config[key]: + return True + + return False diff --git a/config_app/util/baseprovider.py b/config_app/config_util/config/baseprovider.py similarity index 98% rename from config_app/util/baseprovider.py rename to 
config_app/config_util/config/baseprovider.py index 5a616895f..ce6c6589c 100644 --- a/config_app/util/baseprovider.py +++ b/config_app/config_util/config/baseprovider.py @@ -6,7 +6,7 @@ from six import add_metaclass from jsonschema import validate, ValidationError -from util.config.schema import CONFIG_SCHEMA +from config_util.config.schema import CONFIG_SCHEMA logger = logging.getLogger(__name__) diff --git a/config_app/util/fileprovider.py b/config_app/config_util/config/fileprovider.py similarity index 91% rename from config_app/util/fileprovider.py rename to config_app/config_util/config/fileprovider.py index ac2ceec40..95da64330 100644 --- a/config_app/util/fileprovider.py +++ b/config_app/config_util/config/fileprovider.py @@ -1,8 +1,8 @@ import os import logging -from util.config.provider.baseprovider import export_yaml, CannotWriteConfigException -from util.config.provider.basefileprovider import BaseFileProvider +from config_util.config.baseprovider import export_yaml, CannotWriteConfigException +from config_util.config.basefileprovider import BaseFileProvider logger = logging.getLogger(__name__) diff --git a/config_app/config_util/config/k8sprovider.py b/config_app/config_util/config/k8sprovider.py new file mode 100644 index 000000000..5d65af70b --- /dev/null +++ b/config_app/config_util/config/k8sprovider.py @@ -0,0 +1,170 @@ +import os +import logging +import json +import base64 +import time + +from requests import Request, Session + +from config_util.config.baseprovider import CannotWriteConfigException, get_yaml +from config_util.config.basefileprovider import BaseFileProvider + + +logger = logging.getLogger(__name__) + +KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '') +port = os.environ.get('KUBERNETES_SERVICE_PORT') +if port: + KUBERNETES_API_HOST += ':' + port + +SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token' + +QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise') +QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret') + +class KubernetesConfigProvider(BaseFileProvider): + """ Implementation of the config provider that reads and writes configuration + data from a Kubernetes Secret. """ + def __init__(self, config_volume, yaml_filename, py_filename): + super(KubernetesConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) + + # Load the service account token from the local store. + if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH): + raise Exception('Cannot load Kubernetes service account token') + + with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f: + self._service_token = f.read() + + @property + def provider_id(self): + return 'k8s' + + def get_volume_path(self, directory, filename): + # NOTE: Overridden to ensure we don't have subdirectories, which aren't supported + # in Kubernetes secrets. + return "_".join([directory.rstrip('/'), filename]) + + def volume_file_exists(self, filename): + # NOTE: Overridden because we don't have subdirectories, which aren't supported + # in Kubernetes secrets. + secret = self._lookup_secret() + if not secret or not secret.get('data'): + return False + return filename in secret['data'] + + def list_volume_directory(self, path): + # NOTE: Overridden because we don't have subdirectories, which aren't supported + # in Kubernetes secrets. 
+ secret = self._lookup_secret() + + if not secret: + return [] + + paths = [] + for filename in secret.get('data', {}): + if filename.startswith(path): + paths.append(filename[len(path) + 1:]) + return paths + + def save_config(self, config_obj): + self._update_secret_file(self.yaml_filename, get_yaml(config_obj)) + + def write_volume_file(self, filename, contents): + try: + self._update_secret_file(filename, contents) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + def remove_volume_file(self, filename): + try: + self._update_secret_file(filename, None) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + def save_volume_file(self, filename, flask_file): + filepath = super(KubernetesConfigProvider, self).save_volume_file(filename, flask_file) + with open(filepath, 'r') as f: + self.write_volume_file(filename, f.read()) + + def _assert_success(self, response): + if response.status_code != 200: + logger.error('Kubernetes API call failed with response: %s => %s', response.status_code, + response.text) + raise CannotWriteConfigException('Kubernetes API call failed: %s' % response.text) + + def _update_secret_file(self, filename, value=None): + # Check first that the namespace for Quay Enterprise exists. If it does not, report that + # as an error, as it seems to be a common issue. + namespace_url = 'namespaces/%s' % (QE_NAMESPACE) + response = self._execute_k8s_api('GET', namespace_url) + if response.status_code // 100 != 2: + msg = 'A Kubernetes namespace with name `%s` must be created to save config' % QE_NAMESPACE + raise CannotWriteConfigException(msg) + + # Check if the secret exists. If not, then we create an empty secret and then update the file + # inside. + secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET) + secret = self._lookup_secret() + if secret is None: + self._assert_success(self._execute_k8s_api('POST', secret_url, { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": QE_CONFIG_SECRET + }, + "data": {} + })) + + # Update the secret to reflect the file change. + secret['data'] = secret.get('data', {}) + + if value is not None: + secret['data'][filename] = base64.b64encode(value) + else: + secret['data'].pop(filename) + + self._assert_success(self._execute_k8s_api('PUT', secret_url, secret)) + + # Wait until the local mounted copy of the secret has been updated, as + # this is an eventual consistency operation, but the caller expects immediate + # consistency. + while True: + matching_files = set() + for secret_filename, encoded_value in secret['data'].iteritems(): + expected_value = base64.b64decode(encoded_value) + try: + with self.get_volume_file(secret_filename) as f: + contents = f.read() + + if contents == expected_value: + matching_files.add(secret_filename) + except IOError: + continue + + if matching_files == set(secret['data'].keys()): + break + + # Sleep for a second and then try again. 
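
For reference, a sketch of the payload shape `_update_secret_file` PUTs back to the Secrets API (Python 2, matching the module above; the YAML contents are illustrative). Values under `data` must be base64-encoded strings:

    import base64
    import json

    secret = {
      'kind': 'Secret',
      'apiVersion': 'v1',
      'metadata': {'name': 'quay-enterprise-config-secret'},
      # Secret values are opaque bytes and must be base64-encoded.
      'data': {'config.yaml': base64.b64encode('SERVER_HOSTNAME: quay.local\n')},
    }
    body = json.dumps(secret)
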
+ time.sleep(1) + + def _lookup_secret(self): + secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET) + response = self._execute_k8s_api('GET', secret_url) + if response.status_code != 200: + return None + return json.loads(response.text) + + def _execute_k8s_api(self, method, relative_url, data=None): + headers = { + 'Authorization': 'Bearer ' + self._service_token + } + + if data: + headers['Content-Type'] = 'application/json' + + data = json.dumps(data) if data else None + session = Session() + url = 'https://%s/api/v1/%s' % (KUBERNETES_API_HOST, relative_url) + + request = Request(method, url, data=data, headers=headers) + return session.send(request.prepare(), verify=False, timeout=2) diff --git a/config_app/config_util/config/schema.py b/config_app/config_util/config/schema.py new file mode 100644 index 000000000..db38b8872 --- /dev/null +++ b/config_app/config_util/config/schema.py @@ -0,0 +1,914 @@ +# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should +# not be documented for external users. These will generally be used for internal test or only +# given to customers when they have been briefed on the side effects of using them. +INTERNAL_ONLY_PROPERTIES = { + '__module__', + '__doc__', + 'create_transaction', + + 'TESTING', + 'SEND_FILE_MAX_AGE_DEFAULT', + + 'REPLICATION_QUEUE_NAME', + 'DOCKERFILE_BUILD_QUEUE_NAME', + 'CHUNK_CLEANUP_QUEUE_NAME', + 'SECSCAN_NOTIFICATION_QUEUE_NAME', + 'SECURITY_SCANNER_ISSUER_NAME', + 'NOTIFICATION_QUEUE_NAME', + 'NAMESPACE_GC_QUEUE_NAME', + + 'FEATURE_BILLING', + 'FEATURE_SUPPORT_CHAT', + 'BILLING_TYPE', + + 'INSTANCE_SERVICE_KEY_LOCATION', + 'INSTANCE_SERVICE_KEY_REFRESH', + 'INSTANCE_SERVICE_KEY_SERVICE', + 'INSTANCE_SERVICE_KEY_KID_LOCATION', + 'INSTANCE_SERVICE_KEY_EXPIRATION', + 'UNAPPROVED_SERVICE_KEY_TTL_SEC', + 'EXPIRED_SERVICE_KEY_TTL_SEC', + 'REGISTRY_JWT_AUTH_MAX_FRESH_S', + + 'BITTORRENT_FILENAME_PEPPER', + 'BITTORRENT_WEBSEED_LIFETIME', + + 'SERVICE_LOG_ACCOUNT_ID', + 'BUILDLOGS_OPTIONS', + 'LIBRARY_NAMESPACE', + 'STAGGER_WORKERS', + 'QUEUE_WORKER_METRICS_REFRESH_SECONDS', + 'PUSH_TEMP_TAG_EXPIRATION_SEC', + 'GARBAGE_COLLECTION_FREQUENCY', + 'PAGE_TOKEN_KEY', + 'BUILD_MANAGER', + 'JWTPROXY_AUDIENCE', + 'SYSTEM_SERVICE_BLACKLIST', + 'JWTPROXY_SIGNER', + 'SECURITY_SCANNER_INDEXING_MIN_ID', + 'STATIC_SITE_BUCKET', + 'LABEL_KEY_RESERVED_PREFIXES', + 'TEAM_SYNC_WORKER_FREQUENCY', + 'DOCUMENTATION_METADATA', + 'DOCUMENTATION_LOCATION', + 'JSONIFY_PRETTYPRINT_REGULAR', + 'SYSTEM_LOGS_FILE', + 'SYSTEM_LOGS_PATH', + 'SYSTEM_SERVICES_PATH', + 'TUF_GUN_PREFIX', + 'LOGGING_LEVEL', + 'SIGNED_GRANT_EXPIRATION_SEC', + 'PROMETHEUS_AGGREGATOR_URL', + 'DB_TRANSACTION_FACTORY', + 'NOTIFICATION_SEND_TIMEOUT', + 'QUEUE_METRICS_TYPE', + 'MAIL_FAIL_SILENTLY', + 'LOCAL_OAUTH_HANDLER', + 'USE_CDN', + 'ANALYTICS_TYPE', + 'LAST_ACCESSED_UPDATE_THRESHOLD_S', + + 'EXCEPTION_LOG_TYPE', + 'SENTRY_DSN', + 'SENTRY_PUBLIC_DSN', + + 'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT', + 'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT', + + 'SECURITY_SCANNER_ENDPOINT_BATCH', + 'SECURITY_SCANNER_API_TIMEOUT_SECONDS', + 'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS', + 'SECURITY_SCANNER_ENGINE_VERSION_TARGET', + 'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS', + 'SECURITY_SCANNER_API_VERSION', + + 'DATA_MODEL_CACHE_CONFIG', + + # TODO: move this into the schema once we support signing in QE. 
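
The set above is presumably consulted when rendering user-facing documentation for the config; a sketch of that assumed filtering step (it also shows why `__module__`, `__doc__`, and `create_transaction` are listed: `vars()` on a config class includes them):

    def documented_properties(config_class):
      # Drop internal-only keys before exposing the config for documentation.
      return {key: value for key, value in vars(config_class).items()
              if key not in INTERNAL_ONLY_PROPERTIES}
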
+ 'FEATURE_SIGNING', + 'TUF_SERVER', +} + +CONFIG_SCHEMA = { + 'type': 'object', + 'description': 'Schema for Quay configuration', + 'required': [ + 'PREFERRED_URL_SCHEME', + 'SERVER_HOSTNAME', + 'DB_URI', + 'AUTHENTICATION_TYPE', + 'DISTRIBUTED_STORAGE_CONFIG', + 'BUILDLOGS_REDIS', + 'USER_EVENTS_REDIS', + 'DISTRIBUTED_STORAGE_PREFERENCE', + 'DEFAULT_TAG_EXPIRATION', + 'TAG_EXPIRATION_OPTIONS', + ], + 'properties': { + # Hosting. + 'PREFERRED_URL_SCHEME': { + 'type': 'string', + 'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`', + 'enum': ['http', 'https'], + 'x-example': 'https', + }, + 'SERVER_HOSTNAME': { + 'type': 'string', + 'description': 'The URL at which Quay is accessible, without the scheme.', + 'x-example': 'quay.io', + }, + 'EXTERNAL_TLS_TERMINATION': { + 'type': 'boolean', + 'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.', + 'x-example': True, + }, + + # User-visible configuration. + 'REGISTRY_TITLE': { + 'type': 'string', + 'description': 'If specified, the long-form title for the registry. Defaults to `Quay Enterprise`.', + 'x-example': 'Corp Container Service', + }, + 'REGISTRY_TITLE_SHORT': { + 'type': 'string', + 'description': 'If specified, the short-form title for the registry. Defaults to `Quay Enterprise`.', + 'x-example': 'CCS', + }, + 'CONTACT_INFO': { + 'type': 'array', + 'minItems': 1, + 'uniqueItems': True, + 'description': 'If specified, contact information to display on the contact page. ' + + 'If only a single piece of contact information is specified, the contact footer will link directly.', + 'items': [ + { + 'type': 'string', + 'pattern': '^mailto:(.)+$', + 'x-example': 'mailto:support@quay.io', + 'description': 'Adds a link to send an e-mail', + }, + { + 'type': 'string', + 'pattern': '^irc://(.)+$', + 'x-example': 'irc://chat.freenode.net:6665/quay', + 'description': 'Adds a link to visit an IRC chat room', + }, + { + 'type': 'string', + 'pattern': '^tel:(.)+$', + 'x-example': 'tel:+1-888-930-3475', + 'description': 'Adds a link to call a phone number', + }, + { + 'type': 'string', + 'pattern': '^http(s)?://(.)+$', + 'x-example': 'https://twitter.com/quayio', + 'description': 'Adds a link to a defined URL', + }, + ], + }, + 'SEARCH_RESULTS_PER_PAGE' : { + 'type': 'number', + 'description': 'Number of results returned per page by search page. Defaults to 10', + 'x-example': 10, + }, + 'SEARCH_MAX_RESULT_PAGE_COUNT' : { + 'type': 'number', + 'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10', + 'x-example': 10, + }, + + # E-mail. + 'FEATURE_MAILING': { + 'type': 'boolean', + 'description': 'Whether emails are enabled. Defaults to True', + 'x-example': True, + }, + 'MAIL_SERVER': { + 'type': 'string', + 'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.', + 'x-example': 'smtp.somedomain.com', + }, + 'MAIL_USE_TLS': { + 'type': 'boolean', + 'description': 'If specified, whether to use TLS for sending e-mails.', + 'x-example': True, + }, + 'MAIL_PORT': { + 'type': 'number', + 'description': 'The SMTP port to use. 
If not specified, defaults to 587.', + 'x-example': 588, + }, + 'MAIL_USERNAME': { + 'type': ['string', 'null'], + 'description': 'The SMTP username to use when sending e-mails.', + 'x-example': 'myuser', + }, + 'MAIL_PASSWORD': { + 'type': ['string', 'null'], + 'description': 'The SMTP password to use when sending e-mails.', + 'x-example': 'mypassword', + }, + 'MAIL_DEFAULT_SENDER': { + 'type': ['string', 'null'], + 'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `support@quay.io`.', + 'x-example': 'support@myco.com', + }, + + # Database. + 'DB_URI': { + 'type': 'string', + 'description': 'The URI at which to access the database, including any credentials.', + 'x-example': 'mysql+pymysql://username:password@dns.of.database/quay', + 'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495', + }, + 'DB_CONNECTION_ARGS': { + 'type': 'object', + 'description': 'If specified, connection arguments for the database such as timeouts and SSL.', + 'properties': { + 'threadlocals': { + 'type': 'boolean', + 'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`' + }, + 'autorollback': { + 'type': 'boolean', + 'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`' + }, + 'ssl': { + 'type': 'object', + 'description': 'SSL connection configuration', + 'properties': { + 'ca': { + 'type': 'string', + 'description': '*Absolute container path* to the CA certificate to use for SSL connections', + 'x-example': 'conf/stack/ssl-ca-cert.pem', + }, + }, + 'required': ['ca'], + }, + }, + 'required': ['threadlocals', 'autorollback'], + }, + 'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': { + 'type': 'boolean', + 'description': 'If true, pulls in which the pull audit log entry cannot be written will ' + + 'still succeed. Useful if the database can fallback into a read-only state ' + + 'and it is desired for pulls to continue during that time. Defaults to False.', + 'x-example': True, + }, + + # Storage. + 'FEATURE_STORAGE_REPLICATION': { + 'type': 'boolean', + 'description': 'Whether to automatically replicate between storage engines. Defaults to False', + 'x-example': False, + }, + 'FEATURE_PROXY_STORAGE': { + 'type': 'boolean', + 'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False', + 'x-example': False, + }, + 'MAXIMUM_LAYER_SIZE': { + 'type': 'string', + 'description': 'Maximum allowed size of an image layer. Defaults to 20G', + 'x-example': '100G', + 'pattern': '^[0-9]+(G|M)$', + }, + 'DISTRIBUTED_STORAGE_CONFIG': { + 'type': 'object', + 'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' + + ' for a storage engine, with the value being a tuple of the type and ' + + ' configuration for that engine.', + 'x-example': { + 'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}], + }, + 'items': { + 'type': 'array', + }, + }, + 'DISTRIBUTED_STORAGE_PREFERENCE': { + 'type': 'array', + 'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' + + 'use. 
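
The base provider validates configuration against CONFIG_SCHEMA using jsonschema; a minimal sketch of the same check (the candidate dict is illustrative and omits most required keys, so validation fails and the error prints):

    from jsonschema import validate, ValidationError

    candidate = {
      'PREFERRED_URL_SCHEME': 'https',
      'SERVER_HOSTNAME': 'quay.example.com',
      # ...remaining required keys elided...
    }

    try:
      validate(candidate, CONFIG_SCHEMA)
    except ValidationError as ve:
      print('Invalid config: %s' % ve.message)
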
A preferred engine means it is first checked for pulling and images are ' +
+                     'pushed to it.',
+      'items': {
+        'type': 'string',
+        'uniqueItems': True,
+      },
+      'x-example': ['s3_us_east', 's3_us_west'],
+    },
+    'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': {
+      'type': 'array',
+      'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' +
+                     'images should be fully replicated, by default, to all other storage engines.',
+      'items': {
+        'type': 'string',
+        'uniqueItems': True,
+      },
+      'x-example': ['s3_us_east', 's3_us_west'],
+    },
+    'USERFILES_LOCATION': {
+      'type': 'string',
+      'description': 'ID of the storage engine in which to place user-uploaded files',
+      'x-example': 's3_us_east',
+    },
+    'USERFILES_PATH': {
+      'type': 'string',
+      'description': 'Path under storage in which to place user-uploaded files',
+      'x-example': 'userfiles',
+    },
+    'ACTION_LOG_ARCHIVE_LOCATION': {
+      'type': 'string',
+      'description': 'If action log archiving is enabled, the storage engine in which to place the ' +
+                     'archived data.',
+      'x-example': 's3_us_east',
+    },
+    'ACTION_LOG_ARCHIVE_PATH': {
+      'type': 'string',
+      'description': 'If action log archiving is enabled, the path in storage in which to place the ' +
+                     'archived data.',
+      'x-example': 'archives/actionlogs',
+    },
+    'LOG_ARCHIVE_LOCATION': {
+      'type': 'string',
+      'description': 'If builds are enabled, the storage engine in which to place the ' +
+                     'archived build logs.',
+      'x-example': 's3_us_east',
+    },
+    'LOG_ARCHIVE_PATH': {
+      'type': 'string',
+      'description': 'If builds are enabled, the path in storage in which to place the ' +
+                     'archived build logs.',
+      'x-example': 'archives/buildlogs',
+    },
+
+    # Authentication.
+    'AUTHENTICATION_TYPE': {
+      'type': 'string',
+      'description': 'The authentication engine to use for credential authentication.',
+      'x-example': 'Database',
+      'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC'],
+    },
+    'SUPER_USERS': {
+      'type': 'array',
+      'description': 'Quay usernames of those users to be granted superuser privileges',
+      'uniqueItems': True,
+      'items': {
+        'type': 'string',
+      },
+    },
+    'DIRECT_OAUTH_CLIENTID_WHITELIST': {
+      'type': 'array',
+      'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' +
+                     'to perform direct OAuth approval without user approval.',
+      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html',
+      'uniqueItems': True,
+      'items': {
+        'type': 'string',
+      },
+    },
+
+    # Redis. 
+ 'BUILDLOGS_REDIS': { + 'type': 'object', + 'description': 'Connection information for Redis for build logs caching', + 'required': ['host'], + 'properties': { + 'host': { + 'type': 'string', + 'description': 'The hostname at which Redis is accessible', + 'x-example': 'my.redis.cluster', + }, + 'port': { + 'type': 'number', + 'description': 'The port at which Redis is accessible', + 'x-example': 1234, + }, + 'password': { + 'type': 'string', + 'description': 'The password to connect to the Redis instance', + 'x-example': 'mypassword', + }, + }, + }, + 'USER_EVENTS_REDIS': { + 'type': 'object', + 'description': 'Connection information for Redis for user event handling', + 'required': ['host'], + 'properties': { + 'host': { + 'type': 'string', + 'description': 'The hostname at which Redis is accessible', + 'x-example': 'my.redis.cluster', + }, + 'port': { + 'type': 'number', + 'description': 'The port at which Redis is accessible', + 'x-example': 1234, + }, + 'password': { + 'type': 'string', + 'description': 'The password to connect to the Redis instance', + 'x-example': 'mypassword', + }, + }, + }, + + # OAuth configuration. + 'GITHUB_LOGIN_CONFIG': { + 'type': ['object', 'null'], + 'description': 'Configuration for using GitHub (Enterprise) as an external login provider', + 'required': ['CLIENT_ID', 'CLIENT_SECRET'], + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html', + 'properties': { + 'GITHUB_ENDPOINT': { + 'type': 'string', + 'description': 'The endpoint of the GitHub (Enterprise) being hit', + 'x-example': 'https://github.com/', + }, + 'API_ENDPOINT': { + 'type': 'string', + 'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com', + 'x-example': 'https://api.github.com/', + }, + 'CLIENT_ID': { + 'type': 'string', + 'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG', + 'x-example': '0e8dbe15c4c7630b6780', + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html', + }, + 'CLIENT_SECRET': { + 'type': 'string', + 'description': 'The registered client secret for this Quay instance', + 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html', + }, + 'ORG_RESTRICT': { + 'type': 'boolean', + 'description': 'If true, only users within the organization whitelist can login using this provider', + 'x-example': True, + }, + 'ALLOWED_ORGANIZATIONS': { + 'type': 'array', + 'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option', + 'uniqueItems': True, + 'items': { + 'type': 'string', + }, + }, + }, + }, + 'BITBUCKET_TRIGGER_CONFIG': { + 'type': ['object', 'null'], + 'description': 'Configuration for using BitBucket for build triggers', + 'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'], + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html', + 'properties': { + 'CONSUMER_KEY': { + 'type': 'string', + 'description': 'The registered consumer key (client ID) for this Quay instance', + 'x-example': '0e8dbe15c4c7630b6780', + }, + 'CONSUMER_SECRET': { + 'type': 'string', + 'description': 'The registered consumer secret (client secret) for this Quay instance', + 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', + }, + }, + }, + 'GITHUB_TRIGGER_CONFIG': { + 'type': ['object', 'null'], + 'description': 'Configuration for using GitHub (Enterprise) for build triggers', + 
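
How the Redis blocks are consumed is outside this patch; a sketch under the assumption that a standard redis-py client is built from them (`host` is the only required key):

    import redis

    buildlogs_conf = {'host': 'my.redis.cluster', 'port': 6379, 'password': 'mypassword'}
    client = redis.StrictRedis(host=buildlogs_conf['host'],
                               port=buildlogs_conf.get('port', 6379),
                               password=buildlogs_conf.get('password'))
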
'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'], + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html', + 'properties': { + 'GITHUB_ENDPOINT': { + 'type': 'string', + 'description': 'The endpoint of the GitHub (Enterprise) being hit', + 'x-example': 'https://github.com/', + }, + 'API_ENDPOINT': { + 'type': 'string', + 'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com', + 'x-example': 'https://api.github.com/', + }, + 'CLIENT_ID': { + 'type': 'string', + 'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG', + 'x-example': '0e8dbe15c4c7630b6780', + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html', + }, + 'CLIENT_SECRET': { + 'type': 'string', + 'description': 'The registered client secret for this Quay instance', + 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html', + }, + }, + }, + 'GOOGLE_LOGIN_CONFIG': { + 'type': ['object', 'null'], + 'description': 'Configuration for using Google for external authentication', + 'required': ['CLIENT_ID', 'CLIENT_SECRET'], + 'properties': { + 'CLIENT_ID': { + 'type': 'string', + 'description': 'The registered client ID for this Quay instance', + 'x-example': '0e8dbe15c4c7630b6780', + }, + 'CLIENT_SECRET': { + 'type': 'string', + 'description': 'The registered client secret for this Quay instance', + 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', + }, + }, + }, + 'GITLAB_TRIGGER_CONFIG': { + 'type': ['object', 'null'], + 'description': 'Configuration for using Gitlab (Enterprise) for external authentication', + 'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'], + 'properties': { + 'GITLAB_ENDPOINT': { + 'type': 'string', + 'description': 'The endpoint at which Gitlab(Enterprise) is running', + 'x-example': 'https://gitlab.com', + }, + 'CLIENT_ID': { + 'type': 'string', + 'description': 'The registered client ID for this Quay instance', + 'x-example': '0e8dbe15c4c7630b6780', + }, + 'CLIENT_SECRET': { + 'type': 'string', + 'description': 'The registered client secret for this Quay instance', + 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', + }, + }, + }, + + # Health. + 'HEALTH_CHECKER': { + 'description': 'The configured health check.', + 'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'}), + }, + + # Metrics. + 'PROMETHEUS_NAMESPACE': { + 'type': 'string', + 'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`', + 'x-example': 'myregistry', + }, + + # Misc configuration. + 'BLACKLIST_V2_SPEC': { + 'type': 'string', + 'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`', + 'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec', + 'x-example': '<1.8.0', + }, + 'USER_RECOVERY_TOKEN_LIFETIME': { + 'type': 'string', + 'description': 'The length of time a token for recovering a user accounts is valid. Defaults to 30m.', + 'x-example': '10m', + 'pattern': '^[0-9]+(w|m|d|h|s)$', + }, + 'SESSION_COOKIE_SECURE': { + 'type': 'boolean', + 'description': 'Whether the `secure` property should be set on session cookies. ' + + 'Defaults to False. 
Recommended to be True for all installations using SSL.',
+      'x-example': True,
+      'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies',
+    },
+    'PUBLIC_NAMESPACES': {
+      'type': 'array',
+      'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' +
+                     ' users\' repository list pages, regardless of whether that user is a member of the namespace.' +
+                     ' Typically, this is used by an enterprise customer in configuring a set of "well-known"' +
+                     ' namespaces.',
+      'uniqueItems': True,
+      'items': {
+        'type': 'string',
+      },
+    },
+    'AVATAR_KIND': {
+      'type': 'string',
+      'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)',
+      'enum': ['local', 'gravatar'],
+    },
+    'V2_PAGINATION_SIZE': {
+      'type': 'number',
+      'description': 'The number of results returned per page in V2 registry APIs',
+      'x-example': 100,
+    },
+    'ENABLE_HEALTH_DEBUG_SECRET': {
+      'type': ['string', 'null'],
+      'description': 'If specified, a secret that can be given to health endpoints to see full debug info when ' +
+                     'not authenticated as a superuser',
+      'x-example': 'somesecrethere',
+    },
+    'BROWSER_API_CALLS_XHR_ONLY': {
+      'type': 'boolean',
+      'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.',
+      'x-example': False,
+    },
+
+    # Time machine and tag expiration settings.
+    'FEATURE_CHANGE_TAG_EXPIRATION': {
+      'type': 'boolean',
+      'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.',
+      'x-example': False,
+    },
+    'DEFAULT_TAG_EXPIRATION': {
+      'type': 'string',
+      'description': 'The default, configurable tag expiration time for time machine. Defaults to `2w`.',
+      'pattern': '^[0-9]+(w|m|d|h|s)$',
+    },
+    'TAG_EXPIRATION_OPTIONS': {
+      'type': 'array',
+      'description': 'The options that users can select for expiration of tags in their namespace (if enabled)',
+      'items': {
+        'type': 'string',
+        'pattern': '^[0-9]+(w|m|d|h|s)$',
+      },
+    },
+
+    # Team syncing.
+    'FEATURE_TEAM_SYNCING': {
+      'type': 'boolean',
+      'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)',
+      'x-example': True,
+    },
+    'TEAM_RESYNC_STALE_TIME': {
+      'type': 'string',
+      'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)',
+      'x-example': '2h',
+      'pattern': '^[0-9]+(w|m|d|h|s)$',
+    },
+    'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {
+      'type': 'boolean',
+      'description': 'If enabled, non-superusers can set up syncing on teams to backing LDAP or Keystone. Defaults to False.',
+      'x-example': True,
+    },
+
+    # Security scanning.
+    'FEATURE_SECURITY_SCANNER': {
+      'type': 'boolean',
+      'description': 'Whether to turn on/off the security scanner. Defaults to False',
+      'x-example': False,
+      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html',
+    },
+    'FEATURE_SECURITY_NOTIFICATIONS': {
+      'type': 'boolean',
+      'description': 'If the security scanner is enabled, whether to turn on/off security notifications. 
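
Several properties above share the `^[0-9]+(w|m|d|h|s)$` pattern; a sketch of parsing such values into a timedelta (treating `m` as minutes is an assumption here, which may differ from the deployment's convention):

    import re
    from datetime import timedelta

    _UNITS = {'w': 'weeks', 'd': 'days', 'h': 'hours', 'm': 'minutes', 's': 'seconds'}

    def parse_duration(value):
      match = re.match(r'^([0-9]+)(w|m|d|h|s)$', value)
      if match is None:
        raise ValueError('Invalid duration: %s' % value)
      return timedelta(**{_UNITS[match.group(2)]: int(match.group(1))})

    assert parse_duration('2w') == timedelta(weeks=2)
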
Defaults to False', + 'x-example': False, + }, + 'SECURITY_SCANNER_ENDPOINT' : { + 'type': 'string', + 'pattern': '^http(s)?://(.)+$', + 'description': 'The endpoint for the security scanner', + 'x-example': 'http://192.168.99.101:6060' , + }, + 'SECURITY_SCANNER_INDEXING_INTERVAL': { + 'type': 'number', + 'description': 'The number of seconds between indexing intervals in the security scanner. Defaults to 30.', + 'x-example': 30, + }, + + # Bittorrent support. + 'FEATURE_BITTORRENT': { + 'type': 'boolean', + 'description': 'Whether to allow using Bittorrent-based pulls. Defaults to False', + 'x-example': False, + 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bittorrent.html', + }, + 'BITTORRENT_PIECE_SIZE': { + 'type': 'number', + 'description': 'The bittorent piece size to use. If not specified, defaults to 512 * 1024.', + 'x-example': 512 * 1024, + }, + 'BITTORRENT_ANNOUNCE_URL': { + 'type': 'string', + 'pattern': '^http(s)?://(.)+$', + 'description': 'The URL of the announce endpoint on the bittorrent tracker', + 'x-example': 'https://localhost:6881/announce', + }, + + # Build + 'FEATURE_GITHUB_BUILD': { + 'type': 'boolean', + 'description': 'Whether to support GitHub build triggers. Defaults to False', + 'x-example': False, + }, + 'FEATURE_BITBUCKET_BUILD': { + 'type': 'boolean', + 'description': 'Whether to support Bitbucket build triggers. Defaults to False', + 'x-example': False, + }, + 'FEATURE_GITLAB_BUILD': { + 'type': 'boolean', + 'description': 'Whether to support GitLab build triggers. Defaults to False', + 'x-example': False, + }, + 'FEATURE_BUILD_SUPPORT': { + 'type': 'boolean', + 'description': 'Whether to support Dockerfile build. Defaults to True', + 'x-example': True, + }, + 'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': { + 'type': ['number', 'null'], + 'description': 'If not None, the default maximum number of builds that can be queued in a namespace.', + 'x-example': 20, + }, + 'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': { + 'type': ['number', 'null'], + 'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.', + 'x-example': 10, + }, + 'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': { + 'type': ['number', 'null'], + 'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.', + 'x-example': 50, + }, + + # Login + 'FEATURE_GITHUB_LOGIN': { + 'type': 'boolean', + 'description': 'Whether GitHub login is supported. Defaults to False', + 'x-example': False, + }, + 'FEATURE_GOOGLE_LOGIN': { + 'type': 'boolean', + 'description': 'Whether Google login is supported. Defaults to False', + 'x-example': False, + }, + + # Recaptcha + 'FEATURE_RECAPTCHA': { + 'type': 'boolean', + 'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False', + 'x-example': False, + 'x-reference': 'https://www.google.com/recaptcha/intro/', + }, + 'RECAPTCHA_SITE_KEY': { + 'type': ['string', 'null'], + 'description': 'If recaptcha is enabled, the site key for the Recaptcha service', + }, + 'RECAPTCHA_SECRET_KEY': { + 'type': ['string', 'null'], + 'description': 'If recaptcha is enabled, the secret key for the Recaptcha service', + }, + + # External application tokens. + 'FEATURE_APP_SPECIFIC_TOKENS': { + 'type': 'boolean', + 'description': 'If enabled, users can create tokens for use by the Docker CLI. 
Defaults to True', + 'x-example': False, + }, + + 'APP_SPECIFIC_TOKEN_EXPIRATION': { + 'type': ['string', 'null'], + 'description': 'The expiration for external app tokens. Defaults to None.', + 'pattern': '^[0-9]+(w|m|d|h|s)$', + }, + + 'EXPIRED_APP_SPECIFIC_TOKEN_GC': { + 'type': ['string', 'null'], + 'description': 'Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.', + 'pattern': '^[0-9]+(w|m|d|h|s)$', + }, + + # Feature Flag: Permanent Sessions. + 'FEATURE_PERMANENT_SESSIONS': { + 'type': 'boolean', + 'description': 'Whether sessions are permanent. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Super User Support. + 'FEATURE_SUPER_USERS': { + 'type': 'boolean', + 'description': 'Whether super users are supported. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Anonymous Users. + 'FEATURE_ANONYMOUS_ACCESS': { + 'type': 'boolean', + 'description': ' Whether to allow anonymous users to browse and pull public repositories. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: User Creation. + 'FEATURE_USER_CREATION': { + 'type': 'boolean', + 'description': 'Whether users can be created (by non-super users). Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Invite Only User Creation. + 'FEATURE_INVITE_ONLY_USER_CREATION': { + 'type': 'boolean', + 'description': 'Whether users being created must be invited by another user. Defaults to False', + 'x-example': False, + }, + + # Feature Flag: Encrypted Basic Auth. + 'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': { + 'type': 'boolean', + 'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False', + 'x-example': False, + }, + + # Feature Flag: Direct Login. + 'FEATURE_DIRECT_LOGIN': { + 'type': 'boolean', + 'description': 'Whether users can directly login to the UI. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Advertising V2. + 'FEATURE_ADVERTISE_V2': { + 'type': 'boolean', + 'description': 'Whether the v2/ endpoint is visible. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Log Rotation. + 'FEATURE_ACTION_LOG_ROTATION': { + 'type': 'boolean', + 'description': 'Whether or not to rotate old action logs to storage. Defaults to False', + 'x-example': False, + }, + + # Feature Flag: ACI Conversion. + 'FEATURE_ACI_CONVERSION': { + 'type': 'boolean', + 'description': 'Whether to enable conversion to ACIs. Defaults to False', + 'x-example': False, + }, + + # Feature Flag: Library Support. + 'FEATURE_LIBRARY_SUPPORT': { + 'type': 'boolean', + 'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Require Team Invite. + 'FEATURE_REQUIRE_TEAM_INVITE': { + 'type': 'boolean', + 'description': 'Whether to require invitations when adding a user to a team. Defaults to True', + 'x-example': True, + }, + + # Feature Flag: Collecting and Supporting Metadata. + 'FEATURE_USER_METADATA': { + 'type': 'boolean', + 'description': 'Whether to collect and support user metadata. Defaults to False', + 'x-example': False, + }, + + # Feature Flag: Support App Registry. + 'FEATURE_APP_REGISTRY': { + 'type': 'boolean', + 'description': 'Whether to enable support for App repositories. Defaults to False', + 'x-example': False, + }, + + # Feature Flag: Public Reposiotires in _catalog Endpoint. 
+    'FEATURE_PUBLIC_CATALOG': {
+      'type': 'boolean',
+      'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False',
+      'x-example': False,
+    },
+
+    # Feature Flag: Reader Build Logs.
+    'FEATURE_READER_BUILD_LOGS': {
+      'type': 'boolean',
+      'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False',
+      'x-example': False,
+    },
+
+    # Feature Flag: Usernames Autocomplete.
+    'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {
+      'type': 'boolean',
+      'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True',
+      'x-example': True,
+    },
+
+    # Feature Flag: User log access.
+    'FEATURE_USER_LOG_ACCESS': {
+      'type': 'boolean',
+      'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False',
+      'x-example': True,
+    },
+
+    # Feature Flag: User renaming.
+    'FEATURE_USER_RENAME': {
+      'type': 'boolean',
+      'description': 'If set to true, users can rename their own namespace. Defaults to False',
+      'x-example': True,
+    },
+  },
+}
+
diff --git a/config_app/config_util/config/testprovider.py b/config_app/config_util/config/testprovider.py
new file mode 100644
index 000000000..87f0309c3
--- /dev/null
+++ b/config_app/config_util/config/testprovider.py
@@ -0,0 +1,81 @@
+import json
+import io
+import os
+from datetime import datetime, timedelta
+
+from config_util.config.baseprovider import BaseProvider
+
+REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem']
+
+
+class TestConfigProvider(BaseProvider):
+  """ Implementation of the config provider for testing. Everything is kept in-memory instead of on
+      the real file system. 
""" + def __init__(self): + self.clear() + + def clear(self): + self.files = {} + self._config = {} + + @property + def provider_id(self): + return 'test' + + def update_app_config(self, app_config): + self._config = app_config + + def get_config(self): + if not 'config.yaml' in self.files: + return None + + return json.loads(self.files.get('config.yaml', '{}')) + + def save_config(self, config_obj): + self.files['config.yaml'] = json.dumps(config_obj) + + def config_exists(self): + return 'config.yaml' in self.files + + def volume_exists(self): + return True + + def volume_file_exists(self, filename): + if filename in REAL_FILES: + return True + + return filename in self.files + + def save_volume_file(self, filename, flask_file): + self.files[filename] = flask_file.read() + + def write_volume_file(self, filename, contents): + self.files[filename] = contents + + def get_volume_file(self, filename, mode='r'): + if filename in REAL_FILES: + return open(filename, mode=mode) + + return io.BytesIO(self.files[filename]) + + def remove_volume_file(self, filename): + self.files.pop(filename, None) + + def list_volume_directory(self, path): + paths = [] + for filename in self.files: + if filename.startswith(path): + paths.append(filename[len(path)+1:]) + + return paths + + def requires_restart(self, app_config): + return False + + def reset_for_test(self): + self._config['SUPER_USERS'] = ['devtable'] + self.files = {} + + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) + diff --git a/config_app/config_util/log.py b/config_app/config_util/log.py new file mode 100644 index 000000000..4a934953a --- /dev/null +++ b/config_app/config_util/log.py @@ -0,0 +1,47 @@ +import os +from _init_config import CONF_DIR + + +def logfile_path(jsonfmt=False, debug=False): + """ + Returns the a logfileconf path following this rules: + - conf/logging_debug_json.conf # jsonfmt=true, debug=true + - conf/logging_json.conf # jsonfmt=true, debug=false + - conf/logging_debug.conf # jsonfmt=false, debug=true + - conf/logging.conf # jsonfmt=false, debug=false + Can be parametrized via envvars: JSONLOG=true, DEBUGLOG=true + """ + _json = "" + _debug = "" + + if jsonfmt or os.getenv('JSONLOG', 'false').lower() == 'true': + _json = "_json" + + if debug or os.getenv('DEBUGLOG', 'false').lower() == 'true': + _debug = "_debug" + + return os.path.join(CONF_DIR, "logging%s%s.conf" % (_debug, _json)) + + +def filter_logs(values, filtered_fields): + """ + Takes a dict and a list of keys to filter. + eg: + with filtered_fields: + [{'key': ['k1', k2'], 'fn': lambda x: 'filtered'}] + and values: + {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'} + the returned dict is: + {'k1': {k2: 'filtered'}, 'k3': 'some-value'} + """ + for field in filtered_fields: + cdict = values + + for key in field['key'][:-1]: + if key in cdict: + cdict = cdict[key] + + last_key = field['key'][-1] + + if last_key in cdict and cdict[last_key]: + cdict[last_key] = field['fn'](cdict[last_key]) diff --git a/config_app/config_util/ssl.py b/config_app/config_util/ssl.py new file mode 100644 index 000000000..f14d2c04e --- /dev/null +++ b/config_app/config_util/ssl.py @@ -0,0 +1,81 @@ +from fnmatch import fnmatch + +import OpenSSL + +class CertInvalidException(Exception): + """ Exception raised when a certificate could not be parsed/loaded. """ + pass + +class KeyInvalidException(Exception): + """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. 
""" + pass + + +def load_certificate(cert_contents): + """ Loads the certificate from the given contents and returns it or raises a CertInvalidException + on failure. + """ + try: + cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents) + return SSLCertificate(cert) + except OpenSSL.crypto.Error as ex: + raise CertInvalidException(ex.message[0][2]) + + +_SUBJECT_ALT_NAME = 'subjectAltName' + +class SSLCertificate(object): + """ Helper class for easier working with SSL certificates. """ + def __init__(self, openssl_cert): + self.openssl_cert = openssl_cert + + def validate_private_key(self, private_key_path): + """ Validates that the private key found at the given file path applies to this certificate. + Raises a KeyInvalidException on failure. + """ + context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD) + context.use_certificate(self.openssl_cert) + + try: + context.use_privatekey_file(private_key_path) + context.check_privatekey() + except OpenSSL.SSL.Error as ex: + raise KeyInvalidException(ex.message[0][2]) + + def matches_name(self, check_name): + """ Returns true if this SSL certificate matches the given DNS hostname. """ + for dns_name in self.names: + if fnmatch(check_name, dns_name): + return True + + return False + + @property + def expired(self): + """ Returns whether the SSL certificate has expired. """ + return self.openssl_cert.has_expired() + + @property + def common_name(self): + """ Returns the defined common name for the certificate, if any. """ + return self.openssl_cert.get_subject().commonName + + @property + def names(self): + """ Returns all the DNS named to which the certificate applies. May be empty. """ + dns_names = set() + common_name = self.common_name + if common_name is not None: + dns_names.add(common_name) + + # Find the DNS extension, if any. + for i in range(0, self.openssl_cert.get_extension_count()): + ext = self.openssl_cert.get_extension(i) + if ext.get_short_name() == _SUBJECT_ALT_NAME: + value = str(ext) + for san_name in value.split(','): + san_name_trimmed = san_name.strip() + if san_name_trimmed.startswith('DNS:'): + dns_names.add(san_name_trimmed[4:]) + + return dns_names diff --git a/config_app/config_util/workers.py b/config_app/config_util/workers.py new file mode 100644 index 000000000..f86f1d1bf --- /dev/null +++ b/config_app/config_util/workers.py @@ -0,0 +1,32 @@ +import os +import psutil + + +def get_worker_count(worker_kind_name, multiplier, minimum=None, maximum=None): + """ Returns the number of gunicorn workers to run for the given worker kind, + based on a combination of environment variable, multiplier, minimum (if any), + and number of accessible CPU cores. + """ + minimum = minimum or multiplier + maximum = maximum or (multiplier * multiplier) + + # Check for an override via an environment variable. + override_value = os.environ.get('WORKER_COUNT_' + worker_kind_name.upper()) + if override_value is not None: + return max(override_value, minimum) + + override_value = os.environ.get('WORKER_COUNT') + if override_value is not None: + return max(override_value, minimum) + + # Load the number of CPU cores via affinity, and use that to calculate the + # number of workers to run. + p = psutil.Process(os.getpid()) + + try: + cpu_count = len(p.cpu_affinity()) + except AttributeError: + # cpu_affinity isn't supported on this platform. Assume 2. 
+ cpu_count = 2 + + return min(max(cpu_count * multiplier, minimum), maximum) diff --git a/config_app/web.py b/config_app/config_web.py similarity index 64% rename from config_app/web.py rename to config_app/config_web.py index 967a4a11a..29339541f 100644 --- a/config_app/web.py +++ b/config_app/config_web.py @@ -1,8 +1,8 @@ -from app import app as application -from config_endpoints.setup_web import setup_web +from config_app import app as application from config_endpoints.api import api_bp +from config_endpoints.setup_web import setup_web -# application.register_blueprint(setup_web) +application.register_blueprint(setup_web) application.register_blueprint(api_bp, url_prefix='/api') diff --git a/config_app/js/config-app.module.ts b/config_app/js/config-app.module.ts index 59fe1bf20..3f9439c42 100644 --- a/config_app/js/config-app.module.ts +++ b/config_app/js/config-app.module.ts @@ -32,8 +32,6 @@ function provideConfig($provide: ng.auto.IProvideService, // Configure the API provider. RestangularProvider.setBaseUrl('/api/v1/'); - - console.log('i'); } diff --git a/config_app/js/core-config-setup/core-config-setup.js b/config_app/js/core-config-setup/core-config-setup.js index d069ab186..d7f91d9bf 100644 --- a/config_app/js/core-config-setup/core-config-setup.js +++ b/config_app/js/core-config-setup/core-config-setup.js @@ -29,8 +29,6 @@ angular.module("quay-config") 'configurationSaved': '&configurationSaved' }, controller: function($rootScope, $scope, $element, $timeout, ApiService) { - console.log('in the controller of the configSetupTool') - var authPassword = null; $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9\.]+(:[0-9]+)?$'; @@ -1415,11 +1413,11 @@ angular.module("quay-config") }); }; - UserService.updateUserIn($scope, function(user) { - if (!user.anonymous) { - loadCertificates(); - } - }); + // UserService.updateUserIn($scope, function(user) { + // console.log(user) + // no need to check for user, since it's all local + loadCertificates(); + // }); $scope.handleCertsSelected = function(files, callback) { $scope.certsUploading = true; diff --git a/config_app/js/main.ts b/config_app/js/main.ts index 9ca931a8d..b2e2d9b74 100644 --- a/config_app/js/main.ts +++ b/config_app/js/main.ts @@ -14,11 +14,8 @@ require('../../static/js/tar'); const ng1QuayModule: string = bundle(ConfigAppModule, []).name; angular.module('quay-config', [ng1QuayModule]) .run(() => { - console.log(' init run was called') }); -console.log('Hello world! I\'m the config app'); - declare var require: any; function requireAll(r) { r.keys().forEach(r); diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js index eaa16b746..ace9cce64 100644 --- a/config_app/js/services/api-service.js +++ b/config_app/js/services/api-service.js @@ -325,7 +325,6 @@ angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilS }; // todo: remove hacks - apiService.scGetConfig = () => new Promise(() => { hello: true }); apiService.scRegistryStatus = () => new Promise(() => { hello: true }); return apiService; diff --git a/config_app/js/services/user-service.js b/config_app/js/services/user-service.js index 918ad9adb..3203e9185 100644 --- a/config_app/js/services/user-service.js +++ b/config_app/js/services/user-service.js @@ -182,30 +182,6 @@ function(ApiService, CookieService, $rootScope, Config, $location, $timeout) { return externalUsername || userResponse.username; }; - userService.deleteNamespace = function(info, callback) { - var namespace = info.user ? 
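
A sketch of how the helpers in config_util/ssl.py above might back a certificate-upload check (the hostname and error messages are illustrative; only load_certificate, CertInvalidException, expired, and matches_name come from the module):

    def validate_uploaded_cert(cert_contents, server_hostname='quay.example.com'):
      try:
        cert = load_certificate(cert_contents)
      except CertInvalidException as cie:
        raise Exception('Could not parse certificate: %s' % cie)

      if cert.expired:
        raise Exception('The certificate has expired')

      if not cert.matches_name(server_hostname):
        raise Exception('Certificate does not match the server hostname')
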
info.user.username : info.organization.name; - if (!namespace) { - return; - } - - var errorDisplay = ApiService.errorDisplay('Could not delete namespace', callback); - var cb = function(resp) { - userService.load(function(currentUser) { - callback(true); - $location.path('/'); - }); - } - - if (info.user) { - ApiService.deleteCurrentUser().then(cb, errorDisplay) - } else { - var delParams = { - 'orgname': info.organization.name - }; - ApiService.deleteAdminedOrganization(null, delParams).then(cb, errorDisplay); - } - }; - userService.currentUser = function() { return userResponse; }; diff --git a/config_app/loghandler_config.py b/config_app/loghandler_config.py new file mode 100755 index 000000000..d3d9948cb --- /dev/null +++ b/config_app/loghandler_config.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import datetime +import json +import logging +import re +import traceback + + +LOG_FORMAT_REGEXP = re.compile(r'\((.+?)\)', re.IGNORECASE) + + +def _json_default(obj): + """ + Coerce everything to strings. + All objects representing time get output as ISO8601. + """ + if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)): + return obj.isoformat() + + elif isinstance(obj, Exception): + return "Exception: %s" % str(obj) + + return str(obj) + + +# skip natural LogRecord attributes +# http://docs.python.org/library/logging.html#logrecord-attributes +RESERVED_ATTRS = set([ + 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'levelname', + 'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process', + 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName' +]) + + +class JsonFormatter(logging.Formatter): + """ + A custom formatter to format logging records as json strings. + extra values will be formatted as str() if nor supported by + json default encoder + """ + + def __init__(self, *args, **kwargs): + """ + :param json_default: a function for encoding non-standard objects + as outlined in http://docs.python.org/2/library/json.html + :param json_encoder: optional custom encoder + :param json_serializer: a :meth:`json.dumps`-compatible callable + that will be used to serialize the log record. + :param prefix: an optional key prefix to nest logs + """ + self.json_default = kwargs.pop("json_default", _json_default) + self.json_encoder = kwargs.pop("json_encoder", None) + self.json_serializer = kwargs.pop("json_serializer", json.dumps) + self.default_values = kwargs.pop("default_extra", {}) + self.prefix_key = kwargs.pop("prefix_key", "data") + + logging.Formatter.__init__(self, *args, **kwargs) + + self._fmt_parameters = self._parse_format_string() + self._skip_fields = set(self._fmt_parameters) + self._skip_fields.update(RESERVED_ATTRS) + + def _parse_format_string(self): + """Parses format string looking for substitutions""" + standard_formatters = LOG_FORMAT_REGEXP + return standard_formatters.findall(self._fmt) + + def add_fields(self, log_record, record, message_dict): + """ + Override this method to implement custom logic for adding fields. 
+ """ + + target = log_record + if self.prefix_key: + log_record[self.prefix_key] = {} + target = log_record[self.prefix_key] + + for field, value in record.__dict__.iteritems(): + if field in self._fmt_parameters and field in RESERVED_ATTRS: + log_record[field] = value + elif field not in RESERVED_ATTRS: + target[field] = value + + target.update(message_dict) + target.update(self.default_values) + + def format(self, record): + """Formats a log record and serializes to json""" + message_dict = {} + if isinstance(record.msg, dict): + message_dict = record.msg + record.message = None + if "message" in message_dict: + record.message = message_dict.pop("message", "") + else: + record.message = record.getMessage() + + # only format time if needed + if "asctime" in self._fmt_parameters: + record.asctime = self.formatTime(record, self.datefmt) + + # Display formatted exception, but allow overriding it in the + # user-supplied dict. + if record.exc_info and not message_dict.get('exc_info'): + message_dict['exc_info'] = traceback.format_list(traceback.extract_tb(record.exc_info[2])) + log_record = {} + + self.add_fields(log_record, record, message_dict) + + return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder) diff --git a/config_app/util/config.py b/config_app/util/config.py deleted file mode 100644 index f01e2565d..000000000 --- a/config_app/util/config.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -from util.config.provider import TestConfigProvider, KubernetesConfigProvider, FileConfigProvider - -ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) -CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) -OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') - - -def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False): - """ Loads and returns the config provider for the current environment. 
""" - if testing: - return TestConfigProvider() - - if kubernetes: - return KubernetesConfigProvider(config_volume, yaml_filename, py_filename) - - return FileConfigProvider(config_volume, yaml_filename, py_filename) - - -config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', - testing=False, kubernetes=False) \ No newline at end of file From 841053f878e0978a9c357f914e378fa7e42b5d37 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Wed, 23 May 2018 16:57:26 -0400 Subject: [PATCH 07/14] Change import paths to be absolute, change pythonpath for config app --- config_app/Procfile | 2 +- config_app/{config_app.py => c_app.py} | 6 +- config_app/conf/gunicorn_local.py | 4 +- config_app/config_application.py | 2 +- config_app/config_endpoints/api/__init__.py | 14 +- config_app/config_endpoints/api/discovery.py | 6 +- config_app/config_endpoints/api/suconfig.py | 4 +- config_app/config_endpoints/api/superuser.py | 101 ++-- .../api/superuser_models_interface.py | 448 ++++++++++++++++++ .../api/superuser_models_pre_oci.py | 274 +++++++++++ config_app/config_endpoints/api/user.py | 2 +- config_app/config_endpoints/setup_web.py | 4 +- config_app/config_util/config/__init__.py | 6 +- .../config_util/config/basefileprovider.py | 2 +- config_app/config_util/config/baseprovider.py | 2 +- config_app/config_util/config/fileprovider.py | 4 +- config_app/config_util/config/k8sprovider.py | 4 +- config_app/config_util/config/testprovider.py | 2 +- config_app/config_web.py | 6 +- 19 files changed, 814 insertions(+), 79 deletions(-) rename config_app/{config_app.py => c_app.py} (81%) create mode 100644 config_app/config_endpoints/api/superuser_models_interface.py create mode 100644 config_app/config_endpoints/api/superuser_models_pre_oci.py diff --git a/config_app/Procfile b/config_app/Procfile index ac20de89b..22dd965c1 100644 --- a/config_app/Procfile +++ b/config_app/Procfile @@ -1,3 +1,3 @@ -app: PYTHONPATH="./" gunicorn -c conf/gunicorn_local.py config_application:application +app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py config_application:application # webpack: npm run watch-config-app diff --git a/config_app/config_app.py b/config_app/c_app.py similarity index 81% rename from config_app/config_app.py rename to config_app/c_app.py index 616ecc157..4fbf1d1a2 100644 --- a/config_app/config_app.py +++ b/config_app/c_app.py @@ -2,7 +2,7 @@ import os import logging from flask import Flask from _init_config import CONF_DIR -from config_util.config import get_config_provider +from config_app.config_util.config import get_config_provider app = Flask(__name__) @@ -18,11 +18,11 @@ config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', testing=is_testing, kubernetes=is_kubernetes) if is_testing: - from config_test.testconfig import TestConfig + from config_app.config_test.testconfig import TestConfig logger.debug('Loading test config.') app.config.from_object(TestConfig()) else: - from config_app_config import DefaultConfig + from config_app.config_app_config import DefaultConfig logger.debug('Loading default config.') app.config.from_object(DefaultConfig()) # app.teardown_request(database.close_db_filter) diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py index 96cd7d19c..7fcd83a9c 100644 --- a/config_app/conf/gunicorn_local.py +++ b/config_app/conf/gunicorn_local.py @@ -5,8 +5,8 @@ sys.path.append(os.path.join(os.path.dirname(__file__), "../")) import logging from Crypto import Random -from config_util.log import 
logfile_path -from config_util.workers import get_worker_count +from config_app.config_util.log import logfile_path +from config_app.config_util.workers import get_worker_count logconfig = logfile_path(debug=True) diff --git a/config_app/config_application.py b/config_app/config_application.py index b408a7984..5c8835b66 100644 --- a/config_app/config_application.py +++ b/config_app/config_application.py @@ -1,4 +1,4 @@ -from config_app import app as application +from config_app.c_app import app as application # Bind all of the blueprints import config_web diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py index e220e27ed..b5df7b405 100644 --- a/config_app/config_endpoints/api/__init__.py +++ b/config_app/config_endpoints/api/__init__.py @@ -3,10 +3,13 @@ import logging from flask import Blueprint from flask_restful import Resource, Api from flask_restful.utils.cors import crossdomain -from config_app import app +from email.utils import formatdate +from calendar import timegm from functools import partial, wraps from jsonschema import validate, ValidationError -from config_endpoints.exception import InvalidResponse + +from config_app.c_app import app +from config_app.config_endpoints.exception import InvalidResponse logger = logging.getLogger(__name__) api_bp = Blueprint('api', __name__) @@ -28,6 +31,13 @@ api = ApiExceptionHandlingApi() api.init_app(api_bp) +def format_date(date): + """ Output an RFC822 date format. """ + if date is None: + return None + return formatdate(timegm(date.utctimetuple())) + + def verify_not_prod(func): @add_method_metadata('enterprise_only', True) @wraps(func) diff --git a/config_app/config_endpoints/api/discovery.py b/config_app/config_endpoints/api/discovery.py index dda178c62..70246847c 100644 --- a/config_app/config_endpoints/api/discovery.py +++ b/config_app/config_endpoints/api/discovery.py @@ -2,9 +2,9 @@ import logging import sys from collections import OrderedDict -from config_app import app -from config_endpoints.api import method_metadata -from config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER +from config_app.c_app import app +from config_app.config_endpoints.api import method_metadata +from config_app.config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER logger = logging.getLogger(__name__) diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index 02a3cb2d4..bc17ce3af 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -1,7 +1,7 @@ import logging -from config_endpoints.api import resource, ApiResource, verify_not_prod, nickname -from config_app import app, config_provider +from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname +from config_app.c_app import app, config_provider logger = logging.getLogger(__name__) diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py index 227257a56..5cb26fc9d 100644 --- a/config_app/config_endpoints/api/superuser.py +++ b/config_app/config_endpoints/api/superuser.py @@ -1,12 +1,14 @@ import os import logging import pathvalidate -from flask import request +from flask import request, jsonify -from config_endpoints.exception import InvalidRequest -from config_endpoints.api import resource, ApiResource, verify_not_prod, nickname -from config_util.ssl import load_certificate, CertInvalidException -from config_app 
import app, config_provider +from config_app.config_endpoints.exception import InvalidRequest +from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname +from config_app.config_util.ssl import load_certificate, CertInvalidException +from config_app.c_app import app, config_provider + +from config_app.config_endpoints.api.superuser_models_pre_oci import pre_oci_model logger = logging.getLogger(__name__) EXTRA_CA_DIRECTORY = 'extra_ca_certs' @@ -104,48 +106,49 @@ class SuperUserCustomCertificates(ApiResource): 'certs': cert_views, } -# TODO(config) port this endpoint when (https://github.com/quay/quay/pull/3055) merged to ensure no conflicts -# @resource('/v1/superuser/keys') -# class SuperUserServiceKeyManagement(ApiResource): -# """ Resource for managing service keys.""" -# schemas = { -# 'CreateServiceKey': { -# 'id': 'CreateServiceKey', -# 'type': 'object', -# 'description': 'Description of creation of a service key', -# 'required': ['service', 'expiration'], -# 'properties': { -# 'service': { -# 'type': 'string', -# 'description': 'The service authenticating with this key', -# }, -# 'name': { -# 'type': 'string', -# 'description': 'The friendly name of a service key', -# }, -# 'metadata': { -# 'type': 'object', -# 'description': 'The key/value pairs of this key\'s metadata', -# }, -# 'notes': { -# 'type': 'string', -# 'description': 'If specified, the extra notes for the key', -# }, -# 'expiration': { -# 'description': 'The expiration date as a unix timestamp', -# 'anyOf': [{'type': 'number'}, {'type': 'null'}], -# }, -# }, -# }, -# } -# -# @verify_not_prod -# @nickname('listServiceKeys') -# def get(self): -# keys = pre_oci_model.list_all_service_keys() -# -# return jsonify({ -# 'keys': [key.to_dict() for key in keys], -# }) -# + +# TODO(config) port this endpoint when (https://github.com/quay/quay/pull/3055) merged to ensure no conflicts +@resource('/v1/superuser/keys') +class SuperUserServiceKeyManagement(ApiResource): + """ Resource for managing service keys.""" + schemas = { + 'CreateServiceKey': { + 'id': 'CreateServiceKey', + 'type': 'object', + 'description': 'Description of creation of a service key', + 'required': ['service', 'expiration'], + 'properties': { + 'service': { + 'type': 'string', + 'description': 'The service authenticating with this key', + }, + 'name': { + 'type': 'string', + 'description': 'The friendly name of a service key', + }, + 'metadata': { + 'type': 'object', + 'description': 'The key/value pairs of this key\'s metadata', + }, + 'notes': { + 'type': 'string', + 'description': 'If specified, the extra notes for the key', + }, + 'expiration': { + 'description': 'The expiration date as a unix timestamp', + 'anyOf': [{'type': 'number'}, {'type': 'null'}], + }, + }, + }, + } + + @verify_not_prod + @nickname('listServiceKeys') + def get(self): + keys = pre_oci_model.list_all_service_keys() + + return jsonify({ + 'keys': [key.to_dict() for key in keys], + }) + diff --git a/config_app/config_endpoints/api/superuser_models_interface.py b/config_app/config_endpoints/api/superuser_models_interface.py new file mode 100644 index 000000000..23d672f2b --- /dev/null +++ b/config_app/config_endpoints/api/superuser_models_interface.py @@ -0,0 +1,448 @@ +import json +from abc import ABCMeta, abstractmethod +from collections import namedtuple +from datetime import datetime + +from dateutil.relativedelta import relativedelta +from six import add_metaclass +from tzlocal import get_localzone + +# from app import avatar, superusers +# from 
buildtrigger.basehandler import BuildTriggerHandler
+from data import model
+from config_app.config_endpoints.api import format_date
+from util.morecollections import AttrDict
+
+
+def user_view(user):
+ return {
+ 'name': user.username,
+ 'kind': 'user',
+ 'is_robot': user.robot,
+ }
+
+
+# class BuildTrigger(
+# namedtuple('BuildTrigger', ['uuid', 'service_name', 'pull_robot', 'can_read', 'can_admin', 'for_build'])):
+# """
+# BuildTrigger represents a trigger that is associated with a build
+# :type uuid: string
+# :type service_name: string
+# :type pull_robot: User
+# :type can_read: boolean
+# :type can_admin: boolean
+# :type for_build: boolean
+# """
+#
+# def to_dict(self):
+# if not self.uuid:
+# return None
+#
+# build_trigger = BuildTriggerHandler.get_handler(self)
+# build_source = build_trigger.config.get('build_source')
+#
+# repo_url = build_trigger.get_repository_url() if build_source else None
+# can_read = self.can_read or self.can_admin
+#
+# trigger_data = {
+# 'id': self.uuid,
+# 'service': self.service_name,
+# 'is_active': build_trigger.is_active(),
+#
+# 'build_source': build_source if can_read else None,
+# 'repository_url': repo_url if can_read else None,
+#
+# 'config': build_trigger.config if self.can_admin else {},
+# 'can_invoke': self.can_admin,
+# }
+#
+# if not self.for_build and self.can_admin and self.pull_robot:
+# trigger_data['pull_robot'] = user_view(self.pull_robot)
+#
+# return trigger_data
+
+
+class RepositoryBuild(namedtuple('RepositoryBuild',
+ ['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name',
+ 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name',
+ 'started', 'job_config', 'phase', 'status', 'error', 'archive_url'])):
+ """
+ RepositoryBuild represents a build associated with a repository
+ :type uuid: string
+ :type logs_archived: boolean
+ :type repository_namespace_user_username: string
+ :type repository_name: string
+ :type can_write: boolean
+ :type can_read: boolean
+ :type pull_robot: User
+ :type resource_key: string
+ :type trigger: Trigger
+ :type display_name: string
+ :type started: boolean
+ :type job_config: {Any -> Any}
+ :type phase: string
+ :type status: string
+ :type error: string
+ :type archive_url: string
+ """
+
+ def to_dict(self):
+
+ resp = {
+ 'id': self.uuid,
+ 'phase': self.phase,
+ 'started': format_date(self.started),
+ 'display_name': self.display_name,
+ 'status': self.status or {},
+ 'subdirectory': self.job_config.get('build_subdir', ''),
+ 'dockerfile_path': self.job_config.get('build_subdir', ''),
+ 'context': self.job_config.get('context', ''),
+ 'tags': self.job_config.get('docker_tags', []),
+ 'manual_user': self.job_config.get('manual_user', None),
+ 'is_writer': self.can_write,
+ 'trigger': self.trigger.to_dict(),
+ 'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None,
+ 'resource_key': self.resource_key,
+ 'pull_robot': user_view(self.pull_robot) if self.pull_robot else None,
+ 'repository': {
+ 'namespace': self.repository_namespace_user_username,
+ 'name': self.repository_name
+ },
+ 'error': self.error,
+ }
+
+ if self.can_write:
+ if self.resource_key is not None:
+ resp['archive_url'] = self.archive_url
+ elif self.job_config.get('archive_url', None):
+ resp['archive_url'] = self.job_config['archive_url']
+
+ return resp
+
+
+class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])):
+ """
+ Approval represents whether a key has been approved or not
+ :type 
approver: User
+ :type approval_type: string
+ :type approved_date: Date
+ :type notes: string
+ """
+
+ def to_dict(self):
+ return {
+ 'approver': self.approver.to_dict() if self.approver else None,
+ 'approval_type': self.approval_type,
+ 'approved_date': self.approved_date,
+ 'notes': self.notes,
+ }
+
+
+class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date',
+ 'expiration_date', 'rotation_duration', 'approval'])):
+ """
+ ServiceKey is an apostille signing key
+ :type name: string
+ :type kid: int
+ :type service: string
+ :type jwk: string
+ :type metadata: string
+ :type created_date: Date
+ :type expiration_date: Date
+ :type rotation_duration: Date
+ :type approval: Approval
+
+ """
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'kid': self.kid,
+ 'service': self.service,
+ 'jwk': self.jwk,
+ 'metadata': self.metadata,
+ 'created_date': self.created_date,
+ 'expiration_date': self.expiration_date,
+ 'rotation_duration': self.rotation_duration,
+ 'approval': self.approval.to_dict() if self.approval is not None else None,
+ }
+
+
+class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])):
+ """
+ User represents a single user.
+ :type username: string
+ :type email: string
+ :type verified: boolean
+ :type enabled: boolean
+ :type robot: User
+ """
+
+ def to_dict(self):
+ user_data = {
+ 'kind': 'user',
+ 'name': self.username,
+ 'username': self.username,
+ 'email': self.email,
+ 'verified': self.verified,
+ # todo(config) remove or add these lines from app
+ # 'avatar': avatar.get_data_for_user(self),
+ # 'super_user': superusers.is_superuser(self.username),
+ 'enabled': self.enabled,
+ }
+
+ return user_data
+
+
+class Organization(namedtuple('Organization', ['username', 'email'])):
+ """
+ Organization represents a single org.
+ :type username: string
+ :type email: string
+ """
+
+ def to_dict(self):
+ return {
+ 'name': self.username,
+ 'email': self.email,
+ # todo(config) remove or add these lines from app
+ # 'avatar': avatar.get_data_for_org(self),
+ }
+
+
+class LogEntry(
+ namedtuple('LogEntry', [
+ 'metadata_json', 'ip', 'datetime', 'performer_email', 'performer_username', 'performer_robot',
+ 'account_organization', 'account_username', 'account_email', 'account_robot', 'kind',
+ ])):
+ """
+ LogEntry represents a single log entry. 
+ :type metadata_json: string + :type ip: string + :type datetime: string + :type performer_email: int + :type performer_username: string + :type performer_robot: boolean + :type account_organization: boolean + :type account_username: string + :type account_email: string + :type account_robot: boolean + :type kind_id: int + """ + + def to_dict(self): + view = { + 'kind': self.kind, + 'metadata': json.loads(self.metadata_json), + 'ip': self.ip, + 'datetime': format_date(self.datetime), + } + + if self.performer_username: + performer = AttrDict({'username': self.performer_username, 'email': self.performer_email}) + performer.robot = None + if self.performer_robot: + performer.robot = self.performer_robot + + view['performer'] = { + 'kind': 'user', + 'name': self.performer_username, + 'is_robot': self.performer_robot, + # todo(config) remove or add these lines from app + # 'avatar': avatar.get_data_for_user(performer), + } + + if self.account_username: + account = AttrDict({'username': self.account_username, 'email': self.account_email}) + if self.account_organization: + + view['namespace'] = { + 'kind': 'org', + 'name': self.account_username, + # todo(config) remove or add these lines from app + # 'avatar': avatar.get_data_for_org(account), + } + else: + account.robot = None + if self.account_robot: + account.robot = self.account_robot + view['namespace'] = { + 'kind': 'user', + 'name': self.account_username, + # todo(config) remove or add these lines from app + # 'avatar': avatar.get_data_for_user(account), + } + + return view + + +class LogEntryPage( + namedtuple('LogEntryPage', ['logs', 'next_page_token'])): + """ + LogEntryPage represents a single page of logs. + :type logs: [LogEntry] + :type next_page_token: {any -> any} + """ + + +class AggregatedLogEntry( + namedtuple('AggregatedLogEntry', ['count', 'kind_id', 'day', 'start_time'])): + """ + AggregatedLogEntry represents an aggregated view of logs. + :type count: int + :type kind_id: int + :type day: string + :type start_time: Date + """ + + def to_dict(self): + synthetic_date = datetime(self.start_time.year, self.start_time.month, int(self.day), tzinfo=get_localzone()) + if synthetic_date.day < self.start_time.day: + synthetic_date = synthetic_date + relativedelta(months=1) + kinds = model.log.get_log_entry_kinds() + view = { + 'kind': kinds[self.kind_id], + 'count': self.count, + 'datetime': format_date(synthetic_date), + } + + return view + + +@add_metaclass(ABCMeta) +class SuperuserDataInterface(object): + """ + Interface that represents all data store interactions required by a superuser api. + """ + + @abstractmethod + def get_logs_query(self, start_time, end_time, page_token=None): + """ + Returns a LogEntryPage. 
+ """ + + @abstractmethod + def get_aggregated_logs(self, start_time, end_time): + """ + Returns a list of AggregatedLogEntry + """ + + @abstractmethod + def get_organizations(self): + """ + Returns a list of Organization + """ + + @abstractmethod + def get_active_users(self): + """ + Returns a list of User + """ + + @abstractmethod + def create_install_user(self, username, password, email): + """ + Returns the created user and confirmation code for email confirmation + """ + + @abstractmethod + def get_nonrobot_user(self, username): + """ + Returns a User + """ + + @abstractmethod + def create_reset_password_email_code(self, email): + """ + Returns a recover password code + """ + + @abstractmethod + def mark_user_for_deletion(self, username): + """ + Returns None + """ + + @abstractmethod + def change_password(self, username, password): + """ + Returns None + """ + + @abstractmethod + def update_email(self, username, email, auto_verify): + """ + Returns None + """ + + @abstractmethod + def update_enabled(self, username, enabled): + """ + Returns None + """ + + @abstractmethod + def take_ownership(self, namespace, authed_user): + """ + Returns id of entity and whether the entity was a user + """ + + @abstractmethod + def mark_organization_for_deletion(self, name): + """ + Returns None + """ + + @abstractmethod + def change_organization_name(self, old_org_name, new_org_name): + """ + Returns updated Organization + """ + + @abstractmethod + def list_all_service_keys(self): + """ + Returns a list of service keys + """ + + @abstractmethod + def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None): + """ + Returns a tuple of private key and public key id + """ + + @abstractmethod + def approve_service_key(self, kid, approver, approval_type, notes=''): + """ + Returns the approved Key + """ + + @abstractmethod + def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): + """ + Returns ServiceKey + """ + + @abstractmethod + def set_key_expiration(self, kid, expiration_date): + """ + Returns None + """ + + @abstractmethod + def update_service_key(self, kid, name=None, metadata=None): + """ + Returns None + """ + + @abstractmethod + def delete_service_key(self, kid): + """ + Returns deleted ServiceKey + """ + + @abstractmethod + def get_repository_build(self, uuid): + """ + Returns RepositoryBuild + """ diff --git a/config_app/config_endpoints/api/superuser_models_pre_oci.py b/config_app/config_endpoints/api/superuser_models_pre_oci.py new file mode 100644 index 000000000..352c8f38d --- /dev/null +++ b/config_app/config_endpoints/api/superuser_models_pre_oci.py @@ -0,0 +1,274 @@ +from data import model + +from config_app.config_endpoints.api.superuser_models_interface import SuperuserDataInterface, User, ServiceKey, Approval + +# +# def _create_log(log, log_kind): +# account_organization = None +# account_username = None +# account_email = None +# account_robot = None +# try: +# account_organization = log.account.organization +# account_username = log.account.username +# account_email = log.account.email +# account_robot = log.account.robot +# except AttributeError: +# pass +# +# performer_robot = None +# performer_username = None +# performer_email = None +# +# try: +# performer_robot = log.performer.robot +# performer_username = log.performer.username +# performer_email = log.performer.email +# except AttributeError: +# pass +# +# return LogEntry(log.metadata_json, log.ip, log.datetime, performer_email, 
performer_username, +# performer_robot, account_organization, account_username, +# account_email, account_robot, log_kind[log.kind_id]) + + +def _create_user(user): + if user is None: + return None + return User(user.username, user.email, user.verified, user.enabled, user.robot) + + +def _create_key(key): + approval = None + if key.approval is not None: + approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, key.approval.approved_date, + key.approval.notes) + + return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date, + key.rotation_duration, approval) +# +# +# class ServiceKeyDoesNotExist(Exception): +# pass +# +# +# class ServiceKeyAlreadyApproved(Exception): +# pass +# +# +# class InvalidRepositoryBuildException(Exception): +# pass + + +class PreOCIModel(SuperuserDataInterface): + """ + PreOCIModel implements the data model for the SuperUser using a database schema + before it was changed to support the OCI specification. + """ + def get_logs_query(self, start_time, end_time, page_token=None): + pass + + def get_aggregated_logs(self, start_time, end_time): + pass + + def get_organizations(self): + pass + + def get_active_users(self): + pass + + def create_install_user(self, username, password, email): + pass + + def get_nonrobot_user(self, username): + pass + + def create_reset_password_email_code(self, email): + pass + + def mark_user_for_deletion(self, username): + pass + + def change_password(self, username, password): + pass + + def update_email(self, username, email, auto_verify): + pass + + def update_enabled(self, username, enabled): + pass + + def take_ownership(self, namespace, authed_user): + pass + + def mark_organization_for_deletion(self, name): + pass + + def change_organization_name(self, old_org_name, new_org_name): + pass + + def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None): + pass + + def approve_service_key(self, kid, approver, approval_type, notes=''): + pass + + def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): + pass + + def set_key_expiration(self, kid, expiration_date): + pass + + def update_service_key(self, kid, name=None, metadata=None): + pass + + def delete_service_key(self, kid): + pass + + def get_repository_build(self, uuid): + pass + + # def get_repository_build(self, uuid): + # try: + # build = model.build.get_repository_build(uuid) + # except model.InvalidRepositoryBuildException as e: + # raise InvalidRepositoryBuildException(e.message) + # + # repo_namespace = build.repository_namespace_user_username + # repo_name = build.repository_name + # + # can_read = ReadRepositoryPermission(repo_namespace, repo_name).can() + # can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can() + # can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can() + # job_config = get_job_config(build.job_config) + # phase, status, error = _get_build_status(build) + # url = userfiles.get_file_url(self.resource_key, request.remote_addr, requires_cors=True) + # + # return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace, repo_name, can_write, can_read, + # _create_user(build.pull_robot), build.resource_key, + # BuildTrigger(build.trigger.uuid, build.trigger.service.name, + # _create_user(build.trigger.pull_robot), can_read, can_admin, True), + # build.display_name, build.display_name, build.started, job_config, phase, status, error, url) + # + # def 
delete_service_key(self, kid): + # try: + # key = model.service_keys.delete_service_key(kid) + # except model.ServiceKeyDoesNotExist: + # raise ServiceKeyDoesNotExist + # return _create_key(key) + # + # def update_service_key(self, kid, name=None, metadata=None): + # model.service_keys.update_service_key(kid, name, metadata) + # + # def set_key_expiration(self, kid, expiration_date): + # model.service_keys.set_key_expiration(kid, expiration_date) + # + # def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): + # try: + # key = model.service_keys.get_service_key(kid, approved_only=approved_only, alive_only=alive_only) + # return _create_key(key) + # except model.ServiceKeyDoesNotExist: + # raise ServiceKeyDoesNotExist + # + # def approve_service_key(self, kid, approver, approval_type, notes=''): + # try: + # key = model.service_keys.approve_service_key(kid, approver, approval_type, notes=notes) + # return _create_key(key) + # except model.ServiceKeyDoesNotExist: + # raise ServiceKeyDoesNotExist + # except model.ServiceKeyAlreadyApproved: + # raise ServiceKeyAlreadyApproved + # + # def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None): + # (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name) + # + # return private_key, key.kid + + def list_all_service_keys(self): + keys = model.service_keys.list_all_keys() + return [_create_key(key) for key in keys] + + # def change_organization_name(self, old_org_name, new_org_name): + # org = model.organization.get_organization(old_org_name) + # if new_org_name is not None: + # org = model.user.change_username(org.id, new_org_name) + # + # return Organization(org.username, org.email) + # + # def mark_organization_for_deletion(self, name): + # org = model.organization.get_organization(name) + # model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue, force=True) + # + # def take_ownership(self, namespace, authed_user): + # entity = model.user.get_user_or_org(namespace) + # if entity is None: + # return None, False + # + # was_user = not entity.organization + # if entity.organization: + # # Add the superuser as an admin to the owners team of the org. + # model.organization.add_user_as_admin(authed_user, entity) + # else: + # # If the entity is a user, convert it to an organization and add the current superuser + # # as the admin. 
+ # model.organization.convert_user_to_organization(entity, authed_user) + # return entity.id, was_user + # + # def update_enabled(self, username, enabled): + # user = model.user.get_nonrobot_user(username) + # model.user.update_enabled(user, bool(enabled)) + # + # def update_email(self, username, email, auto_verify): + # user = model.user.get_nonrobot_user(username) + # model.user.update_email(user, email, auto_verify) + # + # def change_password(self, username, password): + # user = model.user.get_nonrobot_user(username) + # model.user.change_password(user, password) + # + # def mark_user_for_deletion(self, username): + # user = model.user.get_nonrobot_user(username) + # model.user.mark_namespace_for_deletion(user, all_queues, namespace_gc_queue, force=True) + # + # def create_reset_password_email_code(self, email): + # code = model.user.create_reset_password_email_code(email) + # return code.code + # + # def get_nonrobot_user(self, username): + # user = model.user.get_nonrobot_user(username) + # if user is None: + # return None + # return _create_user(user) + # + # def create_install_user(self, username, password, email): + # prompts = model.user.get_default_user_prompts(features) + # user = model.user.create_user(username, password, email, auto_verify=not features.MAILING, + # email_required=features.MAILING, prompts=prompts) + # + # return_user = _create_user(user) + # # If mailing is turned on, send the user a verification email. + # if features.MAILING: + # confirmation = model.user.create_confirm_email_code(user) + # return return_user, confirmation.code + # return return_user, '' + # + # def get_active_users(self, disabled=True): + # users = model.user.get_active_users(disabled=disabled) + # return [_create_user(user) for user in users] + # + # def get_organizations(self): + # return [Organization(org.username, org.email) for org in model.organization.get_organizations()] + # + # def get_aggregated_logs(self, start_time, end_time): + # aggregated_logs = model.log.get_aggregated_logs(start_time, end_time) + # return [AggregatedLogEntry(log.count, log.kind_id, log.day, start_time) for log in aggregated_logs] + # + # def get_logs_query(self, start_time, end_time, page_token=None): + # logs_query = model.log.get_logs_query(start_time, end_time) + # logs, next_page_token = model.modelutil.paginate(logs_query, database.LogEntry, descending=True, + # page_token=page_token, limit=20) + # kinds = model.log.get_log_entry_kinds() + # return LogEntryPage([_create_log(log, kinds) for log in logs], next_page_token) + + +pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py index b7ff870cf..d8a6449c3 100644 --- a/config_app/config_endpoints/api/user.py +++ b/config_app/config_endpoints/api/user.py @@ -1,4 +1,4 @@ -from config_endpoints.api import resource, ApiResource, nickname +from config_app.config_endpoints.api import resource, ApiResource, nickname @resource('/v1/user/') diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py index 44a8f5cbd..541aa3df3 100644 --- a/config_app/config_endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -1,6 +1,6 @@ from flask import Blueprint -from config_endpoints.common import render_page_template -from config_endpoints.api.discovery import generate_route_data +from config_app.config_endpoints.common import render_page_template +from config_app.config_endpoints.api.discovery import generate_route_data # from config_util.cache 
import no_cache diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py index b7b940d4d..16b3c0ffe 100644 --- a/config_app/config_util/config/__init__.py +++ b/config_app/config_util/config/__init__.py @@ -1,6 +1,6 @@ -from config_util.config.fileprovider import FileConfigProvider -from config_util.config.testprovider import TestConfigProvider -from config_util.config.k8sprovider import KubernetesConfigProvider +from config_app.config_util.config.fileprovider import FileConfigProvider +from config_app.config_util.config.testprovider import TestConfigProvider +from config_app.config_util.config.k8sprovider import KubernetesConfigProvider def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False): diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py index 1bcf497a2..0ed1e9d35 100644 --- a/config_app/config_util/config/basefileprovider.py +++ b/config_app/config_util/config/basefileprovider.py @@ -1,7 +1,7 @@ import os import logging -from config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, +from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, CannotWriteConfigException) logger = logging.getLogger(__name__) diff --git a/config_app/config_util/config/baseprovider.py b/config_app/config_util/config/baseprovider.py index ce6c6589c..6fef3b870 100644 --- a/config_app/config_util/config/baseprovider.py +++ b/config_app/config_util/config/baseprovider.py @@ -6,7 +6,7 @@ from six import add_metaclass from jsonschema import validate, ValidationError -from config_util.config.schema import CONFIG_SCHEMA +from config_app.config_util.config.schema import CONFIG_SCHEMA logger = logging.getLogger(__name__) diff --git a/config_app/config_util/config/fileprovider.py b/config_app/config_util/config/fileprovider.py index 95da64330..385fe501f 100644 --- a/config_app/config_util/config/fileprovider.py +++ b/config_app/config_util/config/fileprovider.py @@ -1,8 +1,8 @@ import os import logging -from config_util.config.baseprovider import export_yaml, CannotWriteConfigException -from config_util.config.basefileprovider import BaseFileProvider +from config_app.config_util.config.baseprovider import export_yaml, CannotWriteConfigException +from config_app.config_util.config.basefileprovider import BaseFileProvider logger = logging.getLogger(__name__) diff --git a/config_app/config_util/config/k8sprovider.py b/config_app/config_util/config/k8sprovider.py index 5d65af70b..57d0a5f8c 100644 --- a/config_app/config_util/config/k8sprovider.py +++ b/config_app/config_util/config/k8sprovider.py @@ -6,8 +6,8 @@ import time from requests import Request, Session -from config_util.config.baseprovider import CannotWriteConfigException, get_yaml -from config_util.config.basefileprovider import BaseFileProvider +from config_app.config_util.config.baseprovider import CannotWriteConfigException, get_yaml +from config_app.config_util.config.basefileprovider import BaseFileProvider logger = logging.getLogger(__name__) diff --git a/config_app/config_util/config/testprovider.py b/config_app/config_util/config/testprovider.py index 87f0309c3..32e0127c8 100644 --- a/config_app/config_util/config/testprovider.py +++ b/config_app/config_util/config/testprovider.py @@ -3,7 +3,7 @@ import io import os from datetime import datetime, timedelta -from config_util.config.baseprovider import BaseProvider +from 
config_app.config_util.config.baseprovider import BaseProvider REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem'] diff --git a/config_app/config_web.py b/config_app/config_web.py index 29339541f..487f8b78e 100644 --- a/config_app/config_web.py +++ b/config_app/config_web.py @@ -1,6 +1,6 @@ -from config_app import app as application -from config_endpoints.api import api_bp -from config_endpoints.setup_web import setup_web +from config_app.c_app import app as application +from config_app.config_endpoints.api import api_bp +from config_app.config_endpoints.setup_web import setup_web application.register_blueprint(setup_web) From acf242f241901eebc830f697f6587f02c33ecbe2 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Thu, 24 May 2018 10:52:42 -0400 Subject: [PATCH 08/14] Port some suconfig/superuser endpoints, with data.model references --- config_app/c_app.py | 4 + config_app/config_endpoints/api/__init__.py | 24 +- config_app/config_endpoints/api/suconfig.py | 290 +++++++++++++++++- .../api/suconfig_models_interface.py | 39 +++ .../api/suconfig_models_pre_oci.py | 35 +++ 5 files changed, 379 insertions(+), 13 deletions(-) create mode 100644 config_app/config_endpoints/api/suconfig_models_interface.py create mode 100644 config_app/config_endpoints/api/suconfig_models_pre_oci.py diff --git a/config_app/c_app.py b/config_app/c_app.py index 4fbf1d1a2..449563181 100644 --- a/config_app/c_app.py +++ b/config_app/c_app.py @@ -4,6 +4,9 @@ from flask import Flask from _init_config import CONF_DIR from config_app.config_util.config import get_config_provider + +from util.config.superusermanager import SuperUserManager + app = Flask(__name__) logger = logging.getLogger(__name__) @@ -29,3 +32,4 @@ else: # Load the override config via the provider. 
config_provider.update_app_config(app.config) +superusers = SuperUserManager(app) diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py index b5df7b405..fab34ffdd 100644 --- a/config_app/config_endpoints/api/__init__.py +++ b/config_app/config_endpoints/api/__init__.py @@ -1,6 +1,6 @@ import logging -from flask import Blueprint +from flask import Blueprint, request from flask_restful import Resource, Api from flask_restful.utils.cors import crossdomain from email.utils import formatdate @@ -9,7 +9,7 @@ from functools import partial, wraps from jsonschema import validate, ValidationError from config_app.c_app import app -from config_app.config_endpoints.exception import InvalidResponse +from config_app.config_endpoints.exception import InvalidResponse, InvalidRequest logger = logging.getLogger(__name__) api_bp = Blueprint('api', __name__) @@ -128,6 +128,26 @@ def define_json_response(schema_name): return wrapper +def validate_json_request(schema_name, optional=False): + def wrapper(func): + @add_method_metadata('request_schema', schema_name) + @wraps(func) + def wrapped(self, *args, **kwargs): + schema = self.schemas[schema_name] + try: + json_data = request.get_json() + if json_data is None: + if not optional: + raise InvalidRequest('Missing JSON body') + else: + validate(json_data, schema) + return func(self, *args, **kwargs) + except ValidationError as ex: + raise InvalidRequest(ex.message) + return wrapped + return wrapper + + nickname = partial(add_method_metadata, 'nickname') diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index bc17ce3af..79e759ec2 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -1,11 +1,39 @@ import logging +import os +import subprocess +import signal -from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname -from config_app.c_app import app, config_provider +from flask import abort, request + +from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model +from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname, validate_json_request +from config_app.c_app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY + +from auth.auth_context import get_authenticated_user +from data.users import get_federated_service_name, get_users_handler +from data.database import configure +from data.runmigration import run_alembic_migration +from util.config.configutil import add_enterprise_config_defaults +from util.config.database import sync_database_with_config +# TODO(config) re-add this import when we get the app extracted from validators +# from util.config.validator import validate_service_for_config logger = logging.getLogger(__name__) +def database_is_valid(): + """ Returns whether the database, as configured, is valid. """ + if app.config['TESTING']: + return False + + return model.is_valid() + + +def database_has_users(): + """ Returns whether the database has any users defined. """ + return model.has_users() + + @resource('/v1/superuser/config') class SuperUserConfig(ApiResource): """ Resource for fetching and updating the current configuration, if any. """ @@ -43,6 +71,56 @@ class SuperUserConfig(ApiResource): 'config': config_object } + @nickname('scUpdateConfig') + @verify_not_prod + @validate_json_request('UpdateConfig') + def put(self): + """ Updates the config override file. 
""" + # Note: This method is called to set the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. + # if not config_provider.config_exists() or SuperUserPermission().can(): + if not config_provider.config_exists(): + config_object = request.get_json()['config'] + hostname = request.get_json()['hostname'] + + # Add any enterprise defaults missing from the config. + add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname) + + # Write the configuration changes to the config override file. + config_provider.save_config(config_object) + + # If the authentication system is federated, link the superuser account to the + # the authentication system chosen. + service_name = get_federated_service_name(config_object['AUTHENTICATION_TYPE']) + if service_name is not None: + current_user = get_authenticated_user() + if current_user is None: + abort(401) + + service_name = get_federated_service_name(config_object['AUTHENTICATION_TYPE']) + if not model.has_federated_login(current_user.username, service_name): + # Verify the user's credentials and retrieve the user's external username+email. + handler = get_users_handler(config_object, config_provider, OVERRIDE_CONFIG_DIRECTORY) + (result, err_msg) = handler.verify_credentials(current_user.username, + request.get_json().get('password', '')) + if not result: + logger.error('Could not save configuration due to external auth failure: %s', err_msg) + abort(400) + + # Link the existing user to the external user. + model.attach_federated_login(current_user.username, service_name, result.username) + + # Ensure database is up-to-date with config + sync_database_with_config(config_object) + + return { + 'exists': True, + 'config': config_object + } + + abort(403) + + @resource('/v1/superuser/registrystatus') class SuperUserRegistryStatus(ApiResource): @@ -75,13 +153,203 @@ class SuperUserRegistryStatus(ApiResource): } # If the database isn't yet valid, then we need to set it up. - # if not database_is_valid(): - # return { - # 'status': 'setup-db' - # } - # - # return { - # 'status': 'create-superuser' if not database_has_users() else 'config' - # } + if not database_is_valid(): + return { + 'status': 'setup-db' + } - return {} + return { + 'status': 'create-superuser' if not database_has_users() else 'config' + } + + +class _AlembicLogHandler(logging.Handler): + def __init__(self): + super(_AlembicLogHandler, self).__init__() + self.records = [] + + def emit(self, record): + self.records.append({ + 'level': record.levelname, + 'message': record.getMessage() + }) + + +@resource('/v1/superuser/setupdb') +class SuperUserSetupDatabase(ApiResource): + """ Resource for invoking alembic to setup the database. """ + @verify_not_prod + @nickname('scSetupDatabase') + def get(self): + """ Invokes the alembic upgrade process. """ + # Note: This method is called after the database configured is saved, but before the + # database has any tables. Therefore, we only allow it to be run in that unique case. + if config_provider.config_exists() and not database_is_valid(): + # Note: We need to reconfigure the database here as the config has changed. 
+ combined = dict(**app.config) + combined.update(config_provider.get_config()) + + configure(combined) + app.config['DB_URI'] = combined['DB_URI'] + + log_handler = _AlembicLogHandler() + + try: + run_alembic_migration(log_handler) + except Exception as ex: + return { + 'error': str(ex) + } + + return { + 'logs': log_handler.records + } + + abort(403) + + +# From: https://stackoverflow.com/a/44712205 +def get_process_id(name): + """Return process ids found by (partial) name or regex. + + >>> get_process_id('kthreadd') + [2] + >>> get_process_id('watchdog') + [10, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61] # ymmv + >>> get_process_id('non-existent process') + [] + """ + child = subprocess.Popen(['pgrep', name], stdout=subprocess.PIPE, shell=False) + response = child.communicate()[0] + return [int(pid) for pid in response.split()] + + +@resource('/v1/superuser/shutdown') +class SuperUserShutdown(ApiResource): + """ Resource for sending a shutdown signal to the container. """ + + @verify_not_prod + @nickname('scShutdownContainer') + def post(self): + """ Sends a signal to the phusion init system to shut down the container. """ + # Note: This method is called to set the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. + + # if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can(): + if app.config['TESTING'] or not database_has_users(): + # Note: We skip if debugging locally. + if app.config.get('DEBUGGING') == True: + return {} + + os.kill(get_process_id('my_init')[0], signal.SIGINT) + return {} + + abort(403) + + +@resource('/v1/superuser/config/createsuperuser') +class SuperUserCreateInitialSuperUser(ApiResource): + """ Resource for creating the initial super user. """ + schemas = { + 'CreateSuperUser': { + 'type': 'object', + 'description': 'Information for creating the initial super user', + 'required': [ + 'username', + 'password', + 'email' + ], + 'properties': { + 'username': { + 'type': 'string', + 'description': 'The username for the superuser' + }, + 'password': { + 'type': 'string', + 'description': 'The password for the superuser' + }, + 'email': { + 'type': 'string', + 'description': 'The e-mail address for the superuser' + }, + }, + }, + } + + @nickname('scCreateInitialSuperuser') + @validate_json_request('CreateSuperUser') + def post(self): + """ Creates the initial super user, updates the underlying configuration and + sets the current session to have that super user. """ + + # Special security check: This method is only accessible when: + # - There is a valid config YAML file. + # - There are currently no users in the database (clean install) + # + # We do this special security check because at the point this method is called, the database + # is clean but does not (yet) have any super users for our permissions code to check against. + if config_provider.config_exists() and not database_has_users(): + data = request.get_json() + username = data['username'] + password = data['password'] + email = data['email'] + + # Create the user in the database. + superuser_uuid = model.create_superuser(username, password, email) + + # Add the user to the config. + config_object = config_provider.get_config() + config_object['SUPER_USERS'] = [username] + config_provider.save_config(config_object) + + # Update the in-memory config for the new superuser. + # TODO(config): do we need to register a list of the superusers? 
If so, we can take out the entire superuser in c_app
+ superusers.register_superuser(username)
+
+ # Conduct login with that user.
+ # TODO(config): assuming we don't need to login the user
+ # common_login(superuser_uuid)
+
+ return {
+ 'status': True
+ }
+
+ abort(403)
+
+
+@resource('/v1/superuser/config/validate/')
+class SuperUserConfigValidate(ApiResource):
+ """ Resource for validating a block of configuration against an external service. """
+ schemas = {
+ 'ValidateConfig': {
+ 'type': 'object',
+ 'description': 'Validates configuration',
+ 'required': [
+ 'config'
+ ],
+ 'properties': {
+ 'config': {
+ 'type': 'object'
+ },
+ 'password': {
+ 'type': 'string',
+ 'description': 'The users password, used for auth validation'
+ }
+ },
+ },
+ }
+
+ @nickname('scValidateConfig')
+ @verify_not_prod
+ @validate_json_request('ValidateConfig')
+ def post(self, service):
+ """ Validates the given config for the given service. """
+ # Note: This method is called to validate the database configuration before super users exists,
+ # so we also allow it to be called if there is no valid registry configuration setup. Note that
+ # this is also safe since this method does not access any information not given in the request.
+ # if not config_provider.config_exists() or SuperUserPermission().can():
+ if not config_provider.config_exists():
+ config = request.get_json()['config']
+ return validate_service_for_config(service, config, request.get_json().get('password', ''))
+
+ abort(403)
diff --git a/config_app/config_endpoints/api/suconfig_models_interface.py b/config_app/config_endpoints/api/suconfig_models_interface.py
new file mode 100644
index 000000000..4b99170c5
--- /dev/null
+++ b/config_app/config_endpoints/api/suconfig_models_interface.py
@@ -0,0 +1,39 @@
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+
+@add_metaclass(ABCMeta)
+class SuperuserConfigDataInterface(object):
+ """
+ Interface that represents all data store interactions required by the superuser config API.
+ """
+
+ @abstractmethod
+ def is_valid(self):
+ """
+ Returns true if the configured database is valid.
+ """
+
+ @abstractmethod
+ def has_users(self):
+ """
+ Returns true if there are any users defined.
+ """
+
+ @abstractmethod
+ def create_superuser(self, username, password, email):
+ """
+ Creates a new superuser with the given username, password and email. Returns the user's UUID.
+ """
+
+ @abstractmethod
+ def has_federated_login(self, username, service_name):
+ """
+ Returns true if the matching user has a federated login under the matching service.
+ """
+
+ @abstractmethod
+ def attach_federated_login(self, username, service_name, federated_username):
+ """
+ Attaches a federated login to the matching user, under the given service.
+ """
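
Aside (illustrative, not part of the patch series): the interface above hides every data-store
interaction behind five abstract methods, and the pre-OCI model in the next file is just one
backend for it. A minimal sketch of an alternative backend satisfying the same contract — the
InMemoryModel name and its dict storage are hypothetical, for illustration only:

    from config_app.config_endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface

    class InMemoryModel(SuperuserConfigDataInterface):
        # Hypothetical stand-in backend: keeps users in a dict instead of the database.
        def __init__(self):
            self._users = {}    # username -> (password, email)
            self._logins = {}   # username -> set of federated service names

        def is_valid(self):
            return True  # nothing to probe; a real backend checks database connectivity

        def has_users(self):
            return bool(self._users)

        def create_superuser(self, username, password, email):
            self._users[username] = (password, email)
            return 'fake-uuid-' + username  # the real model returns the new row's uuid

        def has_federated_login(self, username, service_name):
            return service_name in self._logins.get(username, set())

        def attach_federated_login(self, username, service_name, federated_username):
            self._logins.setdefault(username, set()).add(service_name)

Because the suconfig endpoints only call through this interface (model.has_users(),
model.create_superuser(...)), such a backend can be swapped in without touching the endpoint code.
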
+ """ diff --git a/config_app/config_endpoints/api/suconfig_models_pre_oci.py b/config_app/config_endpoints/api/suconfig_models_pre_oci.py new file mode 100644 index 000000000..df83b8e9f --- /dev/null +++ b/config_app/config_endpoints/api/suconfig_models_pre_oci.py @@ -0,0 +1,35 @@ +from data import model +from data.database import User +from config_app.config_endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface + + +class PreOCIModel(SuperuserConfigDataInterface): + def is_valid(self): + try: + list(User.select().limit(1)) + return True + except: + return False + + def has_users(self): + return bool(list(User.select().limit(1))) + + def create_superuser(self, username, password, email): + return model.user.create_user(username, password, email, auto_verify=True).uuid + + def has_federated_login(self, username, service_name): + user = model.user.get_user(username) + if user is None: + return False + + return bool(model.user.lookup_federated_login(user, service_name)) + + def attach_federated_login(self, username, service_name, federated_username): + user = model.user.get_user(username) + if user is None: + return False + + model.user.attach_federated_login(user, service_name, federated_username) + + +pre_oci_model = PreOCIModel() From 13293ecdea4851e1102814c8ff67cac03b454967 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Mon, 4 Jun 2018 14:14:19 -0400 Subject: [PATCH 09/14] Fix dockerfile being able to pass in params Change config directory to local config_app one --- Dockerfile | 3 ++- config_app/Procfile | 2 +- config_app/_init_config.py | 6 ++---- config_app/c_app.py | 7 +++++-- config_app/conf/gunicorn_local.py | 2 +- config_app/config_app_config.py | 4 ++-- config_app/config_endpoints/api/suconfig.py | 14 +++++++------- config_app/config_util/log.py | 2 +- config_app/init/service/gunicorn_web/run | 11 +++++++++++ config_app/js/services/api-service.js | 3 --- config_app/js/setup/setup.component.js | 1 + quay-entrypoint.sh | 8 ++++++-- util/config/validators/validate_secscan.py | 6 +++--- util/config/validators/validate_torrent.py | 6 +++--- 14 files changed, 45 insertions(+), 30 deletions(-) create mode 100755 config_app/init/service/gunicorn_web/run diff --git a/Dockerfile b/Dockerfile index ef4cf3ee1..34d68e93a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -138,4 +138,5 @@ RUN ./scripts/detect-config.sh EXPOSE 443 8443 80 -CMD ./quay-entrypoint.sh +ENTRYPOINT [ "/bin/bash", "./quay-entrypoint.sh"] + diff --git a/config_app/Procfile b/config_app/Procfile index 22dd965c1..16b3fb8a4 100644 --- a/config_app/Procfile +++ b/config_app/Procfile @@ -1,3 +1,3 @@ app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py config_application:application -# webpack: npm run watch-config-app +webpack: npm run watch-config-app diff --git a/config_app/_init_config.py b/config_app/_init_config.py index 494edad57..1ab66a338 100644 --- a/config_app/_init_config.py +++ b/config_app/_init_config.py @@ -3,7 +3,8 @@ import re import subprocess -ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +# Note: this currently points to the directory above, since we're in the quay config_app dir. 
When extracting, revert +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) STATIC_DIR = os.path.join(ROOT_DIR, 'static/') STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') @@ -11,9 +12,6 @@ STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/') TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/') -# TODO(config): Remove this external folder dependency -EXTERNAL_REPO_REQUIRE_PATH = os.path.dirname(ROOT_DIR) - def _get_version_number_changelog(): try: diff --git a/config_app/c_app.py b/config_app/c_app.py index 449563181..6235701da 100644 --- a/config_app/c_app.py +++ b/config_app/c_app.py @@ -1,8 +1,9 @@ import os import logging from flask import Flask -from _init_config import CONF_DIR +from _init_config import ROOT_DIR from config_app.config_util.config import get_config_provider +from util.ipresolver import NoopIPResolver from util.config.superusermanager import SuperUserManager @@ -11,7 +12,8 @@ app = Flask(__name__) logger = logging.getLogger(__name__) -OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') +# OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'stack/') +OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack') is_testing = 'TEST' in os.environ @@ -33,3 +35,4 @@ else: # Load the override config via the provider. config_provider.update_app_config(app.config) superusers = SuperUserManager(app) +ip_resolver = NoopIPResolver() \ No newline at end of file diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py index 7fcd83a9c..377c1ba0f 100644 --- a/config_app/conf/gunicorn_local.py +++ b/config_app/conf/gunicorn_local.py @@ -10,7 +10,7 @@ from config_app.config_util.workers import get_worker_count logconfig = logfile_path(debug=True) -bind = '127.0.0.1:5000' +bind = '0.0.0.0:5000' workers = get_worker_count('local', 2, minimum=2, maximum=8) worker_class = 'gevent' daemon = False diff --git a/config_app/config_app_config.py b/config_app/config_app_config.py index 2b8204115..cd2d35be4 100644 --- a/config_app/config_app_config.py +++ b/config_app/config_app_config.py @@ -3,7 +3,7 @@ from uuid import uuid4 import os.path import requests -from _init_config import ROOT_DIR, CONF_DIR, EXTERNAL_REPO_REQUIRE_PATH +from _init_config import ROOT_DIR, CONF_DIR def build_requests_session(): @@ -48,7 +48,7 @@ class ImmutableConfig(object): # Status tag config STATUS_TAGS = {} for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']: - tag_path = os.path.join(EXTERNAL_REPO_REQUIRE_PATH, 'buildstatus', tag_name + '.svg') + tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg') with open(tag_path) as tag_svg: STATUS_TAGS[tag_name] = tag_svg.read() diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index 79e759ec2..03cb895e4 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -7,7 +7,7 @@ from flask import abort, request from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname, validate_json_request -from config_app.c_app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY +from config_app.c_app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY, ip_resolver from auth.auth_context import get_authenticated_user from data.users import get_federated_service_name, 
get_users_handler
@@ -15,8 +15,7 @@
 from data.database import configure
 from data.runmigration import run_alembic_migration
 from util.config.configutil import add_enterprise_config_defaults
 from util.config.database import sync_database_with_config
-# TODO(config) re-add this import when we get the app extracted from validators
-# from util.config.validator import validate_service_for_config
+from util.config.validator import validate_service_for_config, ValidatorContext

 logger = logging.getLogger(__name__)
@@ -64,9 +63,6 @@ class SuperUserConfig(ApiResource):
   def get(self):
     """ Returns the currently defined configuration, if any. """
     config_object = config_provider.get_config()
-    logger.debug(config_object)
-    logger.debug(config_provider)
-
     # Todo: do we even need this endpoint? Since we'll be loading the config in browser
     return {
       'config': config_object
     }
@@ -350,6 +346,10 @@ class SuperUserConfigValidate(ApiResource):
     # if not config_provider.config_exists() or SuperUserPermission().can():
     if not config_provider.config_exists():
       config = request.get_json()['config']
-      return validate_service_for_config(service, config, request.get_json().get('password', ''))
+      validator_context = ValidatorContext.from_app(app, config, request.get_json().get('password', ''),
+                                                    ip_resolver=ip_resolver,
+                                                    config_provider=config_provider)
+      return validate_service_for_config(service, validator_context)
+
     abort(403)
diff --git a/config_app/config_util/log.py b/config_app/config_util/log.py
index 4a934953a..9d91b3d68 100644
--- a/config_app/config_util/log.py
+++ b/config_app/config_util/log.py
@@ -1,5 +1,5 @@
 import os
-from _init_config import CONF_DIR
+from config_app._init_config import CONF_DIR


 def logfile_path(jsonfmt=False, debug=False):
diff --git a/config_app/init/service/gunicorn_web/run b/config_app/init/service/gunicorn_web/run
new file mode 100755
index 000000000..e7564a2c9
--- /dev/null
+++ b/config_app/init/service/gunicorn_web/run
@@ -0,0 +1,11 @@
+#! /bin/bash
+
+echo 'Starting gunicorn'
+
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYDIR/config_app/conf/gunicorn_local.py config_application:application
+
+echo 'Gunicorn exited'
\ No newline at end of file
diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js
index ace9cce64..09b88bcfa 100644
--- a/config_app/js/services/api-service.js
+++ b/config_app/js/services/api-service.js
@@ -324,8 +324,5 @@ angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilS
     };
   };

-  // todo: remove hacks
-  apiService.scRegistryStatus = () => new Promise(() => { hello: true });
-
   return apiService;
 }]);
diff --git a/config_app/js/setup/setup.component.js b/config_app/js/setup/setup.component.js
index 3e828214c..d8d4f2b6e 100644
--- a/config_app/js/setup/setup.component.js
+++ b/config_app/js/setup/setup.component.js
@@ -196,6 +196,7 @@ const templateUrl = require('./setup.html');
     };

     $scope.isStep = function(step) {
+      console.log('current step is', step);
       for (var i = 1; i < arguments.length; ++i) {
         if (arguments[i] == step) {
           return true;
diff --git a/quay-entrypoint.sh b/quay-entrypoint.sh
index a67c2c5eb..9e9a60fe5 100755
--- a/quay-entrypoint.sh
+++ b/quay-entrypoint.sh
@@ -3,8 +3,8 @@ MODE="$1"

 display_usage() {
-    echo "This script takes one arguments."
-    echo -e "\nUsage: ${0} \n"
+    echo "This script takes one argument."
+ echo -e "\nUsage: ${0} \n" } if [[ "${MODE}" = "help" ]] @@ -32,6 +32,10 @@ EOF venv/bin/python -m displayversion case "$MODE" in + "config") + echo "Entering config mode, only copying config-app entrypoints" + cp -r ${QUAYDIR}/config_app/init/service/* /etc/service + ;; "interactive") echo "Copying $MODE files" cp -r ${QUAYCONF}/init/service/interactive/* /etc/service diff --git a/util/config/validators/validate_secscan.py b/util/config/validators/validate_secscan.py index 9f7c2d67f..c690e68d4 100644 --- a/util/config/validators/validate_secscan.py +++ b/util/config/validators/validate_secscan.py @@ -1,6 +1,6 @@ import time -from boot import setup_jwt_proxy +# from boot import setup_jwt_proxy from util.secscan.api import SecurityScannerAPI from util.config.validators import BaseValidator, ConfigValidationException @@ -23,9 +23,9 @@ class SecurityScannerValidator(BaseValidator): api = SecurityScannerAPI(config, None, server_hostname, client=client, skip_validation=True, uri_creator=uri_creator) - if not is_testing: + # if not is_testing: # Generate a temporary Quay key to use for signing the outgoing requests. - setup_jwt_proxy() + # setup_jwt_proxy() # We have to wait for JWT proxy to restart with the newly generated key. max_tries = 5 diff --git a/util/config/validators/validate_torrent.py b/util/config/validators/validate_torrent.py index d8137e12c..dce091efa 100644 --- a/util/config/validators/validate_torrent.py +++ b/util/config/validators/validate_torrent.py @@ -3,7 +3,7 @@ import logging from hashlib import sha1 from util.config.validators import BaseValidator, ConfigValidationException -from util.registry.torrent import jwt_from_infohash +# from util.registry.torrent import jwt_from_infohash logger = logging.getLogger(__name__) @@ -31,8 +31,8 @@ class BittorrentValidator(BaseValidator): 'port': 80, } - encoded_jwt = jwt_from_infohash(params['info_hash']) - params['jwt'] = encoded_jwt + # encoded_jwt = jwt_from_infohash(params['info_hash']) + # params['jwt'] = encoded_jwt resp = client.get(announce_url, timeout=5, params=params) logger.debug('Got tracker response: %s: %s', resp.status_code, resp.text) From d5db3462b926073515c73f314e63f6a85643622d Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Tue, 5 Jun 2018 11:26:24 -0400 Subject: [PATCH 10/14] Add cor progress bar --- .../cor-progress/cor-progress-bar.html | 4 + .../cor-progress/cor-progress-components.js | 74 + .../components/cor-progress/cor-step-bar.html | 3 + .../js/components/cor-progress/cor-step.html | 6 + config_app/js/components/file-upload-box.html | 46 + config_app/js/components/file-upload-box.js | 3 +- config_app/js/main.ts | 6 +- config_app/js/setup/setup.component.js | 1 - config_app/static/css/core-ui.css | 1500 ----------------- config_app/webpack.config.js | 1 + 10 files changed, 140 insertions(+), 1504 deletions(-) create mode 100644 config_app/js/components/cor-progress/cor-progress-bar.html create mode 100644 config_app/js/components/cor-progress/cor-progress-components.js create mode 100644 config_app/js/components/cor-progress/cor-step-bar.html create mode 100644 config_app/js/components/cor-progress/cor-step.html create mode 100644 config_app/js/components/file-upload-box.html delete mode 100644 config_app/static/css/core-ui.css diff --git a/config_app/js/components/cor-progress/cor-progress-bar.html b/config_app/js/components/cor-progress/cor-progress-bar.html new file mode 100644 index 000000000..6ccd75fe3 --- /dev/null +++ b/config_app/js/components/cor-progress/cor-progress-bar.html @@ -0,0 +1,4 @@ +
+<!-- [template markup lost in extraction; no text content survived] -->
\ No newline at end of file diff --git a/config_app/js/components/cor-progress/cor-progress-components.js b/config_app/js/components/cor-progress/cor-progress-components.js new file mode 100644 index 000000000..a8bc9b3b9 --- /dev/null +++ b/config_app/js/components/cor-progress/cor-progress-components.js @@ -0,0 +1,74 @@ + + +const corStepBarUrl = require('./cor-step-bar.html'); +const corStepUrl = require('./cor-step.html'); +const corProgressBarUrl = require('./cor-progress-bar.html'); + +angular.module('quay-config') + .directive('corStepBar', () => { + const directiveDefinitionObject = { + priority: 4, + templateUrl: corStepBarUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'progress': '=progress' + }, + controller: function($rootScope, $scope, $element) { + $scope.$watch('progress', function(progress) { + if (!progress) { return; } + + var index = 0; + for (var i = 0; i < progress.length; ++i) { + if (progress[i]) { + index = i; + } + } + + $element.find('.transclude').children('.co-step-element').each(function(i, elem) { + $(elem).removeClass('active'); + if (i <= index) { + $(elem).addClass('active'); + } + }); + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('corStep', function() { + var directiveDefinitionObject = { + priority: 4, + templateUrl: corStepUrl, + replace: true, + transclude: false, + requires: '^corStepBar', + restrict: 'C', + scope: { + 'icon': '@icon', + 'title': '@title', + 'text': '@text' + }, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corProgressBar', function() { + var directiveDefinitionObject = { + priority: 4, + templateUrl: corProgressBarUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'progress': '=progress' + }, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }); diff --git a/config_app/js/components/cor-progress/cor-step-bar.html b/config_app/js/components/cor-progress/cor-step-bar.html new file mode 100644 index 000000000..117f8185d --- /dev/null +++ b/config_app/js/components/cor-progress/cor-step-bar.html @@ -0,0 +1,3 @@ +
+<!-- [template markup lost in extraction; no text content survived] -->
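All three directives above are class-restricted (restrict: 'C'), so a page opts in by listing the dasherized directive name as a CSS class; corStepBar then watches its progress binding, an array of per-step booleans, and marks every .co-step-element up to the last truthy entry as active. Note that corStep declares requires: '^corStepBar', while the AngularJS definition-object key is spelled require, so the parent link is not actually enforced as written. Because the component templates were lost above, the following usage sketch is illustrative only: the step titles, icons, and the stepProgress scope field are assumptions, not part of this patch.

    <!-- stepProgress is an assumed controller field: one boolean per step,
         truthy once that step has been reached -->
    <span class="cor-step-bar" progress="stepProgress">
      <span class="cor-step" icon="database" title="Database"></span>
      <span class="cor-step" text="2" title="Superuser"></span>
      <span class="cor-step" icon="check" title="Done"></span>
    </span>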
diff --git a/config_app/js/components/cor-progress/cor-step.html b/config_app/js/components/cor-progress/cor-step.html
new file mode 100644
index 000000000..acc9baee4
--- /dev/null
+++ b/config_app/js/components/cor-progress/cor-step.html
@@ -0,0 +1,6 @@
+<!-- [template markup lost in extraction; the only surviving binding is {{ text }}] -->
diff --git a/config_app/js/components/file-upload-box.html b/config_app/js/components/file-upload-box.html
new file mode 100644
index 000000000..65cdf9d6c
--- /dev/null
+++ b/config_app/js/components/file-upload-box.html
@@ -0,0 +1,46 @@
+<!-- [template markup lost in extraction; surviving template text follows] -->
+<!--   Uploading file {{ currentlyUploadingFile.name }}... -->
+<!--   {{ selectMessage }} -->
+<!--   {{ message }} -->
+<!--   {{ message }} -->
\ No newline at end of file diff --git a/config_app/js/components/file-upload-box.js b/config_app/js/components/file-upload-box.js index 7005c21ed..2a48b06d0 100644 --- a/config_app/js/components/file-upload-box.js +++ b/config_app/js/components/file-upload-box.js @@ -1,10 +1,11 @@ +const templateUrl = require('./file-upload-box.html'); /** * An element which adds a stylize box for uploading a file. */ angular.module('quay-config').directive('fileUploadBox', function () { var directiveDefinitionObject = { priority: 0, - templateUrl: '/static/directives/file-upload-box.html', + templateUrl, replace: false, transclude: true, restrict: 'C', diff --git a/config_app/js/main.ts b/config_app/js/main.ts index b2e2d9b74..7be3b1163 100644 --- a/config_app/js/main.ts +++ b/config_app/js/main.ts @@ -1,8 +1,6 @@ // imports shims, etc import 'core-js'; -import '../static/css/core-ui.css'; - import * as angular from 'angular'; import { ConfigAppModule } from './config-app.module'; import { bundle } from 'ng-metadata/core'; @@ -29,5 +27,9 @@ requireAll(require.context('./services', true, /\.js$/)); // load all the components after services requireAll(require.context('./setup', true, /\.js$/)); requireAll(require.context('./core-config-setup', true, /\.js$/)); +requireAll(require.context('./components', true, /\.js$/)); +// Load all the main quay css +requireAll(require.context('../../static/css', true, /\.css$/)); +requireAll(require.context('../../static/lib', true, /\.css$/)); diff --git a/config_app/js/setup/setup.component.js b/config_app/js/setup/setup.component.js index d8d4f2b6e..3e828214c 100644 --- a/config_app/js/setup/setup.component.js +++ b/config_app/js/setup/setup.component.js @@ -196,7 +196,6 @@ const templateUrl = require('./setup.html'); }; $scope.isStep = function(step) { - console.log('current step is', step); for (var i = 1; i < arguments.length; ++i) { if (arguments[i] == step) { return true; diff --git a/config_app/static/css/core-ui.css b/config_app/static/css/core-ui.css deleted file mode 100644 index 2a7fdaf15..000000000 --- a/config_app/static/css/core-ui.css +++ /dev/null @@ -1,1500 +0,0 @@ -/* Global Brand Bar */ -.co-m-global-nav { - background: white; - height: 30px; - line-height: 36px; - position: relative; - z-index: 90; -} - -.co-m-global-nav svg { - width: auto !important; -} - -.co-m-global-nav .co-m-global-nav-left { - text-align: left; - padding-left: 28px; -} - -.co-m-global-nav .co-m-global-nav-right { - text-align: right; - font-size: 16px; - line-height: 30px; - padding-right: 25px; -} - -.co-m-global-nav .co-m-global-nav-item { - padding: 0 20px 0 15px; - border-right: 1px solid #eee; - display: inline-block; - height: 16px; - line-height: 16px; -} - -.co-m-global-nav .co-m-global-nav-item:first-of-type { - padding-left: 0; -} - -.co-m-global-nav .co-m-global-nav-item:last-of-type { - padding-right: 0; - border-right: 0; -} - -/* Tweaks for small screens */ -@media screen and (max-width: 767px) { - .co-m-global-nav { - display: none; /* hide the whole thing */ - } -} - -a:active { - outline: none !important; -} - -a:focus { - outline: none !important; -} - -.co-form-table label { - white-space: nowrap; -} - -.co-form-table td { - padding: 8px; -} - -.co-form-table td:first-child { - vertical-align: top; - padding-top: 14px; -} - -.co-form-table td .co-help-text { - margin-top: 10px; - margin-bottom: 4px; -} - -.co-help-text { - margin-top: 6px; - color: #aaa; - display: inline-block; -} - -.co-options-menu .fa-gear { - color: #999; - cursor: pointer; -} - 
-.co-options-menu .dropdown.open .fa-gear { - color: #428BCA; -} - -.co-img-bg-network { - background: url('/static/img/network-tile.png') left top repeat, linear-gradient(30deg, #2277ad, #144768) no-repeat left top fixed; - background-color: #2277ad; - background-size: auto, 100% 100%; -} - -.co-m-navbar { - background-color: white; - margin: 0; - padding-left: 10px; -} - -.co-fx-box-shadow { - -webkit-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); - -moz-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); - -ms-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); - -o-box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); - box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); -} - -.co-fx-box-shadow-heavy { - -webkit-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - -moz-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - -ms-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - -o-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); -} - -.co-fx-text-shadow { - text-shadow: rgba(0, 0, 0, 1) 1px 1px 2px; -} - -.co-nav-title { - margin-top: -22px; - height: 70px; -} - -.co-nav-title .co-nav-title-content { - color: white; - text-align: center; - white-space: nowrap; -} - -.co-nav-title .co-nav-title-action { - display: block; - color: white; - text-align: center; - line-height: 70px; - font-size: 18px; -} - -.co-nav-title .co-nav-title-action a { - color: white; -} - -.co-nav-title .co-nav-title-action .fa { - margin-right: 6px; -} - - -@media (max-width: 767px) { - .co-nav-title { - height: auto; - min-height: 70px; - } - - .co-nav-title .co-nav-title-content { - height: 34px; - overflow: hidden; - text-overflow: ellipsis; - font-size: 22px; - } -} - -.co-main-content-panel { - margin-bottom: 20px; - background-color: #fff; - border: 1px solid transparent; - padding: 10px; - - -webkit-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - -moz-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - -ms-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - -o-box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); - box-shadow: 0px 2px 2px rgba(0, 0, 0, 0.4); -} - -.cor-log-box { - width: 100%; - height: 550px; - position: relative; -} - -.co-log-viewer { - position: absolute; - top: 20px; - left: 20px; - right: 20px; - height: 500px; - - padding: 20px; - - background: rgb(55, 55, 55); - border: 1px solid black; - color: white; - - overflow: scroll; -} - -.co-log-viewer .co-log-content { - font-family: Consolas, "Lucida Console", Monaco, monospace; - font-size: 12px; - white-space: pre; -} - -.cor-log-box .co-log-viewer-new-logs i { - margin-left: 10px; - display: inline-block; -} - -.cor-log-box .co-log-viewer-new-logs { - cursor: pointer; - position: absolute; - bottom: 40px; - right: 30px; - padding: 10px; - color: white; - border-radius: 10px; - background: rgba(72, 158, 72, 0.8); -} - -.co-panel { - margin-bottom: 40px; - - /*border: 1px solid #eee;*/ -} - -.co-panel .co-panel-heading img { - margin-right: 6px; - width: 24px; -} - -.co-panel .co-panel-heading > i.fa { - margin-right: 6px; - width: 24px; - text-align: center; -} - -.co-panel .co-panel-heading { - padding: 6px; - /*background: #eee;*/ - border-bottom: 1px solid #eee; - - margin-bottom: 4px; - font-size: 135%; - padding-left: 10px; -} - -.co-panel .co-panel-body { - padding: 10px; -} - -@media (max-width: 767px) { - .co-panel > .co-panel-body { - padding: 0px; - padding-top: 10px; - padding-bottom: 10px; - } - - .co-panel > .panel-body { - padding: 0px; - padding-top: 10px; - padding-bottom: 10px; - } -} - - -.co-panel .co-panel-button-bar { - margin-top: 10px; - padding-top: 10px; - 
border-top: 1px solid #eee; -} - -.co-panel-body .co-panel-heading { - font-size: 120%; - border-bottom: 0px; - margin: 0px; - margin-bottom: -6px; -} - -.co-panel-body .co-panel-body { - padding-left: 38px; -} - - -.config-bool-field-element input { - margin-right: 6px; - font-size: 24px; -} - -.config-setup-tool-element .help-text { - margin-top: 6px; - color: #aaa; -} - -.config-setup-tool-element .description { - padding: 6px; -} - -.config-setup-tool-element .config-table > tbody > tr > td:first-child { - padding-top: 14px; - font-weight: bold; -} - -.config-setup-tool-element .config-table > tbody > tr > td.non-input { - padding-top: 8px; -} - -.config-setup-tool-element .config-table > tbody > tr > td { - padding: 8px; - vertical-align: top; -} - -.config-setup-tool-element .config-table > tbody > tr > td .config-numeric-field-element { - width: 100px; -} - -.config-setup-tool-element .config-table > tbody > tr > td .config-string-field-element { - width: 400px; -} - -.config-setup-tool-element .config-table > tbody > tr > td .config-string-list-field-element { - width: 400px; -} - -.config-map-field-element table { - margin-bottom: 10px; -} - -.config-map-field-element .form-control-container { - border-top: 1px solid #eee; - padding-top: 10px; -} - -.config-map-field-element .form-control-container select, .config-map-field-element .form-control-container input { - margin-bottom: 10px; -} - -.config-map-field-element .empty { - color: #ccc; - margin-bottom: 10px; - display: block; -} - -.config-map-field-element .item-title { - font-weight: bold; -} - -.config-contact-field { - margin-bottom: 4px; -} - -.config-contact-field .dropdown button { - width: 100px; - text-align: left; -} - -.config-contact-field .dropdown button .caret { - float: right; - margin-top: 9px; -} - -.config-contact-field .dropdown button i.fa { - margin-right: 6px; - width: 14px; - text-align: center; - display: inline-block; -} - -.config-contact-field .form-control { - width: 350px; -} - -.config-certificates-field-element .dns-name { - display: inline-block; - margin-right: 10px; -} - -.config-certificates-field-element .cert-status .fa { - margin-right: 4px; -} - -.config-certificates-field-element .cert-status .green { - color: #2FC98E; -} - -.config-certificates-field-element .cert-status .orange { - color: #FCA657; -} - -.config-certificates-field-element .cert-status .red { - color: #D64456; -} - -.config-certificates-field-element .file-upload-box-element .file-input-container { - padding: 0px; - text-align: left; -} - -.config-certificates-field-element .file-upload-box-element .file-drop + label { - margin-top: 0px; - margin-bottom: 4px; -} - -.config-list-field-element .empty { - color: #ccc; - margin-bottom: 10px; - display: block; -} - -.config-list-field-element input { - vertical-align: middle; -} - -.config-list-field-element .item-delete { - display: inline-block; - margin-left: 20px; -} - -.config-list-field-element input { - width: 350px; -} - -.config-setup-tool-element .inner-table { - margin-left: 10px; -} - -.config-setup-tool-element .inner-table tr td:first-child { - font-weight: bold; -} - -.config-setup-tool-element .inner-table td { - padding: 6px; -} - -.config-file-field-element input { - display: inline-block; - margin-left: 10px; -} - -.config-service-key-field-element { - position: relative; -} - -.config-service-key-field-element .co-modify-link { - margin-left: 10px; -} - -.config-service-key-field-element .fa-check { - margin-right: 4px; -} - -.co-checkbox { - position: 
relative; -} - -.co-checkbox input { - display: none; -} - -.co-checkbox label { - position: relative; - padding-left: 28px; - cursor: pointer; -} - -.co-checkbox label:before { - content: ''; - cursor: pointer; - position: absolute; - width: 20px; - height: 20px; - top: 0; - left: 0; - border-radius: 4px; - - -webkit-box-shadow: inset 0px 1px 1px rgba(0,0,0,0.5), 0px 1px 0px rgba(255,255,255,.4); - -moz-box-shadow: inset 0px 1px 1px rgba(0,0,0,0.5), 0px 1px 0px rgba(255,255,255,.4); - box-shadow: inset 0px 1px 1px rgba(0,0,0,0.5), 0px 1px 0px rgba(255,255,255,.4); - - background: -webkit-linear-gradient(top, #222 0%, #45484d 100%); - background: -moz-linear-gradient(top, #222 0%, #45484d 100%); - background: -o-linear-gradient(top, #222 0%, #45484d 100%); - background: -ms-linear-gradient(top, #222 0%, #45484d 100%); - background: linear-gradient(top, #222 0%, #45484d 100%); - filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#222', endColorstr='#45484d',GradientType=0 ); -} - -.co-checkbox label:after { - -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=0)"; - filter: alpha(opacity=0); - opacity: 0; - content: ''; - position: absolute; - width: 11px; - height: 7px; - background: transparent; - top: 5px; - left: 4px; - border: 3px solid #fcfff4; - border-top: none; - border-right: none; - - -webkit-transform: rotate(-45deg); - -moz-transform: rotate(-45deg); - -o-transform: rotate(-45deg); - -ms-transform: rotate(-45deg); - transform: rotate(-45deg); -} - -.co-checkbox label:hover::after { - -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=30)"; - filter: alpha(opacity=30); - opacity: 0.3; -} - -.co-checkbox input[type=checkbox]:checked + label:after { - -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=100)"; - filter: alpha(opacity=100); - opacity: 1; - border: 3px solid rgb(26, 255, 26); - border-top: none; - border-right: none; -} - -.co-floating-bottom-bar { - height: 50px; -} - -.co-floating-bottom-bar.floating { - position: fixed; - bottom: 0px; -} - -.config-setup-tool .cor-floating-bottom-bar button i.fa { - margin-right: 6px; -} - -.config-setup-tool .service-verification { - padding: 20px; - background: #343434; - color: white; - margin-bottom: -14px; -} - -.config-setup-tool .service-verification-row { - margin-bottom: 6px; -} - -.config-setup-tool .service-verification-row .service-title { - font-variant: small-caps; - font-size: 145%; - vertical-align: middle; -} - -#validateAndSaveModal .fa-warning { - font-size: 22px; - margin-right: 10px; - vertical-align: middle; - color: rgb(255, 186, 53); -} - -#validateAndSaveModal .fa-check-circle { - font-size: 22px; - margin-right: 10px; - vertical-align: middle; - color: rgb(53, 186, 53); -} - -.config-setup-tool .service-verification-error { - white-space: pre-wrap; - margin-top: 10px; - margin-left: 36px; - margin-bottom: 20px; - max-height: 250px; - overflow: auto; - border: 1px solid #797979; - background: black; - padding: 6px; - font-family: Consolas, "Lucida Console", Monaco, monospace; - font-size: 12px; -} - -.co-m-loader, .co-m-inline-loader { - min-width: 28px; } - -.co-m-loader { - display: block; - position: absolute; - left: 50%; - top: 50%; - margin: -11px 0 0 -13px; } - -.co-m-inline-loader { - display: inline-block; - cursor: default; } - .co-m-inline-loader:hover { - text-decoration: none; } - -.co-m-loader-dot__one, .co-m-loader-dot__two, .co-m-loader-dot__three { - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - -ms-border-radius: 3px; - 
-o-border-radius: 3px; - border-radius: 3px; - animation-fill-mode: both; - -webkit-animation-fill-mode: both; - -moz-animation-fill-mode: both; - -ms-animation-fill-mode: both; - -o-animation-fill-mode: both; - animation-name: bouncedelay; - animation-duration: 1s; - animation-timing-function: ease-in-out; - animation-delay: 0; - animation-direction: normal; - animation-iteration-count: infinite; - animation-fill-mode: forwards; - animation-play-state: running; - -webkit-animation-name: bouncedelay; - -webkit-animation-duration: 1s; - -webkit-animation-timing-function: ease-in-out; - -webkit-animation-delay: 0; - -webkit-animation-direction: normal; - -webkit-animation-iteration-count: infinite; - -webkit-animation-fill-mode: forwards; - -webkit-animation-play-state: running; - -moz-animation-name: bouncedelay; - -moz-animation-duration: 1s; - -moz-animation-timing-function: ease-in-out; - -moz-animation-delay: 0; - -moz-animation-direction: normal; - -moz-animation-iteration-count: infinite; - -moz-animation-fill-mode: forwards; - -moz-animation-play-state: running; - display: inline-block; - height: 6px; - width: 6px; - background: #419eda; - border-radius: 100%; - display: inline-block; } - -.co-m-loader-dot__one { - animation-delay: -0.32s; - -webkit-animation-delay: -0.32s; - -moz-animation-delay: -0.32s; - -ms-animation-delay: -0.32s; - -o-animation-delay: -0.32s; } - -.co-m-loader-dot__two { - animation-delay: -0.16s; - -webkit-animation-delay: -0.16s; - -moz-animation-delay: -0.16s; - -ms-animation-delay: -0.16s; - -o-animation-delay: -0.16s; } - -@-webkit-keyframes bouncedelay { - 0%, 80%, 100% { - -webkit-transform: scale(0.25, 0.25); - -moz-transform: scale(0.25, 0.25); - -ms-transform: scale(0.25, 0.25); - -o-transform: scale(0.25, 0.25); - transform: scale(0.25, 0.25); } - - 40% { - -webkit-transform: scale(1, 1); - -moz-transform: scale(1, 1); - -ms-transform: scale(1, 1); - -o-transform: scale(1, 1); - transform: scale(1, 1); } } - -@-moz-keyframes bouncedelay { - 0%, 80%, 100% { - -webkit-transform: scale(0.25, 0.25); - -moz-transform: scale(0.25, 0.25); - -ms-transform: scale(0.25, 0.25); - -o-transform: scale(0.25, 0.25); - transform: scale(0.25, 0.25); } - - 40% { - -webkit-transform: scale(1, 1); - -moz-transform: scale(1, 1); - -ms-transform: scale(1, 1); - -o-transform: scale(1, 1); - transform: scale(1, 1); } } - -@-ms-keyframes bouncedelay { - 0%, 80%, 100% { - -webkit-transform: scale(0.25, 0.25); - -moz-transform: scale(0.25, 0.25); - -ms-transform: scale(0.25, 0.25); - -o-transform: scale(0.25, 0.25); - transform: scale(0.25, 0.25); } - - 40% { - -webkit-transform: scale(1, 1); - -moz-transform: scale(1, 1); - -ms-transform: scale(1, 1); - -o-transform: scale(1, 1); - transform: scale(1, 1); } } - -@keyframes bouncedelay { - 0%, 80%, 100% { - -webkit-transform: scale(0.25, 0.25); - -moz-transform: scale(0.25, 0.25); - -ms-transform: scale(0.25, 0.25); - -o-transform: scale(0.25, 0.25); - transform: scale(0.25, 0.25); } - - 40% { - -webkit-transform: scale(1, 1); - -moz-transform: scale(1, 1); - -ms-transform: scale(1, 1); - -o-transform: scale(1, 1); - transform: scale(1, 1); } } - -.co-dialog .modal-body { - padding: 10px; - min-height: 100px; -} - -.co-dialog .modal-body h4 { - margin-bottom: 20px; -} - -.co-dialog .modal-content { - border-radius: 0px; -} - -.co-dialog.fatal-error .modal-content { - padding-left: 175px; -} - -.co-dialog.fatal-error .alert-icon-container-container { - position: absolute; - top: -36px; - left: -175px; - bottom: 20px; -} - 
-.co-dialog.fatal-error .alert-icon-container { - height: 100%; - display: table; -} - -.co-dialog.fatal-error .alert-icon { - display: table-cell; - vertical-align: middle; - border-right: 1px solid #eee; - margin-right: 20px; -} - -.co-dialog.fatal-error .alert-icon:before { - content: "\f071"; - font-family: FontAwesome; - font-size: 60px; - padding-left: 50px; - padding-right: 50px; - color: #c53c3f; - text-align: center; -} - - -.co-dialog .modal-header .cor-step-bar { - float: right; -} - -.co-dialog .modal-footer.working { - text-align: left; -} - -.co-dialog .modal-footer.working .btn { - float: right; -} - -.co-dialog .modal-footer.working .cor-loader-inline { - margin-right: 10px; -} - -.co-dialog .modal-footer .left-align { - float: left; - vertical-align: middle; - font-size: 16px; - margin-top: 8px; -} - -.co-dialog .modal-footer .left-align i.fa-warning { - color: #ffba35; - display: inline-block; - margin-right: 6px; -} - -.co-dialog .modal-footer .left-align i.fa-check { - color: green; - display: inline-block; - margin-right: 6px; -} - -.co-dialog .co-single-field-dialog { - padding: 10px; -} - -.co-dialog .co-single-field-dialog input { - margin-top: 10px; -} - -.co-step-bar .co-step-element { - cursor: default; - display: inline-block; - width: 28px; - height: 28px; - - position: relative; - color: #ddd; - - text-align: center; - line-height: 24px; - font-size: 16px; -} - -.co-step-bar .co-step-element.text { - margin-left: 24px; - background: white; -} - -.co-step-bar .co-step-element.icon { - margin-left: 22px; -} - -.co-step-bar .co-step-element:first-child { - margin-left: 0px; -} - -.co-step-bar .co-step-element.active { - color: #53a3d9; -} - -.co-step-bar .co-step-element:first-child:before { - display: none; -} - -.co-step-bar .co-step-element:before { - content: ""; - position: absolute; - top: 12px; - width: 14px; - border-top: 2px solid #ddd; -} - -.co-step-bar .co-step-element.icon:before { - left: -20px; -} - -.co-step-bar .co-step-element.text:before { - left: -22px; -} - -.co-step-bar .co-step-element.active:before { - border-top: 2px solid #53a3d9; -} - - -.co-step-bar .co-step-element.text { - border-radius: 100%; - border: 2px solid #ddd; -} - -.co-step-bar .co-step-element.text.active { - border: 2px solid #53a3d9; -} - -@media screen and (min-width: 900px) { - .co-dialog .modal-dialog { - width: 800px; - } -} - -@media screen and (min-width: 1200px) { - .co-dialog.wider .modal-dialog { - width: 1000px; - } -} - -.co-alert .co-step-bar { - float: right; - margin-top: 6px; -} - -.cor-container { - padding-left: 15px; - padding-right: 15px; -} - -.cor-title-link { - font-weight: 300; - line-height: 30px; - margin-top: 22px; - margin-bottom: 10px; - font-size: 16px; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; - color: white; -} - -.cor-title-link a { - color: white; - text-decoration: none !important; -} - -.cor-title-link a.back-link .avatar { - margin-right: 6px; -} - -.cor-title-link a.back-link:before { - content: "\f060"; - color: white; - display: inline-block; - margin-right: 10px; - vertical-align: middle; - font-family: FontAwesome; -} - -.co-table { - width: 100%; -} - -.co-fixed-table { - table-layout: fixed; -} - -.co-fixed-table .co-flowing-col { - overflow: hidden; - text-overflow: ellipsis; - padding-left: 16px; - vertical-align: middle; -} - -.co-fixed-table .nowrap-col { - white-space: nowrap; - overflow: hidden; -} - -.co-table td { - border-bottom: 1px solid #eee; - padding: 10px; -} - -.co-table.no-lines td { 
- border-bottom: 0px; - padding: 6px; -} - -.co-table thead td { - color: #999; - font-size: 90%; - text-transform: uppercase; - font-weight: 300; - padding-top: 0px !important; -} - -.co-table thead td a { - color: #666; -} - -.co-table thead td:after { - content: "\f175"; - font-family: FontAwesome; - font-size: 12px; - margin-left: 10px; - visibility: hidden; -} - -.co-table thead td.unorderable-col:after { - display: none; -} - -.co-table thead td.current:after { - content: "\f175"; - visibility: visible; -} - -.co-table thead td.current.reversed:after { - content: "\f176"; - visibility: visible; -} - -.co-table thead td.current a { - color: #337ab7; -} - -.co-table .checkbox-col { - width: 24px; - text-align: center; -} - -.co-table .checkbox-col .co-checkable-menu a { - color: black; - text-transform: none; -} - -.co-table thead td.checkbox-menu-col:after { - display: none; -} - -.co-table .offset-check-col { - padding-left: 22px; -} - -.co-table td.options-col { - width: 36px; - text-align: center; -} - -.co-table td.caret-col { - width: 10px; - padding-left: 6px; - padding-right: 0px; - color: #aaa; - text-align: center; - max-width: 20px; -} - -.co-table td.caret-col i.fa { - cursor: pointer; -} - -.co-table td.caret-col i.fa.fa-caret-down { - color: black; -} - -.co-table .add-row-spacer td { - padding: 5px; -} - -.co-table .add-row td { - padding-top: 10px; - border-top: 2px solid #eee; - border-bottom: none; -} - -.co-table tr.co-table-header-row td { - font-size: 12px; - text-transform: uppercase; - color: #ccc; - border-bottom: none; - padding-left: 10px; - padding-top: 10px; - padding-bottom: 4px; -} - -.co-table tr.co-table-header-row td i.fa { - margin-right: 4px; -} - -.co-table tr.indented-row td:first-child { - padding-left: 28px; -} - -@media (max-width: 767px) { - .co-table tr.indented-row td:first-child { - padding-left: 10px; - } -} - -.co-table .mobile-row { - border-bottom: 2px solid #eee; - padding-bottom: 10px; - margin-bottom: 10px; - - position: relative; -} - -.co-table .mobile-row:last-child { - border-bottom: 0px solid #eee; - padding-bottom: 0px; - margin-bottom: 0px; -} - -.co-table .mobile-row .mobile-col-header { - font-weight: bold; - color: #444; -} - -.co-table .mobile-row .mobile-col-value { - padding: 6px; -} - -.co-table .mobile-row .options-col { - position: absolute; - top: -6px; - right: 0px; -} - - -.cor-checkable-menu { - display: inline-block; -} - -.co-checkable-menu .co-checkable-menu-state { - display: inline-block; - margin-left: -1px; - margin-right: 4px; -} - -.co-checkable-menu .dropdown { - display: inline-block; -} - -.co-checkable-item, .co-checkable-menu-state { - width: 18px; - height: 18px; - cursor: pointer; - border: 1px solid #ddd; - display: inline-block; - vertical-align: middle; - - position: relative -} - -.co-checkable-item:after, .co-checkable-menu-state:after { - content: "\f00c"; - font-family: FontAwesome; - color: #ccc; - - position: absolute; - top: -1px; - left: 1px; - - visibility: hidden; -} - -.co-checkable-menu-state.some:after { - content: "-"; - font-size: 24px; - top: -10px; - left: 4px; -} - -@media (min-width: 768px) { - .co-checkable-item:hover:after { - visibility: visible; - } -} - -.co-checkable-item.checked:after, .co-checkable-menu-state.all:after, .co-checkable-menu-state.some:after { - visibility: visible; - color: #428bca; -} - -.co-table .co-checkable-row.checked { - background: #F6FCFF; -} - -.co-filter-box { - position: relative;; -} - -.co-filter-box input { - display: inline-block; - width: 
auto !important; -} - -.co-filter-box .filter-message { - display: inline-block; - position: absolute; - left: -220px; - top: 7px; - color: #ccc; -} - -.co-filter-box .filter-options { - margin-top: 4px; - font-size: 14px; - text-align: right; - display: inline-block; -} - -.co-filter-box .filter-options label input { - margin-right: 4px; -} - - -.co-filter-box.with-options > input { - display: inline-block; - width: 200px; - margin-right: 4px; -} - -.co-check-bar { - margin-bottom: 10px; -} - -.co-check-bar .co-checked-actions { - display: inline-block; - border-left: 1px solid #eee; - margin-left: 10px; - padding-left: 4px; -} - -.co-top-bar { - height: 50px; - padding-bottom: 40px; -} - -.co-check-bar .co-checked-actions .btn { - margin-left: 6px; -} - -.co-check-bar .co-checked-actions .btn .fa { - margin-right: 4px; -} - -.co-check-bar .co-filter-box, .co-top-bar .co-filter-box { - float: right; -} - -.co-check-bar .co-filter-box .page-controls, .co-top-bar .co-filter-box .page-controls { - margin-right: 6px; - margin-bottom: 6px; -} - -.co-check-bar .co-filter-box input, .co-top-bar .co-filter-box input[type="text"] { - width: 300px; - display: inline-block; - vertical-align: middle; -} - -.co-check-bar .co-filter-box input, .co-top-bar .co-filter-box label { - margin-left: 6px; -} - -.co-top-bar .co-filter-box input { - vertical-align: top; -} - -@media screen and (max-width: 640px) { - .co-top-bar .page-controls { - margin-bottom: 10px; - text-align: right; - } - - .co-top-bar .co-filter-box { - display: block; - margin-bottom: 10px; - } - - .co-top-bar .filter-options { - display: block; - margin-bottom: 10px; - } - - .co-filter-box input { - display: block !important; - } -} - -.empty { - border-bottom: none !important; -} - -.empty-icon { - color: #aaa; - font-size: 60px; - margin-bottom: 0px; - text-align: center; -} - -.empty-primary-msg { - font-size: 18px; - margin-bottom: 10px; - text-align: center; -} - -.empty-secondary-msg { - font-size: 14px; - color: #999; - text-align: center; - margin-bottom: 10px; -} - -.co-alert { - padding: 16px; - padding-left: 46px; - position: relative; - margin-bottom: 20px; - position: relative; - border: 1px solid #eee; -} - -.co-alert.co-alert-success { - background: #F0FFF4; -} - -.co-alert.co-alert-success:before { - font-family: FontAwesome; - content: "\f058"; - position: absolute; - top: 11px; - left: 12px; - font-size: 22px; - color: #83D29C; -} - -.co-alert.co-alert-info { - background: #F0FAFF; -} - -.co-alert.co-alert-info:before { - font-family: FontAwesome; - content: "\f05a"; - position: absolute; - top: 11px; - left: 12px; - font-size: 22px; - color: #83B7D2; -} - -.co-alert.co-alert-warning { - background: #FFFBF0; -} - -.co-alert.co-alert-warning:before { - font-family: FontAwesome; - content: "\f071"; - position: absolute; - top: 11px; - left: 12px; - font-size: 22px; - color: #FCA657; -} - -.co-alert.co-alert-danger { - background: #FFF0F0; -} - -.co-alert.co-alert-danger:before { - font-family: core-icons; - content: "\f109"; - position: absolute; - top: 11px; - left: 12px; - font-size: 22px; - color: red; -} - -.co-alert.co-alert-danger:after { - font-family: FontAwesome; - content: "\f12a"; - position: absolute; - top: 16px; - left: 20px; - font-size: 16px; - color: white; - z-index: 2; -} - -.co-alert.thin { - padding: 6px; - padding-left: 38px; - margin-bottom: 0px; -} - -.co-alert.thin:before { - top: 5px; - font-size: 18px; -} - -.co-alert.thin:after { - top: 9px; - font-size: 13px; - left: 19px; -} - 
-.co-alert-inline:before { - position: relative !important; - top: auto !important; - left: auto !important; - vertical-align: middle; - margin-right: 10px; -} - -.co-alert-popin-warning { - margin-left: 10px; -} - -@media screen and (max-width: 767px) { - .co-alert-popin-warning { - display: block; - margin: 0px; - margin-top: 10px; - float: none; - } -} - -.co-alert-inline { - border: 0px; - display: inline-block; - background-color: transparent !important; - margin: 0px; - padding: 4px; -} - -.co-list-table tr td:first-child { - font-weight: bold; - padding-right: 10px; - vertical-align: top; - width: 120px; - padding-left: 0px; -} - -.co-list-table tr td { - padding: 10px; - font-size: 15px; -} - -.co-list-table .help-text { - margin-top: 6px; - font-size: 14px; - color: #aaa; -} - -.co-modify-link:after { - font-family: FontAwesome; - content: "\f054"; - color: #ccc; - vertical-align: middle; - display: inline-block; - margin-left: 10px; - font-size: 10px; - line-height: 16px; -} - -.co-option-table tr td:first-child { - padding-left: 16px; - padding-right: 16px; - padding-top: 0px; - vertical-align: top; -} - -.co-option-table tr td:last-child { - padding-bottom: 10px; -} - -.co-option-table .help-text { - margin-top: 4px; - margin-bottom: 10px; - font-size: 14px; - color: #aaa; -} - -.co-modal-body-scrollable { - overflow-y: auto; - overflow-x: hidden; - max-height: 400px; -} - -.cor-confirm-dialog-element .modal-body { - padding: 20px; -} - -.cor-confirm-dialog-element .progress-message { - margin-bottom: 10px; - font-size: 16px; -} - -.co-top-tab-bar { - padding: 0px; - margin: 0px; - padding-left: 10px; - - margin-bottom: 10px; - border-bottom: 1px solid #eee; -} - -.co-top-tab-bar li { - display: inline-block; - list-style: none; - text-align: center; - padding: 6px; - padding-left: 10px; - padding-right: 10px; - border-bottom: 1px solid #eee; - font-size: 15px; - cursor: pointer; - color: #666; - - bottom: -2px; - position: relative; -} - -.co-top-tab-bar li.active { - color: #51a3d9; - border-bottom: 2px solid #51a3d9; - top: 2px; -} - -.modal-header.ahead-of-tabs { - border-bottom: 0px; - padding-bottom: 4px; -} diff --git a/config_app/webpack.config.js b/config_app/webpack.config.js index 4b52f243e..7ba392c27 100644 --- a/config_app/webpack.config.js +++ b/config_app/webpack.config.js @@ -14,6 +14,7 @@ let config = { modules: [ // Allows us to use the top-level node modules path.resolve(__dirname, '../node_modules'), + path.resolve(__dirname, '../static/css/') ] }, externals: { From e9d24dc5ff1e4f3de34951b64302c53d8803a654 Mon Sep 17 00:00:00 2001 From: Sam Chow Date: Tue, 5 Jun 2018 13:43:01 -0400 Subject: [PATCH 11/14] Make script paths work in docker and locally for config_app --- Dockerfile | 5 +- config_app/_init_config.py | 4 +- config_app/c_app.py | 28 +- config_app/conf/gunicorn_local.py | 3 +- config_app/conf/gunicorn_web.py | 26 + config_app/config_app_config.py | 544 ----------- config_app/config_endpoints/api/__init__.py | 19 - config_app/config_endpoints/api/discovery.py | 1 + config_app/config_endpoints/api/suconfig.py | 13 +- config_app/config_endpoints/api/superuser.py | 7 +- .../api/superuser_models_interface.py | 278 ------ .../api/superuser_models_pre_oci.py | 243 ----- config_app/config_endpoints/api/user.py | 13 +- config_app/config_endpoints/common.py | 6 +- config_app/config_endpoints/exception.py | 56 -- config_app/config_endpoints/setup_web.py | 8 +- config_app/config_test/testconfig.py | 108 --- .../config_util/config/basefileprovider.py | 2 +- 
config_app/config_util/config/baseprovider.py | 2 +- config_app/config_util/config/schema.py | 914 ------------------ config_app/config_util/workers.py | 32 - config_app/init/service/gunicorn_web/run | 2 +- config_app/js/services/api-service.js | 7 +- endpoints/api/discovery.py | 1 + local-config-app.sh | 2 +- package.json | 3 +- 26 files changed, 79 insertions(+), 2248 deletions(-) create mode 100644 config_app/conf/gunicorn_web.py delete mode 100644 config_app/config_app_config.py delete mode 100644 config_app/config_test/testconfig.py delete mode 100644 config_app/config_util/config/schema.py delete mode 100644 config_app/config_util/workers.py diff --git a/Dockerfile b/Dockerfile index 34d68e93a..b9f9074d5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -106,6 +106,10 @@ RUN yarn build \ && jpegoptim static/img/**/*.jpg \ && optipng -clobber -quiet static/img/**/*.png +# Config app js compile +COPY config_app/ config_app/ +RUN yarn build-config-app + COPY . . RUN PYTHONPATH=$QUAYPATH venv/bin/python -m external_libraries @@ -139,4 +143,3 @@ RUN ./scripts/detect-config.sh EXPOSE 443 8443 80 ENTRYPOINT [ "/bin/bash", "./quay-entrypoint.sh"] - diff --git a/config_app/_init_config.py b/config_app/_init_config.py index 1ab66a338..a1628321a 100644 --- a/config_app/_init_config.py +++ b/config_app/_init_config.py @@ -3,8 +3,10 @@ import re import subprocess -# Note: this currently points to the directory above, since we're in the quay config_app dir. When extracting, revert +# Note: this currently points to the directory above, since we're in the quay config_app dir +# TODO(config_extract): revert to root directory rather than the one above ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) STATIC_DIR = os.path.join(ROOT_DIR, 'static/') STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') diff --git a/config_app/c_app.py b/config_app/c_app.py index 6235701da..57155a5be 100644 --- a/config_app/c_app.py +++ b/config_app/c_app.py @@ -1,38 +1,40 @@ import os import logging + from flask import Flask -from _init_config import ROOT_DIR -from config_app.config_util.config import get_config_provider + +from data import database +from util.config.superusermanager import SuperUserManager from util.ipresolver import NoopIPResolver - -from util.config.superusermanager import SuperUserManager +from config_app._init_config import ROOT_DIR +from config_app.config_util.config import get_config_provider app = Flask(__name__) logger = logging.getLogger(__name__) -# OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'stack/') OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack') - is_testing = 'TEST' in os.environ -is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ -config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config_app_config.py', - testing=is_testing, kubernetes=is_kubernetes) +# TODO(config kubernetes): reinstate when enabling kubernetes in config app +# is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ + +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', + testing=is_testing) if is_testing: - from config_app.config_test.testconfig import TestConfig + from test.testconfig import TestConfig logger.debug('Loading test config.') app.config.from_object(TestConfig()) else: - from config_app.config_app_config import DefaultConfig + from config import DefaultConfig logger.debug('Loading default config.') 
app.config.from_object(DefaultConfig()) - # app.teardown_request(database.close_db_filter) + app.teardown_request(database.close_db_filter) # Load the override config via the provider. config_provider.update_app_config(app.config) superusers = SuperUserManager(app) -ip_resolver = NoopIPResolver() \ No newline at end of file +ip_resolver = NoopIPResolver() diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py index 377c1ba0f..d0ea0a758 100644 --- a/config_app/conf/gunicorn_local.py +++ b/config_app/conf/gunicorn_local.py @@ -6,12 +6,11 @@ import logging from Crypto import Random from config_app.config_util.log import logfile_path -from config_app.config_util.workers import get_worker_count logconfig = logfile_path(debug=True) bind = '0.0.0.0:5000' -workers = get_worker_count('local', 2, minimum=2, maximum=8) +workers = 1 worker_class = 'gevent' daemon = False pythonpath = '.' diff --git a/config_app/conf/gunicorn_web.py b/config_app/conf/gunicorn_web.py new file mode 100644 index 000000000..4c4e1a152 --- /dev/null +++ b/config_app/conf/gunicorn_web.py @@ -0,0 +1,26 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + +from Crypto import Random +from config_app.config_util.log import logfile_path + + +logconfig = logfile_path(debug=True) + +bind = '0.0.0.0:80' +workers = 1 +worker_class = 'gevent' +pythonpath = '.' +preload_app = True + +def post_fork(server, worker): + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/config_app/config_app_config.py b/config_app/config_app_config.py deleted file mode 100644 index cd2d35be4..000000000 --- a/config_app/config_app_config.py +++ /dev/null @@ -1,544 +0,0 @@ -from uuid import uuid4 - -import os.path -import requests - -from _init_config import ROOT_DIR, CONF_DIR - - -def build_requests_session(): - sess = requests.Session() - adapter = requests.adapters.HTTPAdapter(pool_connections=100, - pool_maxsize=100) - sess.mount('http://', adapter) - sess.mount('https://', adapter) - return sess - - -# The set of configuration key names that will be accessible in the client. Since these -# values are sent to the frontend, DO NOT PLACE ANY SECRETS OR KEYS in this list. 
-CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'MIXPANEL_KEY', - 'STRIPE_PUBLISHABLE_KEY', 'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN', - 'AUTHENTICATION_TYPE', 'REGISTRY_TITLE', 'REGISTRY_TITLE_SHORT', - 'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', 'DOCUMENTATION_LOCATION', - 'DOCUMENTATION_METADATA', 'SETUP_COMPLETE', 'DEBUG', 'MARKETO_MUNCHKIN_ID', - 'STATIC_SITE_BUCKET', 'RECAPTCHA_SITE_KEY', 'CHANNEL_COLORS', - 'TAG_EXPIRATION_OPTIONS', 'INTERNAL_OIDC_SERVICE_ID', - 'SEARCH_RESULTS_PER_PAGE', 'SEARCH_MAX_RESULT_PAGE_COUNT'] - - -def frontend_visible_config(config_dict): - visible_dict = {} - for name in CLIENT_WHITELIST: - if name.lower().find('secret') >= 0: - raise Exception('Cannot whitelist secrets: %s' % name) - - if name in config_dict: - visible_dict[name] = config_dict.get(name, None) - - return visible_dict - - -# Configuration that should not be changed by end users -class ImmutableConfig(object): - - # Requests based HTTP client with a large request pool - HTTPCLIENT = build_requests_session() - - # Status tag config - STATUS_TAGS = {} - for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']: - tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg') - with open(tag_path) as tag_svg: - STATUS_TAGS[tag_name] = tag_svg.read() - - # Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable - # to be set via the API. - DEFAULT_LABEL_KEY_RESERVED_PREFIXES = ['com.docker.', 'io.docker.', 'org.dockerproject.', - 'org.opencontainers.', 'io.cncf.', - 'io.kubernetes.', 'io.k8s.', - 'io.quay', 'com.coreos', 'com.tectonic', - 'internal', 'quay'] - - # Colors for local avatars. - AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', - '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', - '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', - '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', - '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] - - # Colors for channels. 
- CHANNEL_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', - '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', - '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', - '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', - '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] - - PROPAGATE_EXCEPTIONS = True - - -class DefaultConfig(ImmutableConfig): - # Flask config - JSONIFY_PRETTYPRINT_REGULAR = False - SESSION_COOKIE_SECURE = False - - LOGGING_LEVEL = 'DEBUG' - SEND_FILE_MAX_AGE_DEFAULT = 0 - PREFERRED_URL_SCHEME = 'http' - SERVER_HOSTNAME = 'localhost:5000' - - REGISTRY_TITLE = 'Quay Enterprise' - REGISTRY_TITLE_SHORT = 'Quay Enterprise' - - CONTACT_INFO = [ - 'mailto:support@quay.io', - 'irc://chat.freenode.net:6665/quay', - 'tel:+1-888-930-3475', - 'https://twitter.com/quayio', - ] - - # Mail config - MAIL_SERVER = '' - MAIL_USE_TLS = True - MAIL_PORT = 587 - MAIL_USERNAME = None - MAIL_PASSWORD = None - MAIL_DEFAULT_SENDER = 'support@quay.io' - MAIL_FAIL_SILENTLY = False - TESTING = True - - # DB config - DB_URI = 'sqlite:///test/data/test.db' - DB_CONNECTION_ARGS = { - 'threadlocals': True, - 'autorollback': True, - } - - @staticmethod - def create_transaction(db): - return db.transaction() - - DB_TRANSACTION_FACTORY = create_transaction - - # If set to true, TLS is used, but is terminated by an external service (such as a load balancer). - # Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined - # behavior. - EXTERNAL_TLS_TERMINATION = False - - # If true, CDN URLs will be used for our external dependencies, rather than the local - # copies. - USE_CDN = False - - # Authentication - AUTHENTICATION_TYPE = 'Database' - - # Build logs - BUILDLOGS_REDIS = {'host': 'localhost'} - BUILDLOGS_OPTIONS = [] - - # Real-time user events - USER_EVENTS_REDIS = {'host': 'localhost'} - - # Stripe config - BILLING_TYPE = 'FakeStripe' - - # Analytics - ANALYTICS_TYPE = 'FakeAnalytics' - - # Build Queue Metrics - QUEUE_METRICS_TYPE = 'Null' - QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300 - - # Exception logging - EXCEPTION_LOG_TYPE = 'FakeSentry' - SENTRY_DSN = None - SENTRY_PUBLIC_DSN = None - - # Github Config - GITHUB_LOGIN_CONFIG = None - GITHUB_TRIGGER_CONFIG = None - - # Google Config. - GOOGLE_LOGIN_CONFIG = None - - # Bitbucket Config. - BITBUCKET_TRIGGER_CONFIG = None - - # Gitlab Config. - GITLAB_TRIGGER_CONFIG = None - - NOTIFICATION_QUEUE_NAME = 'notification' - DOCKERFILE_BUILD_QUEUE_NAME = 'dockerfilebuild' - REPLICATION_QUEUE_NAME = 'imagestoragereplication' - SECSCAN_NOTIFICATION_QUEUE_NAME = 'security_notification' - CHUNK_CLEANUP_QUEUE_NAME = 'chunk_cleanup' - NAMESPACE_GC_QUEUE_NAME = 'namespacegc' - - # Super user config. Note: This MUST BE an empty list for the default config. - SUPER_USERS = [] - - # Feature Flag: Whether sessions are permanent. - FEATURE_PERMANENT_SESSIONS = True - - # Feature Flag: Whether super users are supported. - FEATURE_SUPER_USERS = True - - # Feature Flag: Whether to allow anonymous users to browse and pull public repositories. - FEATURE_ANONYMOUS_ACCESS = True - - # Feature Flag: Whether billing is required. - FEATURE_BILLING = False - - # Feature Flag: Whether user accounts automatically have usage log access. - FEATURE_USER_LOG_ACCESS = False - - # Feature Flag: Whether GitHub login is supported. - FEATURE_GITHUB_LOGIN = False - - # Feature Flag: Whether Google login is supported. 
- FEATURE_GOOGLE_LOGIN = False - - # Feature Flag: whether to enable support chat - FEATURE_SUPPORT_CHAT = False - - # Feature Flag: Whether to support GitHub build triggers. - FEATURE_GITHUB_BUILD = False - - # Feature Flag: Whether to support Bitbucket build triggers. - FEATURE_BITBUCKET_BUILD = False - - # Feature Flag: Whether to support GitLab build triggers. - FEATURE_GITLAB_BUILD = False - - # Feature Flag: Dockerfile build support. - FEATURE_BUILD_SUPPORT = True - - # Feature Flag: Whether emails are enabled. - FEATURE_MAILING = True - - # Feature Flag: Whether users can be created (by non-super users). - FEATURE_USER_CREATION = True - - # Feature Flag: Whether users being created must be invited by another user. If FEATURE_USER_CREATION is off, - # this flag has no effect. - FEATURE_INVITE_ONLY_USER_CREATION = False - - # Feature Flag: Whether users can be renamed - FEATURE_USER_RENAME = False - - # Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for - # basic auth. - FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False - - # Feature Flag: Whether to automatically replicate between storage engines. - FEATURE_STORAGE_REPLICATION = False - - # Feature Flag: Whether users can directly login to the UI. - FEATURE_DIRECT_LOGIN = True - - # Feature Flag: Whether the v2/ endpoint is visible - FEATURE_ADVERTISE_V2 = True - - # Semver spec for which Docker versions we will blacklist - # Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec - BLACKLIST_V2_SPEC = '<1.6.0' - - # Feature Flag: Whether or not to rotate old action logs to storage. - FEATURE_ACTION_LOG_ROTATION = False - - # Feature Flag: Whether to enable conversion to ACIs. - FEATURE_ACI_CONVERSION = False - - # Feature Flag: Whether to allow for "namespace-less" repositories when pulling and pushing from - # Docker. - FEATURE_LIBRARY_SUPPORT = True - - # Feature Flag: Whether to require invitations when adding a user to a team. - FEATURE_REQUIRE_TEAM_INVITE = True - - # Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx. - FEATURE_PROXY_STORAGE = False - - # Feature Flag: Whether to collect and support user metadata. - FEATURE_USER_METADATA = False - - # Feature Flag: Whether to support signing - FEATURE_SIGNING = False - - # Feature Flag: Whether to enable support for App repositories. - FEATURE_APP_REGISTRY = False - - # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise, - # only private repositories can be returned. - FEATURE_PUBLIC_CATALOG = False - - # Feature Flag: If set to true, build logs may be read by those with read access to the repo, - # rather than only write access or admin access. - FEATURE_READER_BUILD_LOGS = False - - # Feature Flag: If set to true, autocompletion will apply to partial usernames. - FEATURE_PARTIAL_USER_AUTOCOMPLETE = True - - # If a namespace is defined in the public namespace list, then it will appear on *all* - # user's repository list pages, regardless of whether that user is a member of the namespace. - # Typically, this is used by an enterprise customer in configuring a set of "well-known" - # namespaces. - PUBLIC_NAMESPACES = [] - - # The namespace to use for library repositories. - # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries. 
- # See: https://github.com/docker/docker/blob/master/registry/session.go#L320 - LIBRARY_NAMESPACE = 'library' - - BUILD_MANAGER = ('enterprise', {}) - - DISTRIBUTED_STORAGE_CONFIG = { - 'local_eu': ['LocalStorage', {'storage_path': 'test/data/registry/eu'}], - 'local_us': ['LocalStorage', {'storage_path': 'test/data/registry/us'}], - } - - DISTRIBUTED_STORAGE_PREFERENCE = ['local_us'] - DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ['local_us'] - - # Health checker. - HEALTH_CHECKER = ('LocalHealthCheck', {}) - - # Userfiles - USERFILES_LOCATION = 'local_us' - USERFILES_PATH = 'userfiles/' - - # Build logs archive - LOG_ARCHIVE_LOCATION = 'local_us' - LOG_ARCHIVE_PATH = 'logarchive/' - - # Action logs archive - ACTION_LOG_ARCHIVE_LOCATION = 'local_us' - ACTION_LOG_ARCHIVE_PATH = 'actionlogarchive/' - - # System logs. - SYSTEM_LOGS_PATH = "/var/log/" - SYSTEM_LOGS_FILE = "/var/log/syslog" - SYSTEM_SERVICES_PATH = os.path.join(CONF_DIR, "init/service/") - - # Allow registry pulls when unable to write to the audit log - ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False - - # Services that should not be shown in the logs view. - SYSTEM_SERVICE_BLACKLIST = [] - - # Temporary tag expiration in seconds, this may actually be longer based on GC policy - PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60 # One hour per layer - - # Signed registry grant token expiration in seconds - SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24 # One day to complete a push/pull - - # Registry v2 JWT Auth config - REGISTRY_JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60 # At most signed one hour, accounting for clock skew - - # The URL endpoint to which we redirect OAuth when generating a token locally. - LOCAL_OAUTH_HANDLER = '/oauth/localapp' - - # The various avatar background colors. - AVATAR_KIND = 'local' - - # The location of the Quay documentation. - DOCUMENTATION_LOCATION = 'http://docs.quay.io' - DOCUMENTATION_METADATA = 'https://coreos.github.io/quay-docs/search.json' - - # How often the Garbage Collection worker runs. - GARBAGE_COLLECTION_FREQUENCY = 30 # seconds - - # How long notifications will try to send before timing out. - NOTIFICATION_SEND_TIMEOUT = 10 - - # Security scanner - FEATURE_SECURITY_SCANNER = False - FEATURE_SECURITY_NOTIFICATIONS = False - - # The endpoint for the security scanner. - SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060' - - # The number of seconds between indexing intervals in the security scanner - SECURITY_SCANNER_INDEXING_INTERVAL = 30 - - # If specified, the security scanner will only index images newer than the provided ID. - SECURITY_SCANNER_INDEXING_MIN_ID = None - - # If specified, the endpoint to be used for all POST calls to the security scanner. - SECURITY_SCANNER_ENDPOINT_BATCH = None - - # If specified, GET requests that return non-200 will be retried at the following instances. - SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = [] - - # The indexing engine version running inside the security scanner. - SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3 - - # The version of the API to use for the security scanner. - SECURITY_SCANNER_API_VERSION = 'v1' - - # API call timeout for the security scanner. - SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10 - - # POST call timeout for the security scanner. - SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS = 480 - - # The issuer name for the security scanner. 
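
The security scanner settings above pair an endpoint with separate GET and POST timeouts. A hedged sketch of a client honoring them; the URL shape and get_layer helper are illustrative, since the actual client code is not part of this patch:

# Illustrative only: calling the scanner with the configured endpoint,
# API version, and GET timeout.
import requests

SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060'
SECURITY_SCANNER_API_VERSION = 'v1'
SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10

def get_layer(layer_id):
  url = '%s/%s/layers/%s' % (SECURITY_SCANNER_ENDPOINT,
                             SECURITY_SCANNER_API_VERSION, layer_id)
  # GETs use the short timeout; POSTs would use the longer
  # SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS value instead.
  return requests.get(url, timeout=SECURITY_SCANNER_API_TIMEOUT_SECONDS)
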
- SECURITY_SCANNER_ISSUER_NAME = 'security_scanner' - - # JWTProxy Settings - # The address (sans schema) to proxy outgoing requests through the jwtproxy - # to be signed - JWTPROXY_SIGNER = 'localhost:8080' - - # The audience that jwtproxy should verify on incoming requests - # If None, will be calculated off of the SERVER_HOSTNAME (default) - JWTPROXY_AUDIENCE = None - - # Torrent management flags - FEATURE_BITTORRENT = False - BITTORRENT_PIECE_SIZE = 512 * 1024 - BITTORRENT_ANNOUNCE_URL = 'https://localhost:6881/announce' - BITTORRENT_FILENAME_PEPPER = str(uuid4()) - BITTORRENT_WEBSEED_LIFETIME = 3600 - - # "Secret" key for generating encrypted paging tokens. Only needed to be secret to - # hide the ID range for production (in which this value is overridden). Should *not* - # be relied upon for secure encryption otherwise. - # This value is a Fernet key and should be 32bytes URL-safe base64 encoded. - PAGE_TOKEN_KEY = '0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58=' - - # The timeout for service key approval. - UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 # One day - - # How long to wait before GCing an expired service key. - EXPIRED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 * 7 # One week - - # The ID of the user account in the database to be used for service audit logs. If none, the - # lowest user in the database will be used. - SERVICE_LOG_ACCOUNT_ID = None - - # The service key ID for the instance service. - # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. - INSTANCE_SERVICE_KEY_SERVICE = 'quay' - - # The location of the key ID file generated for this instance. - INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid') - - # The location of the private key generated for this instance. - # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. - INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem') - - # This instance's service key expiration in minutes. - INSTANCE_SERVICE_KEY_EXPIRATION = 120 - - # Number of minutes between expiration refresh in minutes. Should be the expiration / 2 minus - # some additional window time. - INSTANCE_SERVICE_KEY_REFRESH = 55 - - # The whitelist of client IDs for OAuth applications that allow for direct login. - DIRECT_OAUTH_CLIENTID_WHITELIST = [] - - # URL that specifies the location of the prometheus stats aggregator. - PROMETHEUS_AGGREGATOR_URL = 'http://localhost:9092' - - # Namespace prefix for all prometheus metrics. - PROMETHEUS_NAMESPACE = 'quay' - - # Overridable list of reverse DNS prefixes that are reserved for internal use on labels. - LABEL_KEY_RESERVED_PREFIXES = [] - - # Delays workers from starting until a random point in time between 0 and their regular interval. - STAGGER_WORKERS = True - - # Location of the static marketing site. - STATIC_SITE_BUCKET = None - - # Site key and secret key for using recaptcha. - FEATURE_RECAPTCHA = False - RECAPTCHA_SITE_KEY = None - RECAPTCHA_SECRET_KEY = None - - # Server where TUF metadata can be found - TUF_SERVER = None - - # Prefix to add to metadata e.g. // - TUF_GUN_PREFIX = None - - # Maximum size allowed for layers in the registry. - MAXIMUM_LAYER_SIZE = '20G' - - # Feature Flag: Whether team syncing from the backing auth is enabled. - FEATURE_TEAM_SYNCING = False - TEAM_RESYNC_STALE_TIME = '30m' - TEAM_SYNC_WORKER_FREQUENCY = 60 # seconds - - # Feature Flag: If enabled, non-superusers can setup team syncing. - FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False - - # The default configurable tag expiration time for time machine. 
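
The PAGE_TOKEN_KEY comment above asks for a Fernet key, 32 bytes and URL-safe base64 encoded. A minimal sketch of generating one, assuming the cryptography package is available:

# Fernet.generate_key() returns a fresh 32-byte key, URL-safe base64
# encoded -- the same format as the default PAGE_TOKEN_KEY above.
from cryptography.fernet import Fernet

page_token_key = Fernet.generate_key()
print(page_token_key.decode('ascii'))
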
- DEFAULT_TAG_EXPIRATION = '2w' - - # The options to present in namespace settings for the tag expiration. If empty, no option - # will be given and the default will be displayed read-only. - TAG_EXPIRATION_OPTIONS = ['0s', '1d', '1w', '2w', '4w'] - - # Feature Flag: Whether users can view and change their tag expiration. - FEATURE_CHANGE_TAG_EXPIRATION = True - - # Defines a secret for enabling the health-check endpoint's debug information. - ENABLE_HEALTH_DEBUG_SECRET = None - - # The lifetime for a user recovery token before it becomes invalid. - USER_RECOVERY_TOKEN_LIFETIME = '30m' - - # If specified, when app specific passwords expire by default. - APP_SPECIFIC_TOKEN_EXPIRATION = None - - # Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI. - FEATURE_APP_SPECIFIC_TOKENS = True - - # How long expired app specific tokens should remain visible to users before being automatically - # deleted. Set to None to turn off garbage collection. - EXPIRED_APP_SPECIFIC_TOKEN_GC = '1d' - - # The size of pages returned by the Docker V2 API. - V2_PAGINATION_SIZE = 50 - - # If enabled, ensures that API calls are made with the X-Requested-With header - # when called from a browser. - BROWSER_API_CALLS_XHR_ONLY = True - - # If set to a non-None integer value, the default number of maximum builds for a namespace. - DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None - - # If set to a non-None integer value, the default number of maximum builds for a namespace whose - # creator IP is deemed a threat. - THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None - - # For Billing Support Only: The number of allowed builds on a namespace that has been billed - # successfully. - BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None - - # Configuration for the data model cache. - DATA_MODEL_CACHE_CONFIG = { - 'engine': 'memcached', - 'endpoint': ('127.0.0.1', 18080), - } - - # Defines the number of successive failures of a build trigger's build before the trigger is - # automatically disabled. - SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100 - - # Defines the number of successive internal errors of a build trigger's build before the - # trigger is automatically disabled. - SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5 - - # Defines the delay required (in seconds) before the last_accessed field of a user/robot or access - # token will be updated after the previous update. - LAST_ACCESSED_UPDATE_THRESHOLD_S = 60 - - # Defines the number of results per page used to show search results - SEARCH_RESULTS_PER_PAGE = 10 - - # Defines the maximum number of pages the user can paginate before they are limited - SEARCH_MAX_RESULT_PAGE_COUNT = 10 diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py index fab34ffdd..d539639eb 100644 --- a/config_app/config_endpoints/api/__init__.py +++ b/config_app/config_endpoints/api/__init__.py @@ -22,7 +22,6 @@ class ApiExceptionHandlingApi(Api): @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS) def handle_error(self, error): - print('HANDLING ERROR IN API') return super(ApiExceptionHandlingApi, self).handle_error(error) @@ -38,30 +37,12 @@ def format_date(date): return formatdate(timegm(date.utctimetuple())) -def verify_not_prod(func): - @add_method_metadata('enterprise_only', True) - @wraps(func) - def wrapped(*args, **kwargs): - # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail. 
- # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be - # absolutely sure. - # if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0: - # TODO(config_port) fixme - if False: - logger.error('!!! Super user method called IN PRODUCTION !!!') - raise StandardError() - - return func(*args, **kwargs) - - return wrapped - def resource(*urls, **kwargs): def wrapper(api_resource): if not api_resource: return None - print('registering resource: ', urls) api_resource.registered = True api.add_resource(api_resource, *urls, **kwargs) return api_resource diff --git a/config_app/config_endpoints/api/discovery.py b/config_app/config_endpoints/api/discovery.py index 70246847c..2b0f0ceb3 100644 --- a/config_app/config_endpoints/api/discovery.py +++ b/config_app/config_endpoints/api/discovery.py @@ -1,3 +1,4 @@ +# TODO to extract the discovery stuff into a util at the top level and then use it both here and old discovery.py import logging import sys from collections import OrderedDict diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py index 03cb895e4..29d617cfa 100644 --- a/config_app/config_endpoints/api/suconfig.py +++ b/config_app/config_endpoints/api/suconfig.py @@ -6,7 +6,7 @@ import signal from flask import abort, request from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model -from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname, validate_json_request +from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request from config_app.c_app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY, ip_resolver from auth.auth_context import get_authenticated_user @@ -58,7 +58,6 @@ class SuperUserConfig(ApiResource): }, } - @verify_not_prod @nickname('scGetConfig') def get(self): """ Returns the currently defined configuration, if any. """ @@ -68,13 +67,11 @@ class SuperUserConfig(ApiResource): } @nickname('scUpdateConfig') - @verify_not_prod @validate_json_request('UpdateConfig') def put(self): """ Updates the config override file. """ # Note: This method is called to set the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. - # if not config_provider.config_exists() or SuperUserPermission().can(): if not config_provider.config_exists(): config_object = request.get_json()['config'] hostname = request.get_json()['hostname'] @@ -124,7 +121,6 @@ class SuperUserRegistryStatus(ApiResource): if a database is configured, and if it has any defined users. """ @nickname('scRegistryStatus') - @verify_not_prod def get(self): """ Returns the status of the registry. """ @@ -174,7 +170,6 @@ class _AlembicLogHandler(logging.Handler): @resource('/v1/superuser/setupdb') class SuperUserSetupDatabase(ApiResource): """ Resource for invoking alembic to setup the database. """ - @verify_not_prod @nickname('scSetupDatabase') def get(self): """ Invokes the alembic upgrade process. """ @@ -224,14 +219,12 @@ def get_process_id(name): class SuperUserShutdown(ApiResource): """ Resource for sending a shutdown signal to the container. """ - @verify_not_prod @nickname('scShutdownContainer') def post(self): """ Sends a signal to the phusion init system to shut down the container. 
""" # Note: This method is called to set the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. - # if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can(): if app.config['TESTING'] or not database_has_users(): # Note: We skip if debugging locally. if app.config.get('DEBUGGING') == True: @@ -303,7 +296,7 @@ class SuperUserCreateInitialSuperUser(ApiResource): superusers.register_superuser(username) # Conduct login with that user. - # TODO(config): assuming we don't need to login the user + # TODO(config): figure out if we need validation for checking logged in user stuff # common_login(superuser_uuid) return { @@ -336,14 +329,12 @@ class SuperUserConfigValidate(ApiResource): } @nickname('scValidateConfig') - @verify_not_prod @validate_json_request('ValidateConfig') def post(self, service): """ Validates the given config for the given service. """ # Note: This method is called to validate the database configuration before super users exists, # so we also allow it to be called if there is no valid registry configuration setup. Note that # this is also safe since this method does not access any information not given in the request. - # if not config_provider.config_exists() or SuperUserPermission().can(): if not config_provider.config_exists(): config = request.get_json()['config'] validator_context = ValidatorContext.from_app(app, config, request.get_json().get('password', ''), diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py index 5cb26fc9d..c061adacf 100644 --- a/config_app/config_endpoints/api/superuser.py +++ b/config_app/config_endpoints/api/superuser.py @@ -4,7 +4,7 @@ import pathvalidate from flask import request, jsonify from config_app.config_endpoints.exception import InvalidRequest -from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname +from config_app.config_endpoints.api import resource, ApiResource, nickname from config_app.config_util.ssl import load_certificate, CertInvalidException from config_app.c_app import app, config_provider @@ -19,7 +19,6 @@ class SuperUserCustomCertificate(ApiResource): """ Resource for managing a custom certificate. """ @nickname('uploadCustomCertificate') - @verify_not_prod def post(self, certpath): uploaded_file = request.files['file'] if not uploaded_file: @@ -58,7 +57,6 @@ class SuperUserCustomCertificate(ApiResource): return '', 204 @nickname('deleteCustomCertificate') - @verify_not_prod def delete(self, certpath): cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) config_provider.remove_volume_file(cert_full_path) @@ -70,7 +68,6 @@ class SuperUserCustomCertificates(ApiResource): """ Resource for managing custom certificates. 
""" @nickname('getCustomCertificates') - @verify_not_prod def get(self): has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY) extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY) @@ -107,7 +104,6 @@ class SuperUserCustomCertificates(ApiResource): } -# TODO(config) port this endpoint when (https://github.com/quay/quay/pull/3055) merged to ensure no conflicts @resource('/v1/superuser/keys') class SuperUserServiceKeyManagement(ApiResource): """ Resource for managing service keys.""" @@ -142,7 +138,6 @@ class SuperUserServiceKeyManagement(ApiResource): }, } - @verify_not_prod @nickname('listServiceKeys') def get(self): keys = pre_oci_model.list_all_service_keys() diff --git a/config_app/config_endpoints/api/superuser_models_interface.py b/config_app/config_endpoints/api/superuser_models_interface.py index 23d672f2b..f9971fdd1 100644 --- a/config_app/config_endpoints/api/superuser_models_interface.py +++ b/config_app/config_endpoints/api/superuser_models_interface.py @@ -1,17 +1,8 @@ -import json from abc import ABCMeta, abstractmethod from collections import namedtuple -from datetime import datetime - -from dateutil.relativedelta import relativedelta from six import add_metaclass -from tzlocal import get_localzone -# from app import avatar, superusers -# from buildtrigger.basehandler import BuildTriggerHandler -from data import model from config_app.config_endpoints.api import format_date -from util.morecollections import AttrDict def user_view(user): @@ -22,46 +13,6 @@ def user_view(user): } -# class BuildTrigger( -# namedtuple('BuildTrigger', ['uuid', 'service_name', 'pull_robot', 'can_read', 'can_admin', 'for_build'])): -# """ -# BuildTrigger represent a trigger that is associated with a build -# :type uuid: string -# :type service_name: string -# :type pull_robot: User -# :type can_read: boolean -# :type can_admin: boolean -# :type for_build: boolean -# """ -# -# def to_dict(self): -# if not self.uuid: -# return None -# -# build_trigger = BuildTriggerHandler.get_handler(self) -# build_source = build_trigger.config.get('build_source') -# -# repo_url = build_trigger.get_repository_url() if build_source else None -# can_read = self.can_read or self.can_admin -# -# trigger_data = { -# 'id': self.uuid, -# 'service': self.service_name, -# 'is_active': build_trigger.is_active(), -# -# 'build_source': build_source if can_read else None, -# 'repository_url': repo_url if can_read else None, -# -# 'config': build_trigger.config if self.can_admin else {}, -# 'can_invoke': self.can_admin, -# } -# -# if not self.for_build and self.can_admin and self.pull_robot: -# trigger_data['pull_robot'] = user_view(self.pull_robot) -# -# return trigger_data - - class RepositoryBuild(namedtuple('RepositoryBuild', ['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name', 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name', @@ -185,9 +136,6 @@ class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robo 'username': self.username, 'email': self.email, 'verified': self.verified, - # todo(config) remove or add these lines from app - # 'avatar': avatar.get_data_for_user(self), - # 'super_user': superusers.is_superuser(self.username), 'enabled': self.enabled, } @@ -205,108 +153,9 @@ class Organization(namedtuple('Organization', ['username', 'email'])): return { 'name': self.username, 'email': self.email, - # todo(config) remove or add these lines from app - # 'avatar': avatar.get_data_for_org(self), } 
-class LogEntry( - namedtuple('LogEntry', [ - 'metadata_json', 'ip', 'datetime', 'performer_email', 'performer_username', 'performer_robot', - 'account_organization', 'account_username', 'account_email', 'account_robot', 'kind', - ])): - """ - LogEntry a single log entry. - :type metadata_json: string - :type ip: string - :type datetime: string - :type performer_email: int - :type performer_username: string - :type performer_robot: boolean - :type account_organization: boolean - :type account_username: string - :type account_email: string - :type account_robot: boolean - :type kind_id: int - """ - - def to_dict(self): - view = { - 'kind': self.kind, - 'metadata': json.loads(self.metadata_json), - 'ip': self.ip, - 'datetime': format_date(self.datetime), - } - - if self.performer_username: - performer = AttrDict({'username': self.performer_username, 'email': self.performer_email}) - performer.robot = None - if self.performer_robot: - performer.robot = self.performer_robot - - view['performer'] = { - 'kind': 'user', - 'name': self.performer_username, - 'is_robot': self.performer_robot, - # todo(config) remove or add these lines from app - # 'avatar': avatar.get_data_for_user(performer), - } - - if self.account_username: - account = AttrDict({'username': self.account_username, 'email': self.account_email}) - if self.account_organization: - - view['namespace'] = { - 'kind': 'org', - 'name': self.account_username, - # todo(config) remove or add these lines from app - # 'avatar': avatar.get_data_for_org(account), - } - else: - account.robot = None - if self.account_robot: - account.robot = self.account_robot - view['namespace'] = { - 'kind': 'user', - 'name': self.account_username, - # todo(config) remove or add these lines from app - # 'avatar': avatar.get_data_for_user(account), - } - - return view - - -class LogEntryPage( - namedtuple('LogEntryPage', ['logs', 'next_page_token'])): - """ - LogEntryPage represents a single page of logs. - :type logs: [LogEntry] - :type next_page_token: {any -> any} - """ - - -class AggregatedLogEntry( - namedtuple('AggregatedLogEntry', ['count', 'kind_id', 'day', 'start_time'])): - """ - AggregatedLogEntry represents an aggregated view of logs. - :type count: int - :type kind_id: int - :type day: string - :type start_time: Date - """ - - def to_dict(self): - synthetic_date = datetime(self.start_time.year, self.start_time.month, int(self.day), tzinfo=get_localzone()) - if synthetic_date.day < self.start_time.day: - synthetic_date = synthetic_date + relativedelta(months=1) - kinds = model.log.get_log_entry_kinds() - view = { - 'kind': kinds[self.kind_id], - 'count': self.count, - 'datetime': format_date(synthetic_date), - } - - return view @add_metaclass(ABCMeta) @@ -314,135 +163,8 @@ class SuperuserDataInterface(object): """ Interface that represents all data store interactions required by a superuser api. """ - - @abstractmethod - def get_logs_query(self, start_time, end_time, page_token=None): - """ - Returns a LogEntryPage. 
- """ - - @abstractmethod - def get_aggregated_logs(self, start_time, end_time): - """ - Returns a list of AggregatedLogEntry - """ - - @abstractmethod - def get_organizations(self): - """ - Returns a list of Organization - """ - - @abstractmethod - def get_active_users(self): - """ - Returns a list of User - """ - - @abstractmethod - def create_install_user(self, username, password, email): - """ - Returns the created user and confirmation code for email confirmation - """ - - @abstractmethod - def get_nonrobot_user(self, username): - """ - Returns a User - """ - - @abstractmethod - def create_reset_password_email_code(self, email): - """ - Returns a recover password code - """ - - @abstractmethod - def mark_user_for_deletion(self, username): - """ - Returns None - """ - - @abstractmethod - def change_password(self, username, password): - """ - Returns None - """ - - @abstractmethod - def update_email(self, username, email, auto_verify): - """ - Returns None - """ - - @abstractmethod - def update_enabled(self, username, enabled): - """ - Returns None - """ - - @abstractmethod - def take_ownership(self, namespace, authed_user): - """ - Returns id of entity and whether the entity was a user - """ - - @abstractmethod - def mark_organization_for_deletion(self, name): - """ - Returns None - """ - - @abstractmethod - def change_organization_name(self, old_org_name, new_org_name): - """ - Returns updated Organization - """ - @abstractmethod def list_all_service_keys(self): """ Returns a list of service keys """ - - @abstractmethod - def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None): - """ - Returns a tuple of private key and public key id - """ - - @abstractmethod - def approve_service_key(self, kid, approver, approval_type, notes=''): - """ - Returns the approved Key - """ - - @abstractmethod - def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): - """ - Returns ServiceKey - """ - - @abstractmethod - def set_key_expiration(self, kid, expiration_date): - """ - Returns None - """ - - @abstractmethod - def update_service_key(self, kid, name=None, metadata=None): - """ - Returns None - """ - - @abstractmethod - def delete_service_key(self, kid): - """ - Returns deleted ServiceKey - """ - - @abstractmethod - def get_repository_build(self, uuid): - """ - Returns RepositoryBuild - """ diff --git a/config_app/config_endpoints/api/superuser_models_pre_oci.py b/config_app/config_endpoints/api/superuser_models_pre_oci.py index 352c8f38d..3002d5686 100644 --- a/config_app/config_endpoints/api/superuser_models_pre_oci.py +++ b/config_app/config_endpoints/api/superuser_models_pre_oci.py @@ -2,36 +2,6 @@ from data import model from config_app.config_endpoints.api.superuser_models_interface import SuperuserDataInterface, User, ServiceKey, Approval -# -# def _create_log(log, log_kind): -# account_organization = None -# account_username = None -# account_email = None -# account_robot = None -# try: -# account_organization = log.account.organization -# account_username = log.account.username -# account_email = log.account.email -# account_robot = log.account.robot -# except AttributeError: -# pass -# -# performer_robot = None -# performer_username = None -# performer_email = None -# -# try: -# performer_robot = log.performer.robot -# performer_username = log.performer.username -# performer_email = log.performer.email -# except AttributeError: -# pass -# -# return LogEntry(log.metadata_json, log.ip, log.datetime, 
performer_email, performer_username, -# performer_robot, account_organization, account_username, -# account_email, account_robot, log_kind[log.kind_id]) - - def _create_user(user): if user is None: return None @@ -46,18 +16,6 @@ def _create_key(key): return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date, key.rotation_duration, approval) -# -# -# class ServiceKeyDoesNotExist(Exception): -# pass -# -# -# class ServiceKeyAlreadyApproved(Exception): -# pass -# -# -# class InvalidRepositoryBuildException(Exception): -# pass class PreOCIModel(SuperuserDataInterface): @@ -65,210 +23,9 @@ class PreOCIModel(SuperuserDataInterface): PreOCIModel implements the data model for the SuperUser using a database schema before it was changed to support the OCI specification. """ - def get_logs_query(self, start_time, end_time, page_token=None): - pass - - def get_aggregated_logs(self, start_time, end_time): - pass - - def get_organizations(self): - pass - - def get_active_users(self): - pass - - def create_install_user(self, username, password, email): - pass - - def get_nonrobot_user(self, username): - pass - - def create_reset_password_email_code(self, email): - pass - - def mark_user_for_deletion(self, username): - pass - - def change_password(self, username, password): - pass - - def update_email(self, username, email, auto_verify): - pass - - def update_enabled(self, username, enabled): - pass - - def take_ownership(self, namespace, authed_user): - pass - - def mark_organization_for_deletion(self, name): - pass - - def change_organization_name(self, old_org_name, new_org_name): - pass - - def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None): - pass - - def approve_service_key(self, kid, approver, approval_type, notes=''): - pass - - def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): - pass - - def set_key_expiration(self, kid, expiration_date): - pass - - def update_service_key(self, kid, name=None, metadata=None): - pass - - def delete_service_key(self, kid): - pass - - def get_repository_build(self, uuid): - pass - - # def get_repository_build(self, uuid): - # try: - # build = model.build.get_repository_build(uuid) - # except model.InvalidRepositoryBuildException as e: - # raise InvalidRepositoryBuildException(e.message) - # - # repo_namespace = build.repository_namespace_user_username - # repo_name = build.repository_name - # - # can_read = ReadRepositoryPermission(repo_namespace, repo_name).can() - # can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can() - # can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can() - # job_config = get_job_config(build.job_config) - # phase, status, error = _get_build_status(build) - # url = userfiles.get_file_url(self.resource_key, request.remote_addr, requires_cors=True) - # - # return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace, repo_name, can_write, can_read, - # _create_user(build.pull_robot), build.resource_key, - # BuildTrigger(build.trigger.uuid, build.trigger.service.name, - # _create_user(build.trigger.pull_robot), can_read, can_admin, True), - # build.display_name, build.display_name, build.started, job_config, phase, status, error, url) - # - # def delete_service_key(self, kid): - # try: - # key = model.service_keys.delete_service_key(kid) - # except model.ServiceKeyDoesNotExist: - # raise ServiceKeyDoesNotExist - # return _create_key(key) - # - # def 
update_service_key(self, kid, name=None, metadata=None): - # model.service_keys.update_service_key(kid, name, metadata) - # - # def set_key_expiration(self, kid, expiration_date): - # model.service_keys.set_key_expiration(kid, expiration_date) - # - # def get_service_key(self, kid, service=None, alive_only=True, approved_only=True): - # try: - # key = model.service_keys.get_service_key(kid, approved_only=approved_only, alive_only=alive_only) - # return _create_key(key) - # except model.ServiceKeyDoesNotExist: - # raise ServiceKeyDoesNotExist - # - # def approve_service_key(self, kid, approver, approval_type, notes=''): - # try: - # key = model.service_keys.approve_service_key(kid, approver, approval_type, notes=notes) - # return _create_key(key) - # except model.ServiceKeyDoesNotExist: - # raise ServiceKeyDoesNotExist - # except model.ServiceKeyAlreadyApproved: - # raise ServiceKeyAlreadyApproved - # - # def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None): - # (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name) - # - # return private_key, key.kid - def list_all_service_keys(self): keys = model.service_keys.list_all_keys() return [_create_key(key) for key in keys] - # def change_organization_name(self, old_org_name, new_org_name): - # org = model.organization.get_organization(old_org_name) - # if new_org_name is not None: - # org = model.user.change_username(org.id, new_org_name) - # - # return Organization(org.username, org.email) - # - # def mark_organization_for_deletion(self, name): - # org = model.organization.get_organization(name) - # model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue, force=True) - # - # def take_ownership(self, namespace, authed_user): - # entity = model.user.get_user_or_org(namespace) - # if entity is None: - # return None, False - # - # was_user = not entity.organization - # if entity.organization: - # # Add the superuser as an admin to the owners team of the org. - # model.organization.add_user_as_admin(authed_user, entity) - # else: - # # If the entity is a user, convert it to an organization and add the current superuser - # # as the admin. 
- # model.organization.convert_user_to_organization(entity, authed_user) - # return entity.id, was_user - # - # def update_enabled(self, username, enabled): - # user = model.user.get_nonrobot_user(username) - # model.user.update_enabled(user, bool(enabled)) - # - # def update_email(self, username, email, auto_verify): - # user = model.user.get_nonrobot_user(username) - # model.user.update_email(user, email, auto_verify) - # - # def change_password(self, username, password): - # user = model.user.get_nonrobot_user(username) - # model.user.change_password(user, password) - # - # def mark_user_for_deletion(self, username): - # user = model.user.get_nonrobot_user(username) - # model.user.mark_namespace_for_deletion(user, all_queues, namespace_gc_queue, force=True) - # - # def create_reset_password_email_code(self, email): - # code = model.user.create_reset_password_email_code(email) - # return code.code - # - # def get_nonrobot_user(self, username): - # user = model.user.get_nonrobot_user(username) - # if user is None: - # return None - # return _create_user(user) - # - # def create_install_user(self, username, password, email): - # prompts = model.user.get_default_user_prompts(features) - # user = model.user.create_user(username, password, email, auto_verify=not features.MAILING, - # email_required=features.MAILING, prompts=prompts) - # - # return_user = _create_user(user) - # # If mailing is turned on, send the user a verification email. - # if features.MAILING: - # confirmation = model.user.create_confirm_email_code(user) - # return return_user, confirmation.code - # return return_user, '' - # - # def get_active_users(self, disabled=True): - # users = model.user.get_active_users(disabled=disabled) - # return [_create_user(user) for user in users] - # - # def get_organizations(self): - # return [Organization(org.username, org.email) for org in model.organization.get_organizations()] - # - # def get_aggregated_logs(self, start_time, end_time): - # aggregated_logs = model.log.get_aggregated_logs(start_time, end_time) - # return [AggregatedLogEntry(log.count, log.kind_id, log.day, start_time) for log in aggregated_logs] - # - # def get_logs_query(self, start_time, end_time, page_token=None): - # logs_query = model.log.get_logs_query(start_time, end_time) - # logs, next_page_token = model.modelutil.paginate(logs_query, database.LogEntry, descending=True, - # page_token=page_token, limit=20) - # kinds = model.log.get_log_entry_kinds() - # return LogEntryPage([_create_log(log, kinds) for log in logs], next_page_token) - pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py index d8a6449c3..68d573873 100644 --- a/config_app/config_endpoints/api/user.py +++ b/config_app/config_endpoints/api/user.py @@ -1,4 +1,6 @@ +from auth.auth_context import get_authenticated_user from config_app.config_endpoints.api import resource, ApiResource, nickname +from config_app.config_endpoints.api.superuser_models_interface import user_view @resource('/v1/user/') @@ -8,11 +10,10 @@ class User(ApiResource): @nickname('getLoggedInUser') def get(self): """ Get user information for the authenticated user. 
""" - # user = get_authenticated_user() + user = get_authenticated_user() + # TODO(config): figure out if we need user validation + # if user is None or user.organization or not UserReadPermission(user.username).can(): + # raise InvalidToken("Requires authentication", payload={'session_required': False}) - # return user_view(user) - return { - 'anonymous': False, - # 'username': user.username, - } + return user_view(user) diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py index 160cf7068..2cb1c2dc8 100644 --- a/config_app/config_endpoints/common.py +++ b/config_app/config_endpoints/common.py @@ -5,6 +5,8 @@ import re from flask import make_response, render_template from flask_restful import reqparse +from config_app._init_config import ROOT_DIR + def truthy_bool(param): return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'} @@ -30,9 +32,9 @@ def _list_files(path, extension, contains=""): def join_path(dp, f): # Remove the static/ prefix. It is added in the template. - return os.path.join(dp, f)[len('static/'):] + return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):] - filepath = os.path.join('static/', path) + filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path) return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] diff --git a/config_app/config_endpoints/exception.py b/config_app/config_endpoints/exception.py index 20b0976b3..33cb161d2 100644 --- a/config_app/config_endpoints/exception.py +++ b/config_app/config_endpoints/exception.py @@ -5,30 +5,7 @@ from werkzeug.exceptions import HTTPException class ApiErrorType(Enum): - external_service_timeout = 'external_service_timeout' invalid_request = 'invalid_request' - invalid_response = 'invalid_response' - invalid_token = 'invalid_token' - expired_token = 'expired_token' - insufficient_scope = 'insufficient_scope' - fresh_login_required = 'fresh_login_required' - exceeds_license = 'exceeds_license' - not_found = 'not_found' - downstream_issue = 'downstream_issue' - - -ERROR_DESCRIPTION = { - ApiErrorType.external_service_timeout.value: "An external service timed out. Retrying the request may resolve the issue.", - ApiErrorType.invalid_request.value: "The request was invalid. 
It may have contained invalid values or was improperly formatted.", - ApiErrorType.invalid_response.value: "The response was invalid.", - ApiErrorType.invalid_token.value: "The access token provided was invalid.", - ApiErrorType.expired_token.value: "The access token provided has expired.", - ApiErrorType.insufficient_scope.value: "The access token did not have sufficient scope to access the requested resource.", - ApiErrorType.fresh_login_required.value: "The action requires a fresh login to succeed.", - ApiErrorType.exceeds_license.value: "The action was refused because the current license does not allow it.", - ApiErrorType.not_found.value: "The resource was not found.", - ApiErrorType.downstream_issue.value: "An error occurred in a downstream service.", -} class ApiException(HTTPException): @@ -79,10 +56,6 @@ class ApiException(HTTPException): return rv -class ExternalServiceError(ApiException): - def __init__(self, error_description, payload=None): - ApiException.__init__(self, ApiErrorType.external_service_timeout, 520, error_description, payload) - class InvalidRequest(ApiException): def __init__(self, error_description, payload=None): @@ -92,32 +65,3 @@ class InvalidRequest(ApiException): class InvalidResponse(ApiException): def __init__(self, error_description, payload=None): ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload) - - -class InvalidToken(ApiException): - def __init__(self, error_description, payload=None): - ApiException.__init__(self, ApiErrorType.invalid_token, 401, error_description, payload) - -class ExpiredToken(ApiException): - def __init__(self, error_description, payload=None): - ApiException.__init__(self, ApiErrorType.expired_token, 401, error_description, payload) - - -class FreshLoginRequired(ApiException): - def __init__(self, payload=None): - ApiException.__init__(self, ApiErrorType.fresh_login_required, 401, "Requires fresh login", payload) - - -class ExceedsLicenseException(ApiException): - def __init__(self, payload=None): - ApiException.__init__(self, ApiErrorType.exceeds_license, 402, 'Payment Required', payload) - - -class NotFound(ApiException): - def __init__(self, payload=None): - ApiException.__init__(self, ApiErrorType.not_found, 404, 'Not Found', payload) - - -class DownstreamIssue(ApiException): - def __init__(self, error_description, payload=None): - ApiException.__init__(self, ApiErrorType.downstream_issue, 520, error_description, payload) diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py index 541aa3df3..b9aba06c4 100644 --- a/config_app/config_endpoints/setup_web.py +++ b/config_app/config_endpoints/setup_web.py @@ -1,13 +1,15 @@ from flask import Blueprint +from cachetools import lru_cache + from config_app.config_endpoints.common import render_page_template from config_app.config_endpoints.api.discovery import generate_route_data -# from config_util.cache import no_cache +from config_app.config_endpoints.api import no_cache setup_web = Blueprint('setup_web', __name__, template_folder='templates') -# @lru_cache(maxsize=1) +@lru_cache(maxsize=1) def _get_route_data(): return generate_route_data() @@ -16,7 +18,7 @@ def render_page_template_with_routedata(name, *args, **kwargs): return render_page_template(name, _get_route_data(), *args, **kwargs) -# @no_cache +@no_cache @setup_web.route('/', methods=['GET'], defaults={'path': ''}) def index(path, **kwargs): return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs) 
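
The setup_web.py hunk above re-enables @lru_cache(maxsize=1) so route discovery runs once per process. A minimal sketch of that behavior using the Python 3 stdlib functools (the patch imports lru_cache from cachetools, but the memoization shown here is analogous):

# Memoizing expensive route discovery: the body runs on the first call
# only; every later call returns the cached result.
from functools import lru_cache

@lru_cache(maxsize=1)
def _get_route_data():
  print('running discovery...')  # printed exactly once per process
  return {'routes': ['/api/v1/superuser/config']}

_get_route_data()  # computes and caches
_get_route_data()  # cache hit; no recompute
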
diff --git a/config_app/config_test/testconfig.py b/config_app/config_test/testconfig.py deleted file mode 100644 index 2ba731589..000000000 --- a/config_app/config_test/testconfig.py +++ /dev/null @@ -1,108 +0,0 @@ -import os - -from datetime import datetime, timedelta -from tempfile import NamedTemporaryFile - -from config import DefaultConfig - - -class FakeTransaction(object): - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - pass - - -TEST_DB_FILE = NamedTemporaryFile(delete=True) - - -class TestConfig(DefaultConfig): - TESTING = True - SECRET_KEY = 'a36c9d7d-25a9-4d3f-a586-3d2f8dc40a83' - BILLING_TYPE = 'FakeStripe' - - TEST_DB_FILE = TEST_DB_FILE - DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///{0}'.format(TEST_DB_FILE.name)) - DB_CONNECTION_ARGS = { - 'threadlocals': True, - 'autorollback': True, - } - - @staticmethod - def create_transaction(db): - return FakeTransaction() - - DB_TRANSACTION_FACTORY = create_transaction - - DISTRIBUTED_STORAGE_CONFIG = {'local_us': ['FakeStorage', {}], 'local_eu': ['FakeStorage', {}]} - DISTRIBUTED_STORAGE_PREFERENCE = ['local_us'] - - BUILDLOGS_MODULE_AND_CLASS = ('test.testlogs', 'testlogs.TestBuildLogs') - BUILDLOGS_OPTIONS = ['devtable', 'building', 'deadbeef-dead-beef-dead-beefdeadbeef', False] - - USERFILES_LOCATION = 'local_us' - - FEATURE_SUPER_USERS = True - FEATURE_BILLING = True - FEATURE_MAILING = True - SUPER_USERS = ['devtable'] - - LICENSE_USER_LIMIT = 500 - LICENSE_EXPIRATION = datetime.now() + timedelta(weeks=520) - LICENSE_EXPIRATION_WARNING = datetime.now() + timedelta(weeks=520) - - FEATURE_GITHUB_BUILD = True - FEATURE_BITTORRENT = True - FEATURE_ACI_CONVERSION = True - - CLOUDWATCH_NAMESPACE = None - - FEATURE_SECURITY_SCANNER = True - FEATURE_SECURITY_NOTIFICATIONS = True - SECURITY_SCANNER_ENDPOINT = 'http://fakesecurityscanner/' - SECURITY_SCANNER_API_VERSION = 'v1' - SECURITY_SCANNER_ENGINE_VERSION_TARGET = 1 - SECURITY_SCANNER_API_TIMEOUT_SECONDS = 1 - - FEATURE_SIGNING = True - - SIGNING_ENGINE = 'gpg2' - - GPG2_PRIVATE_KEY_NAME = 'EEB32221' - GPG2_PRIVATE_KEY_FILENAME = 'test/data/signing-private.gpg' - GPG2_PUBLIC_KEY_FILENAME = 'test/data/signing-public.gpg' - - INSTANCE_SERVICE_KEY_KID_LOCATION = 'test/data/test.kid' - INSTANCE_SERVICE_KEY_LOCATION = 'test/data/test.pem' - - PROMETHEUS_AGGREGATOR_URL = None - - GITHUB_LOGIN_CONFIG = {} - GOOGLE_LOGIN_CONFIG = {} - - FEATURE_GITHUB_LOGIN = True - FEATURE_GOOGLE_LOGIN = True - - TESTOIDC_LOGIN_CONFIG = { - 'CLIENT_ID': 'foo', - 'CLIENT_SECRET': 'bar', - 'OIDC_SERVER': 'http://fakeoidc', - 'DEBUGGING': True, - 'LOGIN_BINDING_FIELD': 'sub', - } - - RECAPTCHA_SITE_KEY = 'somekey' - RECAPTCHA_SECRET_KEY = 'somesecretkey' - - FEATURE_APP_REGISTRY = True - FEATURE_TEAM_SYNCING = True - FEATURE_CHANGE_TAG_EXPIRATION = True - - TAG_EXPIRATION_OPTIONS = ['0s', '1s', '1d', '1w', '2w', '4w'] - - DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None - - DATA_MODEL_CACHE_CONFIG = { - 'engine': 'inmemory', - } diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py index 0ed1e9d35..8929845c8 100644 --- a/config_app/config_util/config/basefileprovider.py +++ b/config_app/config_util/config/basefileprovider.py @@ -2,7 +2,7 @@ import os import logging from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, - CannotWriteConfigException) + CannotWriteConfigException) logger = logging.getLogger(__name__) diff --git 
a/config_app/config_util/config/baseprovider.py b/config_app/config_util/config/baseprovider.py index 6fef3b870..5a616895f 100644 --- a/config_app/config_util/config/baseprovider.py +++ b/config_app/config_util/config/baseprovider.py @@ -6,7 +6,7 @@ from six import add_metaclass from jsonschema import validate, ValidationError -from config_app.config_util.config.schema import CONFIG_SCHEMA +from util.config.schema import CONFIG_SCHEMA logger = logging.getLogger(__name__) diff --git a/config_app/config_util/config/schema.py b/config_app/config_util/config/schema.py deleted file mode 100644 index db38b8872..000000000 --- a/config_app/config_util/config/schema.py +++ /dev/null @@ -1,914 +0,0 @@ -# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should -# not be documented for external users. These will generally be used for internal test or only -# given to customers when they have been briefed on the side effects of using them. -INTERNAL_ONLY_PROPERTIES = { - '__module__', - '__doc__', - 'create_transaction', - - 'TESTING', - 'SEND_FILE_MAX_AGE_DEFAULT', - - 'REPLICATION_QUEUE_NAME', - 'DOCKERFILE_BUILD_QUEUE_NAME', - 'CHUNK_CLEANUP_QUEUE_NAME', - 'SECSCAN_NOTIFICATION_QUEUE_NAME', - 'SECURITY_SCANNER_ISSUER_NAME', - 'NOTIFICATION_QUEUE_NAME', - 'NAMESPACE_GC_QUEUE_NAME', - - 'FEATURE_BILLING', - 'FEATURE_SUPPORT_CHAT', - 'BILLING_TYPE', - - 'INSTANCE_SERVICE_KEY_LOCATION', - 'INSTANCE_SERVICE_KEY_REFRESH', - 'INSTANCE_SERVICE_KEY_SERVICE', - 'INSTANCE_SERVICE_KEY_KID_LOCATION', - 'INSTANCE_SERVICE_KEY_EXPIRATION', - 'UNAPPROVED_SERVICE_KEY_TTL_SEC', - 'EXPIRED_SERVICE_KEY_TTL_SEC', - 'REGISTRY_JWT_AUTH_MAX_FRESH_S', - - 'BITTORRENT_FILENAME_PEPPER', - 'BITTORRENT_WEBSEED_LIFETIME', - - 'SERVICE_LOG_ACCOUNT_ID', - 'BUILDLOGS_OPTIONS', - 'LIBRARY_NAMESPACE', - 'STAGGER_WORKERS', - 'QUEUE_WORKER_METRICS_REFRESH_SECONDS', - 'PUSH_TEMP_TAG_EXPIRATION_SEC', - 'GARBAGE_COLLECTION_FREQUENCY', - 'PAGE_TOKEN_KEY', - 'BUILD_MANAGER', - 'JWTPROXY_AUDIENCE', - 'SYSTEM_SERVICE_BLACKLIST', - 'JWTPROXY_SIGNER', - 'SECURITY_SCANNER_INDEXING_MIN_ID', - 'STATIC_SITE_BUCKET', - 'LABEL_KEY_RESERVED_PREFIXES', - 'TEAM_SYNC_WORKER_FREQUENCY', - 'DOCUMENTATION_METADATA', - 'DOCUMENTATION_LOCATION', - 'JSONIFY_PRETTYPRINT_REGULAR', - 'SYSTEM_LOGS_FILE', - 'SYSTEM_LOGS_PATH', - 'SYSTEM_SERVICES_PATH', - 'TUF_GUN_PREFIX', - 'LOGGING_LEVEL', - 'SIGNED_GRANT_EXPIRATION_SEC', - 'PROMETHEUS_AGGREGATOR_URL', - 'DB_TRANSACTION_FACTORY', - 'NOTIFICATION_SEND_TIMEOUT', - 'QUEUE_METRICS_TYPE', - 'MAIL_FAIL_SILENTLY', - 'LOCAL_OAUTH_HANDLER', - 'USE_CDN', - 'ANALYTICS_TYPE', - 'LAST_ACCESSED_UPDATE_THRESHOLD_S', - - 'EXCEPTION_LOG_TYPE', - 'SENTRY_DSN', - 'SENTRY_PUBLIC_DSN', - - 'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT', - 'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT', - - 'SECURITY_SCANNER_ENDPOINT_BATCH', - 'SECURITY_SCANNER_API_TIMEOUT_SECONDS', - 'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS', - 'SECURITY_SCANNER_ENGINE_VERSION_TARGET', - 'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS', - 'SECURITY_SCANNER_API_VERSION', - - 'DATA_MODEL_CACHE_CONFIG', - - # TODO: move this into the schema once we support signing in QE. 
- 'FEATURE_SIGNING', - 'TUF_SERVER', -} - -CONFIG_SCHEMA = { - 'type': 'object', - 'description': 'Schema for Quay configuration', - 'required': [ - 'PREFERRED_URL_SCHEME', - 'SERVER_HOSTNAME', - 'DB_URI', - 'AUTHENTICATION_TYPE', - 'DISTRIBUTED_STORAGE_CONFIG', - 'BUILDLOGS_REDIS', - 'USER_EVENTS_REDIS', - 'DISTRIBUTED_STORAGE_PREFERENCE', - 'DEFAULT_TAG_EXPIRATION', - 'TAG_EXPIRATION_OPTIONS', - ], - 'properties': { - # Hosting. - 'PREFERRED_URL_SCHEME': { - 'type': 'string', - 'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`', - 'enum': ['http', 'https'], - 'x-example': 'https', - }, - 'SERVER_HOSTNAME': { - 'type': 'string', - 'description': 'The URL at which Quay is accessible, without the scheme.', - 'x-example': 'quay.io', - }, - 'EXTERNAL_TLS_TERMINATION': { - 'type': 'boolean', - 'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.', - 'x-example': True, - }, - - # User-visible configuration. - 'REGISTRY_TITLE': { - 'type': 'string', - 'description': 'If specified, the long-form title for the registry. Defaults to `Quay Enterprise`.', - 'x-example': 'Corp Container Service', - }, - 'REGISTRY_TITLE_SHORT': { - 'type': 'string', - 'description': 'If specified, the short-form title for the registry. Defaults to `Quay Enterprise`.', - 'x-example': 'CCS', - }, - 'CONTACT_INFO': { - 'type': 'array', - 'minItems': 1, - 'uniqueItems': True, - 'description': 'If specified, contact information to display on the contact page. ' + - 'If only a single piece of contact information is specified, the contact footer will link directly.', - 'items': [ - { - 'type': 'string', - 'pattern': '^mailto:(.)+$', - 'x-example': 'mailto:support@quay.io', - 'description': 'Adds a link to send an e-mail', - }, - { - 'type': 'string', - 'pattern': '^irc://(.)+$', - 'x-example': 'irc://chat.freenode.net:6665/quay', - 'description': 'Adds a link to visit an IRC chat room', - }, - { - 'type': 'string', - 'pattern': '^tel:(.)+$', - 'x-example': 'tel:+1-888-930-3475', - 'description': 'Adds a link to call a phone number', - }, - { - 'type': 'string', - 'pattern': '^http(s)?://(.)+$', - 'x-example': 'https://twitter.com/quayio', - 'description': 'Adds a link to a defined URL', - }, - ], - }, - 'SEARCH_RESULTS_PER_PAGE' : { - 'type': 'number', - 'description': 'Number of results returned per page by search page. Defaults to 10', - 'x-example': 10, - }, - 'SEARCH_MAX_RESULT_PAGE_COUNT' : { - 'type': 'number', - 'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10', - 'x-example': 10, - }, - - # E-mail. - 'FEATURE_MAILING': { - 'type': 'boolean', - 'description': 'Whether emails are enabled. Defaults to True', - 'x-example': True, - }, - 'MAIL_SERVER': { - 'type': 'string', - 'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.', - 'x-example': 'smtp.somedomain.com', - }, - 'MAIL_USE_TLS': { - 'type': 'boolean', - 'description': 'If specified, whether to use TLS for sending e-mails.', - 'x-example': True, - }, - 'MAIL_PORT': { - 'type': 'number', - 'description': 'The SMTP port to use. 
If not specified, defaults to 587.', - 'x-example': 588, - }, - 'MAIL_USERNAME': { - 'type': ['string', 'null'], - 'description': 'The SMTP username to use when sending e-mails.', - 'x-example': 'myuser', - }, - 'MAIL_PASSWORD': { - 'type': ['string', 'null'], - 'description': 'The SMTP password to use when sending e-mails.', - 'x-example': 'mypassword', - }, - 'MAIL_DEFAULT_SENDER': { - 'type': ['string', 'null'], - 'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `support@quay.io`.', - 'x-example': 'support@myco.com', - }, - - # Database. - 'DB_URI': { - 'type': 'string', - 'description': 'The URI at which to access the database, including any credentials.', - 'x-example': 'mysql+pymysql://username:password@dns.of.database/quay', - 'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495', - }, - 'DB_CONNECTION_ARGS': { - 'type': 'object', - 'description': 'If specified, connection arguments for the database such as timeouts and SSL.', - 'properties': { - 'threadlocals': { - 'type': 'boolean', - 'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`' - }, - 'autorollback': { - 'type': 'boolean', - 'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`' - }, - 'ssl': { - 'type': 'object', - 'description': 'SSL connection configuration', - 'properties': { - 'ca': { - 'type': 'string', - 'description': '*Absolute container path* to the CA certificate to use for SSL connections', - 'x-example': 'conf/stack/ssl-ca-cert.pem', - }, - }, - 'required': ['ca'], - }, - }, - 'required': ['threadlocals', 'autorollback'], - }, - 'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': { - 'type': 'boolean', - 'description': 'If true, pulls in which the pull audit log entry cannot be written will ' + - 'still succeed. Useful if the database can fallback into a read-only state ' + - 'and it is desired for pulls to continue during that time. Defaults to False.', - 'x-example': True, - }, - - # Storage. - 'FEATURE_STORAGE_REPLICATION': { - 'type': 'boolean', - 'description': 'Whether to automatically replicate between storage engines. Defaults to False', - 'x-example': False, - }, - 'FEATURE_PROXY_STORAGE': { - 'type': 'boolean', - 'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False', - 'x-example': False, - }, - 'MAXIMUM_LAYER_SIZE': { - 'type': 'string', - 'description': 'Maximum allowed size of an image layer. Defaults to 20G', - 'x-example': '100G', - 'pattern': '^[0-9]+(G|M)$', - }, - 'DISTRIBUTED_STORAGE_CONFIG': { - 'type': 'object', - 'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' + - ' for a storage engine, with the value being a tuple of the type and ' + - ' configuration for that engine.', - 'x-example': { - 'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}], - }, - 'items': { - 'type': 'array', - }, - }, - 'DISTRIBUTED_STORAGE_PREFERENCE': { - 'type': 'array', - 'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' + - 'use. 
A preferred engine means it is first checked for pullig and images are ' + - 'pushed to it.', - 'items': { - 'type': 'string', - 'uniqueItems': True, - }, - 'x-example': ['s3_us_east', 's3_us_west'], - }, - 'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': { - 'type': 'array', - 'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' + - 'images should be fully replicated, by default, to all other storage engines.', - 'items': { - 'type': 'string', - 'uniqueItems': True, - }, - 'x-example': ['s3_us_east', 's3_us_west'], - }, - 'USERFILES_LOCATION': { - 'type': 'string', - 'description': 'ID of the storage engine in which to place user-uploaded files', - 'x-example': 's3_us_east', - }, - 'USERFILES_PATH': { - 'type': 'string', - 'description': 'Path under storage in which to place user-uploaded files', - 'x-example': 'userfiles', - }, - 'ACTION_LOG_ARCHIVE_LOCATION': { - 'type': 'string', - 'description': 'If action log archiving is enabled, the storage engine in which to place the ' + - 'archived data.', - 'x-example': 's3_us_east', - }, - 'ACTION_LOG_ARCHIVE_PATH': { - 'type': 'string', - 'description': 'If action log archiving is enabled, the path in storage in which to place the ' + - 'archived data.', - 'x-example': 'archives/actionlogs', - }, - 'LOG_ARCHIVE_LOCATION': { - 'type': 'string', - 'description': 'If builds are enabled, the storage engine in which to place the ' + - 'archived build logs.', - 'x-example': 's3_us_east', - }, - 'LOG_ARCHIVE_PATH': { - 'type': 'string', - 'description': 'If builds are enabled, the path in storage in which to place the ' + - 'archived build logs.', - 'x-example': 'archives/buildlogs', - }, - - # Authentication. - 'AUTHENTICATION_TYPE': { - 'type': 'string', - 'description': 'The authentication engine to use for credential authentication.', - 'x-example': 'Database', - 'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC'], - }, - 'SUPER_USERS': { - 'type': 'array', - 'description': 'Quay usernames of those users to be granted superuser privileges', - 'uniqueItems': True, - 'items': { - 'type': 'string', - }, - }, - 'DIRECT_OAUTH_CLIENTID_WHITELIST': { - 'type': 'array', - 'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' + - 'to perform direct OAuth approval without user approval.', - 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html', - 'uniqueItems': True, - 'items': { - 'type': 'string', - }, - }, - - # Redis. 
-    'BUILDLOGS_REDIS': {
-      'type': 'object',
-      'description': 'Connection information for Redis for build logs caching',
-      'required': ['host'],
-      'properties': {
-        'host': {
-          'type': 'string',
-          'description': 'The hostname at which Redis is accessible',
-          'x-example': 'my.redis.cluster',
-        },
-        'port': {
-          'type': 'number',
-          'description': 'The port at which Redis is accessible',
-          'x-example': 1234,
-        },
-        'password': {
-          'type': 'string',
-          'description': 'The password to connect to the Redis instance',
-          'x-example': 'mypassword',
-        },
-      },
-    },
-    'USER_EVENTS_REDIS': {
-      'type': 'object',
-      'description': 'Connection information for Redis for user event handling',
-      'required': ['host'],
-      'properties': {
-        'host': {
-          'type': 'string',
-          'description': 'The hostname at which Redis is accessible',
-          'x-example': 'my.redis.cluster',
-        },
-        'port': {
-          'type': 'number',
-          'description': 'The port at which Redis is accessible',
-          'x-example': 1234,
-        },
-        'password': {
-          'type': 'string',
-          'description': 'The password to connect to the Redis instance',
-          'x-example': 'mypassword',
-        },
-      },
-    },
-
-    # OAuth configuration.
-    'GITHUB_LOGIN_CONFIG': {
-      'type': ['object', 'null'],
-      'description': 'Configuration for using GitHub (Enterprise) as an external login provider',
-      'required': ['CLIENT_ID', 'CLIENT_SECRET'],
-      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html',
-      'properties': {
-        'GITHUB_ENDPOINT': {
-          'type': 'string',
-          'description': 'The endpoint of the GitHub (Enterprise) being hit',
-          'x-example': 'https://github.com/',
-        },
-        'API_ENDPOINT': {
-          'type': 'string',
-          'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
-          'x-example': 'https://api.github.com/',
-        },
-        'CLIENT_ID': {
-          'type': 'string',
-          'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG',
-          'x-example': '0e8dbe15c4c7630b6780',
-          'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
-        },
-        'CLIENT_SECRET': {
-          'type': 'string',
-          'description': 'The registered client secret for this Quay instance',
-          'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
-          'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
-        },
-        'ORG_RESTRICT': {
-          'type': 'boolean',
-          'description': 'If true, only users within the organization whitelist can login using this provider',
-          'x-example': True,
-        },
-        'ALLOWED_ORGANIZATIONS': {
-          'type': 'array',
-          'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option',
-          'uniqueItems': True,
-          'items': {
-            'type': 'string',
-          },
-        },
-      },
-    },
-    'BITBUCKET_TRIGGER_CONFIG': {
-      'type': ['object', 'null'],
-      'description': 'Configuration for using BitBucket for build triggers',
-      'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'],
-      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html',
-      'properties': {
-        'CONSUMER_KEY': {
-          'type': 'string',
-          'description': 'The registered consumer key (client ID) for this Quay instance',
-          'x-example': '0e8dbe15c4c7630b6780',
-        },
-        'CONSUMER_SECRET': {
-          'type': 'string',
-          'description': 'The registered consumer secret (client secret) for this Quay instance',
-          'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
-        },
-      },
-    },
-    'GITHUB_TRIGGER_CONFIG': {
-      'type': ['object', 'null'],
-      'description': 'Configuration for using GitHub (Enterprise) for build triggers',
-      'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
-      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html',
-      'properties': {
-        'GITHUB_ENDPOINT': {
-          'type': 'string',
-          'description': 'The endpoint of the GitHub (Enterprise) being hit',
-          'x-example': 'https://github.com/',
-        },
-        'API_ENDPOINT': {
-          'type': 'string',
-          'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
-          'x-example': 'https://api.github.com/',
-        },
-        'CLIENT_ID': {
-          'type': 'string',
-          'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG',
-          'x-example': '0e8dbe15c4c7630b6780',
-          'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
-        },
-        'CLIENT_SECRET': {
-          'type': 'string',
-          'description': 'The registered client secret for this Quay instance',
-          'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
-          'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
-        },
-      },
-    },
-    'GOOGLE_LOGIN_CONFIG': {
-      'type': ['object', 'null'],
-      'description': 'Configuration for using Google for external authentication',
-      'required': ['CLIENT_ID', 'CLIENT_SECRET'],
-      'properties': {
-        'CLIENT_ID': {
-          'type': 'string',
-          'description': 'The registered client ID for this Quay instance',
-          'x-example': '0e8dbe15c4c7630b6780',
-        },
-        'CLIENT_SECRET': {
-          'type': 'string',
-          'description': 'The registered client secret for this Quay instance',
-          'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
-        },
-      },
-    },
-    'GITLAB_TRIGGER_CONFIG': {
-      'type': ['object', 'null'],
-      'description': 'Configuration for using Gitlab (Enterprise) for build triggers',
-      'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
-      'properties': {
-        'GITLAB_ENDPOINT': {
-          'type': 'string',
-          'description': 'The endpoint at which Gitlab (Enterprise) is running',
-          'x-example': 'https://gitlab.com',
-        },
-        'CLIENT_ID': {
-          'type': 'string',
-          'description': 'The registered client ID for this Quay instance',
-          'x-example': '0e8dbe15c4c7630b6780',
-        },
-        'CLIENT_SECRET': {
-          'type': 'string',
-          'description': 'The registered client secret for this Quay instance',
-          'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
-        },
-      },
-    },
-
-    # Health.
-    'HEALTH_CHECKER': {
-      'description': 'The configured health check.',
-      'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'}),
-    },
-
-    # Metrics.
-    'PROMETHEUS_NAMESPACE': {
-      'type': 'string',
-      'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`',
-      'x-example': 'myregistry',
-    },
-
-    # Misc configuration.
-    'BLACKLIST_V2_SPEC': {
-      'type': 'string',
-      'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`',
-      'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec',
-      'x-example': '<1.8.0',
-    },
-    'USER_RECOVERY_TOKEN_LIFETIME': {
-      'type': 'string',
-      'description': 'The length of time a token for recovering a user account is valid. Defaults to 30m.',
-      'x-example': '10m',
-      'pattern': '^[0-9]+(w|m|d|h|s)$',
-    },
-    'SESSION_COOKIE_SECURE': {
-      'type': 'boolean',
-      'description': 'Whether the `secure` property should be set on session cookies. ' +
-                     'Defaults to False. Recommended to be True for all installations using SSL.',
-      'x-example': True,
-      'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies',
-    },
-    'PUBLIC_NAMESPACES': {
-      'type': 'array',
-      'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' +
-                     ' users\' repository list pages, regardless of whether that user is a member of the namespace.' +
-                     ' Typically, this is used by an enterprise customer in configuring a set of "well-known"' +
-                     ' namespaces.',
-      'uniqueItems': True,
-      'items': {
-        'type': 'string',
-      },
-    },
-    'AVATAR_KIND': {
-      'type': 'string',
-      'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)',
-      'enum': ['local', 'gravatar'],
-    },
-    'V2_PAGINATION_SIZE': {
-      'type': 'number',
-      'description': 'The number of results returned per page in V2 registry APIs',
-      'x-example': 100,
-    },
-    'ENABLE_HEALTH_DEBUG_SECRET': {
-      'type': ['string', 'null'],
-      'description': 'If specified, a secret that can be given to health endpoints to see full debug info when ' +
-                     'not authenticated as a superuser',
-      'x-example': 'somesecrethere',
-    },
-    'BROWSER_API_CALLS_XHR_ONLY': {
-      'type': 'boolean',
-      'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.',
-      'x-example': False,
-    },
-
-    # Time machine and tag expiration settings.
-    'FEATURE_CHANGE_TAG_EXPIRATION': {
-      'type': 'boolean',
-      'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.',
-      'x-example': False,
-    },
-    'DEFAULT_TAG_EXPIRATION': {
-      'type': 'string',
-      'description': 'The default, configurable tag expiration time for time machine. Defaults to `2w`.',
-      'pattern': '^[0-9]+(w|m|d|h|s)$',
-    },
-    'TAG_EXPIRATION_OPTIONS': {
-      'type': 'array',
-      'description': 'The options that users can select for expiration of tags in their namespace (if enabled)',
-      'items': {
-        'type': 'string',
-        'pattern': '^[0-9]+(w|m|d|h|s)$',
-      },
-    },
-
-    # Team syncing.
-    'FEATURE_TEAM_SYNCING': {
-      'type': 'boolean',
-      'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)',
-      'x-example': True,
-    },
-    'TEAM_RESYNC_STALE_TIME': {
-      'type': 'string',
-      'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)',
-      'x-example': '2h',
-      'pattern': '^[0-9]+(w|m|d|h|s)$',
-    },
-    'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {
-      'type': 'boolean',
-      'description': 'If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults to False.',
-      'x-example': True,
-    },
-
-    # Security scanning.
-    'FEATURE_SECURITY_SCANNER': {
-      'type': 'boolean',
-      'description': 'Whether to turn on/off the security scanner. Defaults to False',
-      'x-example': False,
-      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html',
-    },
-    'FEATURE_SECURITY_NOTIFICATIONS': {
-      'type': 'boolean',
-      'description': 'If the security scanner is enabled, whether to turn on/off security notifications. Defaults to False',
-      'x-example': False,
-    },
-    'SECURITY_SCANNER_ENDPOINT': {
-      'type': 'string',
-      'pattern': '^http(s)?://(.)+$',
-      'description': 'The endpoint for the security scanner',
-      'x-example': 'http://192.168.99.101:6060',
-    },
-    'SECURITY_SCANNER_INDEXING_INTERVAL': {
-      'type': 'number',
-      'description': 'The number of seconds between indexing intervals in the security scanner. Defaults to 30.',
-      'x-example': 30,
-    },
-
-    # Bittorrent support.
-    'FEATURE_BITTORRENT': {
-      'type': 'boolean',
-      'description': 'Whether to allow using Bittorrent-based pulls. Defaults to False',
-      'x-example': False,
-      'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bittorrent.html',
-    },
-    'BITTORRENT_PIECE_SIZE': {
-      'type': 'number',
-      'description': 'The bittorrent piece size to use. If not specified, defaults to 512 * 1024.',
-      'x-example': 512 * 1024,
-    },
-    'BITTORRENT_ANNOUNCE_URL': {
-      'type': 'string',
-      'pattern': '^http(s)?://(.)+$',
-      'description': 'The URL of the announce endpoint on the bittorrent tracker',
-      'x-example': 'https://localhost:6881/announce',
-    },
-
-    # Build
-    'FEATURE_GITHUB_BUILD': {
-      'type': 'boolean',
-      'description': 'Whether to support GitHub build triggers. Defaults to False',
-      'x-example': False,
-    },
-    'FEATURE_BITBUCKET_BUILD': {
-      'type': 'boolean',
-      'description': 'Whether to support Bitbucket build triggers. Defaults to False',
-      'x-example': False,
-    },
-    'FEATURE_GITLAB_BUILD': {
-      'type': 'boolean',
-      'description': 'Whether to support GitLab build triggers. Defaults to False',
-      'x-example': False,
-    },
-    'FEATURE_BUILD_SUPPORT': {
-      'type': 'boolean',
-      'description': 'Whether to support Dockerfile build. Defaults to True',
-      'x-example': True,
-    },
-    'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': {
-      'type': ['number', 'null'],
-      'description': 'If not None, the default maximum number of builds that can be queued in a namespace.',
-      'x-example': 20,
-    },
-    'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': {
-      'type': ['number', 'null'],
-      'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.',
-      'x-example': 10,
-    },
-    'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': {
-      'type': ['number', 'null'],
-      'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.',
-      'x-example': 50,
-    },
-
-    # Login
-    'FEATURE_GITHUB_LOGIN': {
-      'type': 'boolean',
-      'description': 'Whether GitHub login is supported. Defaults to False',
-      'x-example': False,
-    },
-    'FEATURE_GOOGLE_LOGIN': {
-      'type': 'boolean',
-      'description': 'Whether Google login is supported. Defaults to False',
-      'x-example': False,
-    },
-
-    # Recaptcha
-    'FEATURE_RECAPTCHA': {
-      'type': 'boolean',
-      'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False',
-      'x-example': False,
-      'x-reference': 'https://www.google.com/recaptcha/intro/',
-    },
-    'RECAPTCHA_SITE_KEY': {
-      'type': ['string', 'null'],
-      'description': 'If recaptcha is enabled, the site key for the Recaptcha service',
-    },
-    'RECAPTCHA_SECRET_KEY': {
-      'type': ['string', 'null'],
-      'description': 'If recaptcha is enabled, the secret key for the Recaptcha service',
-    },
-
-    # External application tokens.
-    'FEATURE_APP_SPECIFIC_TOKENS': {
-      'type': 'boolean',
-      'description': 'If enabled, users can create tokens for use by the Docker CLI. Defaults to True',
-      'x-example': False,
-    },
-
-    'APP_SPECIFIC_TOKEN_EXPIRATION': {
-      'type': ['string', 'null'],
-      'description': 'The expiration for external app tokens. Defaults to None.',
-      'pattern': '^[0-9]+(w|m|d|h|s)$',
-    },
-
-    'EXPIRED_APP_SPECIFIC_TOKEN_GC': {
-      'type': ['string', 'null'],
-      'description': 'Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.',
-      'pattern': '^[0-9]+(w|m|d|h|s)$',
-    },
-
-    # Feature Flag: Permanent Sessions.
-    'FEATURE_PERMANENT_SESSIONS': {
-      'type': 'boolean',
-      'description': 'Whether sessions are permanent. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Super User Support.
-    'FEATURE_SUPER_USERS': {
-      'type': 'boolean',
-      'description': 'Whether super users are supported. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Anonymous Users.
-    'FEATURE_ANONYMOUS_ACCESS': {
-      'type': 'boolean',
-      'description': 'Whether to allow anonymous users to browse and pull public repositories. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: User Creation.
-    'FEATURE_USER_CREATION': {
-      'type': 'boolean',
-      'description': 'Whether users can be created (by non-super users). Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Invite Only User Creation.
-    'FEATURE_INVITE_ONLY_USER_CREATION': {
-      'type': 'boolean',
-      'description': 'Whether users being created must be invited by another user. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Encrypted Basic Auth.
-    'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': {
-      'type': 'boolean',
-      'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Direct Login.
-    'FEATURE_DIRECT_LOGIN': {
-      'type': 'boolean',
-      'description': 'Whether users can directly login to the UI. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Advertising V2.
-    'FEATURE_ADVERTISE_V2': {
-      'type': 'boolean',
-      'description': 'Whether the v2/ endpoint is visible. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Log Rotation.
-    'FEATURE_ACTION_LOG_ROTATION': {
-      'type': 'boolean',
-      'description': 'Whether or not to rotate old action logs to storage. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: ACI Conversion.
-    'FEATURE_ACI_CONVERSION': {
-      'type': 'boolean',
-      'description': 'Whether to enable conversion to ACIs. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Library Support.
-    'FEATURE_LIBRARY_SUPPORT': {
-      'type': 'boolean',
-      'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Require Team Invite.
-    'FEATURE_REQUIRE_TEAM_INVITE': {
-      'type': 'boolean',
-      'description': 'Whether to require invitations when adding a user to a team. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: Collecting and Supporting Metadata.
-    'FEATURE_USER_METADATA': {
-      'type': 'boolean',
-      'description': 'Whether to collect and support user metadata. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Support App Registry.
-    'FEATURE_APP_REGISTRY': {
-      'type': 'boolean',
-      'description': 'Whether to enable support for App repositories. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Public Repositories in _catalog Endpoint.
-    'FEATURE_PUBLIC_CATALOG': {
-      'type': 'boolean',
-      'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Reader Build Logs.
-    'FEATURE_READER_BUILD_LOGS': {
-      'type': 'boolean',
-      'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False',
-      'x-example': False,
-    },
-
-    # Feature Flag: Usernames Autocomplete.
-    'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {
-      'type': 'boolean',
-      'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True',
-      'x-example': True,
-    },
-
-    # Feature Flag: User log access.
-    'FEATURE_USER_LOG_ACCESS': {
-      'type': 'boolean',
-      'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False',
-      'x-example': True,
-    },
-
-    # Feature Flag: User renaming.
-    'FEATURE_USER_RENAME': {
-      'type': 'boolean',
-      'description': 'If set to true, users can rename their own namespace. Defaults to False',
-      'x-example': True,
-    },
-  },
-}
-
diff --git a/config_app/config_util/workers.py b/config_app/config_util/workers.py
deleted file mode 100644
index f86f1d1bf..000000000
--- a/config_app/config_util/workers.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import psutil
-
-
-def get_worker_count(worker_kind_name, multiplier, minimum=None, maximum=None):
-  """ Returns the number of gunicorn workers to run for the given worker kind,
-      based on a combination of environment variable, multiplier, minimum (if any),
-      and number of accessible CPU cores.
-  """
-  minimum = minimum or multiplier
-  maximum = maximum or (multiplier * multiplier)
-
-  # Check for an override via an environment variable.
-  override_value = os.environ.get('WORKER_COUNT_' + worker_kind_name.upper())
-  if override_value is not None:
-    return max(override_value, minimum)
-
-  override_value = os.environ.get('WORKER_COUNT')
-  if override_value is not None:
-    return max(override_value, minimum)
-
-  # Load the number of CPU cores via affinity, and use that to calculate the
-  # number of workers to run.
-  p = psutil.Process(os.getpid())
-
-  try:
-    cpu_count = len(p.cpu_affinity())
-  except AttributeError:
-    # cpu_affinity isn't supported on this platform. Assume 2.
-    cpu_count = 2
-
-  return min(max(cpu_count * multiplier, minimum), maximum)
diff --git a/config_app/init/service/gunicorn_web/run b/config_app/init/service/gunicorn_web/run
index e7564a2c9..1fdf1870e 100755
--- a/config_app/init/service/gunicorn_web/run
+++ b/config_app/init/service/gunicorn_web/run
@@ -6,6 +6,6 @@ QUAYPATH=${QUAYPATH:-"."}
 QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
 
 cd ${QUAYDIR:-"/"}
-PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYDIR/config_app/conf/gunicorn_local.py config_application:application
+PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYDIR/config_app/conf/gunicorn_web.py config_application:application
 
 echo 'Gunicorn exited'
\ No newline at end of file
diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js
index 09b88bcfa..5163f1cba 100644
--- a/config_app/js/services/api-service.js
+++ b/config_app/js/services/api-service.js
@@ -3,13 +3,12 @@
  * callbacks. Any method defined on the server is exposed here as an equivalent method. Also
  * defines some helper functions for working with API responses.
  */
-// console.log(angular.module('quay-config').requires);
 angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilService', function(Restangular, $q, UtilService) {
   var apiService = {};
 
-  // if (!window.__endpoints) {
-  //   return apiService;
-  // }
+  if (!window.__endpoints) {
+    return apiService;
+  }
 
   var getResource = function(getMethod, operation, opt_parameters, opt_background) {
     var resource = {};
diff --git a/endpoints/api/discovery.py b/endpoints/api/discovery.py
index 001396888..66e7c74a3 100644
--- a/endpoints/api/discovery.py
+++ b/endpoints/api/discovery.py
@@ -1,3 +1,4 @@
+# TODO: extract the discovery logic into a util at the top level and then use it both here and in config_app discovery.py
 """ API discovery information. """
 
 import re
diff --git a/local-config-app.sh b/local-config-app.sh
index e2d63562e..9c6192200 100755
--- a/local-config-app.sh
+++ b/local-config-app.sh
@@ -7,7 +7,7 @@ cat << "EOF"
 \ \ \ \ / / | |__| | | |__| | / ____ \ | | | |____ | |__| | | . ` | | __| _| |_ | |__| | \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| \_____| \____/ |_| \_| |_| |_____| \_____| \__/ \__/ \ \__
- \___\ by CoreOS
+ \___\ by Red Hat
 
 Build, Store, and Distribute your Containers
diff --git a/package.json b/package.json
index 1a084ab37..6462e2d76 100644
--- a/package.json
+++ b/package.json
@@ -15,7 +15,8 @@
     "clean": "rm -f static/build/*",
 
     "clean-config-app": "rm -f config_app/static/build/*",
-    "watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch"
+    "watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch",
+    "build-config-app": "npm run clean-config-app && cd config_app && NODE_ENV=production webpack --progress"
   },
   "repository": {
     "type": "git",

From 79a05909d5c06bbe6f5b6c38f7fa7d89384d09a3 Mon Sep 17 00:00:00 2001
From: Sam Chow
Date: Fri, 8 Jun 2018 15:33:54 -0400
Subject: [PATCH 12/14] Remove kube and scshutdown endpoint

---
 config_app/c_app.py                          |   3 -
 config_app/config_endpoints/api/suconfig.py  |  37 ----
 config_app/config_util/config/__init__.py    |   6 +-
 config_app/config_util/config/k8sprovider.py | 170 -------------------
 config_app/js/services/container-service.js  |   4 +-
 5 files changed, 2 insertions(+), 218 deletions(-)
 delete mode 100644 config_app/config_util/config/k8sprovider.py

diff --git a/config_app/c_app.py b/config_app/c_app.py
index 57155a5be..049aef619 100644
--- a/config_app/c_app.py
+++ b/config_app/c_app.py
@@ -18,9 +18,6 @@ OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack')
 
 is_testing = 'TEST' in os.environ
 
-# TODO(config kubernetes): reinstate when enabling kubernetes in config app
-# is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
-
 config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                       testing=is_testing)
 
diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py
index 29d617cfa..539a6599c 100644
--- a/config_app/config_endpoints/api/suconfig.py
+++ b/config_app/config_endpoints/api/suconfig.py
@@ -199,43 +199,6 @@ class SuperUserSetupDatabase(ApiResource):
       abort(403)
 
 
-# From: https://stackoverflow.com/a/44712205
-def get_process_id(name):
-  """Return process ids found by (partial) name or regex.
-
-  >>> get_process_id('kthreadd')
-  [2]
-  >>> get_process_id('watchdog')
-  [10, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61] # ymmv
-  >>> get_process_id('non-existent process')
-  []
-  """
-  child = subprocess.Popen(['pgrep', name], stdout=subprocess.PIPE, shell=False)
-  response = child.communicate()[0]
-  return [int(pid) for pid in response.split()]
-
-
-@resource('/v1/superuser/shutdown')
-class SuperUserShutdown(ApiResource):
-  """ Resource for sending a shutdown signal to the container. """
-
-  @nickname('scShutdownContainer')
-  def post(self):
-    """ Sends a signal to the phusion init system to shut down the container. """
-    # Note: This method is called to set the database configuration before any super users exist,
-    # so we also allow it to be called if there is no valid registry configuration setup.
-
-    if app.config['TESTING'] or not database_has_users():
-      # Note: We skip if debugging locally.
-      if app.config.get('DEBUGGING') == True:
-        return {}
-
-      os.kill(get_process_id('my_init')[0], signal.SIGINT)
-      return {}
-
-    abort(403)
-
-
 @resource('/v1/superuser/config/createsuperuser')
 class SuperUserCreateInitialSuperUser(ApiResource):
   """ Resource for creating the initial super user. """
diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py
index 16b3c0ffe..d32c159d8 100644
--- a/config_app/config_util/config/__init__.py
+++ b/config_app/config_util/config/__init__.py
@@ -1,16 +1,12 @@
 from config_app.config_util.config.fileprovider import FileConfigProvider
 from config_app.config_util.config.testprovider import TestConfigProvider
-from config_app.config_util.config.k8sprovider import KubernetesConfigProvider
 
 
-def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False):
+def get_config_provider(config_volume, yaml_filename, py_filename, testing=False):
   """ Loads and returns the config provider for the current environment. """
   if testing:
     return TestConfigProvider()
 
-  if kubernetes:
-    return KubernetesConfigProvider(config_volume, yaml_filename, py_filename)
-
   return FileConfigProvider(config_volume, yaml_filename, py_filename)
diff --git a/config_app/config_util/config/k8sprovider.py b/config_app/config_util/config/k8sprovider.py
deleted file mode 100644
index 57d0a5f8c..000000000
--- a/config_app/config_util/config/k8sprovider.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-import logging
-import json
-import base64
-import time
-
-from requests import Request, Session
-
-from config_app.config_util.config.baseprovider import CannotWriteConfigException, get_yaml
-from config_app.config_util.config.basefileprovider import BaseFileProvider
-
-
-logger = logging.getLogger(__name__)
-
-KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
-port = os.environ.get('KUBERNETES_SERVICE_PORT')
-if port:
-  KUBERNETES_API_HOST += ':' + port
-
-SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
-
-QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
-QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')
-
-class KubernetesConfigProvider(BaseFileProvider):
-  """ Implementation of the config provider that reads and writes configuration
-      data from a Kubernetes Secret. """
-  def __init__(self, config_volume, yaml_filename, py_filename):
-    super(KubernetesConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)
-
-    # Load the service account token from the local store.
-    if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
-      raise Exception('Cannot load Kubernetes service account token')
-
-    with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
-      self._service_token = f.read()
-
-  @property
-  def provider_id(self):
-    return 'k8s'
-
-  def get_volume_path(self, directory, filename):
-    # NOTE: Overridden to ensure we don't have subdirectories, which aren't supported
-    # in Kubernetes secrets.
-    return "_".join([directory.rstrip('/'), filename])
-
-  def volume_file_exists(self, filename):
-    # NOTE: Overridden because we don't have subdirectories, which aren't supported
-    # in Kubernetes secrets.
-    secret = self._lookup_secret()
-    if not secret or not secret.get('data'):
-      return False
-    return filename in secret['data']
-
-  def list_volume_directory(self, path):
-    # NOTE: Overridden because we don't have subdirectories, which aren't supported
-    # in Kubernetes secrets.
-    secret = self._lookup_secret()
-
-    if not secret:
-      return []
-
-    paths = []
-    for filename in secret.get('data', {}):
-      if filename.startswith(path):
-        paths.append(filename[len(path) + 1:])
-    return paths
-
-  def save_config(self, config_obj):
-    self._update_secret_file(self.yaml_filename, get_yaml(config_obj))
-
-  def write_volume_file(self, filename, contents):
-    try:
-      self._update_secret_file(filename, contents)
-    except IOError as ioe:
-      raise CannotWriteConfigException(str(ioe))
-
-  def remove_volume_file(self, filename):
-    try:
-      self._update_secret_file(filename, None)
-    except IOError as ioe:
-      raise CannotWriteConfigException(str(ioe))
-
-  def save_volume_file(self, filename, flask_file):
-    filepath = super(KubernetesConfigProvider, self).save_volume_file(filename, flask_file)
-    with open(filepath, 'r') as f:
-      self.write_volume_file(filename, f.read())
-
-  def _assert_success(self, response):
-    if response.status_code != 200:
-      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
-                   response.text)
-      raise CannotWriteConfigException('Kubernetes API call failed: %s' % response.text)
-
-  def _update_secret_file(self, filename, value=None):
-    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
-    # as an error, as it seems to be a common issue.
-    namespace_url = 'namespaces/%s' % (QE_NAMESPACE)
-    response = self._execute_k8s_api('GET', namespace_url)
-    if response.status_code // 100 != 2:
-      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % QE_NAMESPACE
-      raise CannotWriteConfigException(msg)
-
-    # Check if the secret exists. If not, then we create an empty secret and then update the file
-    # inside.
-    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
-    secret = self._lookup_secret()
-    if secret is None:
-      self._assert_success(self._execute_k8s_api('POST', secret_url, {
-        "kind": "Secret",
-        "apiVersion": "v1",
-        "metadata": {
-          "name": QE_CONFIG_SECRET
-        },
-        "data": {}
-      }))
-
-    # Update the secret to reflect the file change.
-    secret['data'] = secret.get('data', {})
-
-    if value is not None:
-      secret['data'][filename] = base64.b64encode(value)
-    else:
-      secret['data'].pop(filename)
-
-    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))
-
-    # Wait until the local mounted copy of the secret has been updated, as
-    # this is an eventual consistency operation, but the caller expects immediate
-    # consistency.
-    while True:
-      matching_files = set()
-      for secret_filename, encoded_value in secret['data'].iteritems():
-        expected_value = base64.b64decode(encoded_value)
-        try:
-          with self.get_volume_file(secret_filename) as f:
-            contents = f.read()
-
-          if contents == expected_value:
-            matching_files.add(secret_filename)
-        except IOError:
-          continue
-
-      if matching_files == set(secret['data'].keys()):
-        break
-
-      # Sleep for a second and then try again.
-      time.sleep(1)
-
-  def _lookup_secret(self):
-    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
-    response = self._execute_k8s_api('GET', secret_url)
-    if response.status_code != 200:
-      return None
-    return json.loads(response.text)
-
-  def _execute_k8s_api(self, method, relative_url, data=None):
-    headers = {
-      'Authorization': 'Bearer ' + self._service_token
-    }
-
-    if data:
-      headers['Content-Type'] = 'application/json'
-
-    data = json.dumps(data) if data else None
-    session = Session()
-    url = 'https://%s/api/v1/%s' % (KUBERNETES_API_HOST, relative_url)
-
-    request = Request(method, url, data=data, headers=headers)
-    return session.send(request.prepare(), verify=False, timeout=2)
diff --git a/config_app/js/services/container-service.js b/config_app/js/services/container-service.js
index c20cfc162..0ce3d106b 100644
--- a/config_app/js/services/container-service.js
+++ b/config_app/js/services/container-service.js
@@ -6,9 +6,7 @@ angular.module('quay-config')
   function(ApiService, $timeout, Restangular) {
     var containerService = {};
     containerService.restartContainer = function(callback) {
-      ApiService.scShutdownContainer(null, null).then(function(resp) {
-        $timeout(callback, 2000);
-      }, ApiService.errorDisplay('Cannot restart container. Please report this to support.'))
+      ApiService.errorDisplay('Removed Endpoint. This error should never be seen.')
     };
 
     containerService.scheduleStatusCheck = function(callback, opt_config) {

From 479f4c3fa4680dc9823704c01ebe6ceb11bb83dc Mon Sep 17 00:00:00 2001
From: Sam Chow
Date: Mon, 11 Jun 2018 13:02:35 -0400
Subject: [PATCH 13/14] Set custom entrypoint for ci scripts to run image

increase timeout for cache push
revert test db change
---
 .travis.yml       |   1 +
 scripts/ci        |   2 +-
 test/data/test.db | Bin 1753088 -> 1753088 bytes
 3 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 8b1085fa3..169228c72 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,6 +14,7 @@ before_script:
   - sudo service postgresql stop
 
 cache:
+  timeout: 1000
   directories:
   - $HOME/docker
 
diff --git a/scripts/ci b/scripts/ci
index 681e76faa..38b04afea 100755
--- a/scripts/ci
+++ b/scripts/ci
@@ -45,7 +45,7 @@ clean_cache() {
 
 quay_run() {
-    docker run --net=host -e TEST_DATABASE_URI -ti "${IMAGE}:${IMAGE_TAG}" "$@"
+    docker run --net=host --entrypoint "/bin/bash" -e TEST_DATABASE_URI -ti "${IMAGE}:${IMAGE_TAG}" -c "$*"
 }
 
diff --git a/test/data/test.db b/test/data/test.db
index 34d3a84ba69063630149e8390d2a9a0cc20b0d9f..4b5cbd65f77db551dad7e57c671859735757c398 100644
GIT binary patch
delta 178
zcmZo@$ZlxJo*>P*b)t+jLQSVTF6I#3eq
t?hA5qBPp3)_nt$!{ir@C
y5OV=BHxTmxF)tAF0Wm)i3jnbo5DNjZFc6CXu_zFW0kJp`O8~Lt_M`ez))N4Z_C1RL
delta 182
zcmZo@$ZlxJo*>P*ZK8}b|x)&$1Z1g6#m=GFw3)&$np1h&=$_N@sVCJUuabqy>P
z49%^K4Xg|e^~_8S3=J(B-GwC>LQSVTF6I#3eq

From 0c0f20d0cdfa2a4dcdb8f42bcabca04a58db76a0 Mon Sep 17 00:00:00 2001
From: Sam Chow
Date: Tue, 12 Jun 2018 13:31:26 -0400
Subject: [PATCH 14/14] Add docker shell mode, and remove some config

---
 config_app/conf/logging.conf               |   3 -
 config_app/conf/logging_debug.conf         |   3 -
 config_app/conf/logging_debug_json.conf    |   3 -
 config_app/conf/logging_json.conf          |   3 -
 config_app/loghandler_config.py            | 114 ---------------------
 config_app/templates/index.html            |  10 --
 quay-entrypoint.sh                         |   7 +-
 util/config/validators/validate_torrent.py |   2 +
 8 files changed, 8 insertions(+), 137 deletions(-)
 delete mode 100755 config_app/loghandler_config.py

diff --git a/config_app/conf/logging.conf b/config_app/conf/logging.conf
index 885678395..3f1d3a33f 100644
--- a/config_app/conf/logging.conf
+++ b/config_app/conf/logging.conf
@@ -20,9 +20,6 @@ args=(sys.stdout, )
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
 class=logging.Formatter
 
-[formatter_json]
-class=loghandler_config.JsonFormatter
-
 [logger_gunicorn.error]
 level=ERROR
 handlers=console
diff --git a/config_app/conf/logging_debug.conf b/config_app/conf/logging_debug.conf
index 1f1bb2c63..b57ff1519 100644
--- a/config_app/conf/logging_debug.conf
+++ b/config_app/conf/logging_debug.conf
@@ -36,6 +36,3 @@ qualname=gunicorn.error
 [formatter_generic]
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
 class=logging.Formatter
-
-[formatter_json]
-class=loghandler_config.JsonFormatter
diff --git a/config_app/conf/logging_debug_json.conf b/config_app/conf/logging_debug_json.conf
index 382f882d1..21eb994a8 100644
--- a/config_app/conf/logging_debug_json.conf
+++ b/config_app/conf/logging_debug_json.conf
@@ -36,6 +36,3 @@ qualname=gunicorn.error
 [formatter_generic]
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
 class=logging.Formatter
-
-[formatter_json]
-class=loghandler_config.JsonFormatter
diff --git a/config_app/conf/logging_json.conf b/config_app/conf/logging_json.conf
index cccdcf832..05d4c5dde 100644
--- a/config_app/conf/logging_json.conf
+++ b/config_app/conf/logging_json.conf
@@ -20,9 +20,6 @@ args=(sys.stdout, )
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
 class=logging.Formatter
 
-[formatter_json]
-class=loghandler_config.JsonFormatter
-
 [logger_gunicorn.error]
 level=ERROR
 handlers=console
diff --git a/config_app/loghandler_config.py b/config_app/loghandler_config.py
deleted file mode 100755
index d3d9948cb..000000000
--- a/config_app/loghandler_config.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-
-import datetime
-import json
-import logging
-import re
-import traceback
-
-
-LOG_FORMAT_REGEXP = re.compile(r'\((.+?)\)', re.IGNORECASE)
-
-
-def _json_default(obj):
-  """
-  Coerce everything to strings.
-  All objects representing time get output as ISO8601.
-  """
-  if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)):
-    return obj.isoformat()
-
-  elif isinstance(obj, Exception):
-    return "Exception: %s" % str(obj)
-
-  return str(obj)
-
-
-# skip natural LogRecord attributes
-# http://docs.python.org/library/logging.html#logrecord-attributes
-RESERVED_ATTRS = set([
-  'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'levelname',
-  'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
-  'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName'
-])
-
-
-class JsonFormatter(logging.Formatter):
-  """
-  A custom formatter to format logging records as json strings.
-  Extra values will be formatted as str() if not supported by the
-  json default encoder.
-  """
-
-  def __init__(self, *args, **kwargs):
-    """
-    :param json_default: a function for encoding non-standard objects
-        as outlined in http://docs.python.org/2/library/json.html
-    :param json_encoder: optional custom encoder
-    :param json_serializer: a :meth:`json.dumps`-compatible callable
-        that will be used to serialize the log record.
-    :param prefix: an optional key prefix to nest logs
-    """
-    self.json_default = kwargs.pop("json_default", _json_default)
-    self.json_encoder = kwargs.pop("json_encoder", None)
-    self.json_serializer = kwargs.pop("json_serializer", json.dumps)
-    self.default_values = kwargs.pop("default_extra", {})
-    self.prefix_key = kwargs.pop("prefix_key", "data")
-
-    logging.Formatter.__init__(self, *args, **kwargs)
-
-    self._fmt_parameters = self._parse_format_string()
-    self._skip_fields = set(self._fmt_parameters)
-    self._skip_fields.update(RESERVED_ATTRS)
-
-  def _parse_format_string(self):
-    """Parses format string looking for substitutions"""
-    standard_formatters = LOG_FORMAT_REGEXP
-    return standard_formatters.findall(self._fmt)
-
-  def add_fields(self, log_record, record, message_dict):
-    """
-    Override this method to implement custom logic for adding fields.
- """ - - target = log_record - if self.prefix_key: - log_record[self.prefix_key] = {} - target = log_record[self.prefix_key] - - for field, value in record.__dict__.iteritems(): - if field in self._fmt_parameters and field in RESERVED_ATTRS: - log_record[field] = value - elif field not in RESERVED_ATTRS: - target[field] = value - - target.update(message_dict) - target.update(self.default_values) - - def format(self, record): - """Formats a log record and serializes to json""" - message_dict = {} - if isinstance(record.msg, dict): - message_dict = record.msg - record.message = None - if "message" in message_dict: - record.message = message_dict.pop("message", "") - else: - record.message = record.getMessage() - - # only format time if needed - if "asctime" in self._fmt_parameters: - record.asctime = self.formatTime(record, self.datefmt) - - # Display formatted exception, but allow overriding it in the - # user-supplied dict. - if record.exc_info and not message_dict.get('exc_info'): - message_dict['exc_info'] = traceback.format_list(traceback.extract_tb(record.exc_info[2])) - log_record = {} - - self.add_fields(log_record, record, message_dict) - - return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder) diff --git a/config_app/templates/index.html b/config_app/templates/index.html index 7a40e3d30..2ec95f8b8 100644 --- a/config_app/templates/index.html +++ b/config_app/templates/index.html @@ -19,19 +19,9 @@ - - - - - - - - - - {% for script_path in main_scripts %} diff --git a/quay-entrypoint.sh b/quay-entrypoint.sh index 9e9a60fe5..24c58b500 100755 --- a/quay-entrypoint.sh +++ b/quay-entrypoint.sh @@ -4,7 +4,7 @@ MODE="$1" display_usage() { echo "This script takes one argument." - echo -e "\nUsage: ${0} \n" + echo -e "\nUsage: ${0} \n" } if [[ "${MODE}" = "help" ]] @@ -32,6 +32,11 @@ EOF venv/bin/python -m displayversion case "$MODE" in + "shell") + echo "Entering shell mode" + /bin/bash + exit 0 + ;; "config") echo "Entering config mode, only copying config-app entrypoints" cp -r ${QUAYDIR}/config_app/init/service/* /etc/service diff --git a/util/config/validators/validate_torrent.py b/util/config/validators/validate_torrent.py index dce091efa..567285f0b 100644 --- a/util/config/validators/validate_torrent.py +++ b/util/config/validators/validate_torrent.py @@ -3,6 +3,8 @@ import logging from hashlib import sha1 from util.config.validators import BaseValidator, ConfigValidationException +# Temporarily removed because registry.torrent imports from app, add encoded_jwt back once extracted +# TODO(jschorr): extract app from following package and re-enable jwt_from_infohash in validator # from util.registry.torrent import jwt_from_infohash logger = logging.getLogger(__name__)