diff --git a/.travis.yml b/.travis.yml index 8b1085fa3..169228c72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,6 +14,7 @@ before_script: - sudo service postgresql stop cache: + timeout: 1000 directories: - $HOME/docker diff --git a/Dockerfile b/Dockerfile index ef4cf3ee1..b9f9074d5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -106,6 +106,10 @@ RUN yarn build \ && jpegoptim static/img/**/*.jpg \ && optipng -clobber -quiet static/img/**/*.png +# Config app js compile +COPY config_app/ config_app/ +RUN yarn build-config-app + COPY . . RUN PYTHONPATH=$QUAYPATH venv/bin/python -m external_libraries @@ -138,4 +142,4 @@ RUN ./scripts/detect-config.sh EXPOSE 443 8443 80 -CMD ./quay-entrypoint.sh +ENTRYPOINT [ "/bin/bash", "./quay-entrypoint.sh"] diff --git a/config_app/Procfile b/config_app/Procfile new file mode 100644 index 000000000..16b3fb8a4 --- /dev/null +++ b/config_app/Procfile @@ -0,0 +1,3 @@ +app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py config_application:application +webpack: npm run watch-config-app + diff --git a/config_app/__init__.py b/config_app/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/_init_config.py b/config_app/_init_config.py new file mode 100644 index 000000000..a1628321a --- /dev/null +++ b/config_app/_init_config.py @@ -0,0 +1,39 @@ +import os +import re +import subprocess + + +# Note: this currently points to the directory above, since we're in the quay config_app dir +# TODO(config_extract): revert to root directory rather than the one above +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) +STATIC_DIR = os.path.join(ROOT_DIR, 'static/') +STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') +STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/') +TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/') + + + +def _get_version_number_changelog(): + try: + with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f: + return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0) + except IOError: + return '' + + +def _get_git_sha(): + if os.path.exists("GIT_HEAD"): + with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f: + return f.read() + else: + try: + return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8] + except (OSError, subprocess.CalledProcessError): + pass + return "unknown" + + +__version__ = _get_version_number_changelog() +__gitrev__ = _get_git_sha() diff --git a/config_app/c_app.py b/config_app/c_app.py new file mode 100644 index 000000000..049aef619 --- /dev/null +++ b/config_app/c_app.py @@ -0,0 +1,37 @@ +import os +import logging + +from flask import Flask + +from data import database +from util.config.superusermanager import SuperUserManager +from util.ipresolver import NoopIPResolver + +from config_app._init_config import ROOT_DIR +from config_app.config_util.config import get_config_provider + +app = Flask(__name__) + +logger = logging.getLogger(__name__) + +OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack') + +is_testing = 'TEST' in os.environ + +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', + testing=is_testing) + +if is_testing: + from test.testconfig import TestConfig + logger.debug('Loading test config.') + app.config.from_object(TestConfig()) +else: + from config import DefaultConfig + logger.debug('Loading default config.') + app.config.from_object(DefaultConfig()) + app.teardown_request(database.close_db_filter) + +# 
Load the override config via the provider. +config_provider.update_app_config(app.config) +superusers = SuperUserManager(app) +ip_resolver = NoopIPResolver() diff --git a/config_app/conf/__init__.py b/config_app/conf/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py new file mode 100644 index 000000000..d0ea0a758 --- /dev/null +++ b/config_app/conf/gunicorn_local.py @@ -0,0 +1,26 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + +from Crypto import Random +from config_app.config_util.log import logfile_path + + +logconfig = logfile_path(debug=True) +bind = '0.0.0.0:5000' +workers = 1 +worker_class = 'gevent' +daemon = False +pythonpath = '.' +preload_app = True + +def post_fork(server, worker): + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/config_app/conf/gunicorn_web.py b/config_app/conf/gunicorn_web.py new file mode 100644 index 000000000..4c4e1a152 --- /dev/null +++ b/config_app/conf/gunicorn_web.py @@ -0,0 +1,26 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + +from Crypto import Random +from config_app.config_util.log import logfile_path + + +logconfig = logfile_path(debug=True) + +bind = '0.0.0.0:80' +workers = 1 +worker_class = 'gevent' +pythonpath = '.' +preload_app = True + +def post_fork(server, worker): + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. 
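Both gunicorn configs register a post_fork hook that resets PyCrypto's PRNG (the Random.atfork() call that follows): the PRNG's internal state is tied to the process ID, so a forked worker inheriting the parent's state would otherwise fail its PID check. A minimal sketch of the failure mode this guards against, assuming only that pycrypto is installed:

```python
import os
from Crypto import Random

rng = Random.new()          # PRNG state created in the parent process
pid = os.fork()
if pid == 0:
    Random.atfork()         # without this, rng.read() in the child can raise "PID check failed"
    rng.read(16)
    os._exit(0)
os.waitpid(pid, 0)
```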
+ Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/config_app/conf/logging.conf b/config_app/conf/logging.conf new file mode 100644 index 000000000..3f1d3a33f --- /dev/null +++ b/config_app/conf/logging.conf @@ -0,0 +1,33 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=generic +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/config_app/conf/logging_debug.conf b/config_app/conf/logging_debug.conf new file mode 100644 index 000000000..b57ff1519 --- /dev/null +++ b/config_app/conf/logging_debug.conf @@ -0,0 +1,38 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=generic +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter diff --git a/config_app/conf/logging_debug_json.conf b/config_app/conf/logging_debug_json.conf new file mode 100644 index 000000000..21eb994a8 --- /dev/null +++ b/config_app/conf/logging_debug_json.conf @@ -0,0 +1,38 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter diff --git a/config_app/conf/logging_json.conf b/config_app/conf/logging_json.conf new file mode 100644 index 000000000..05d4c5dde --- /dev/null +++ b/config_app/conf/logging_json.conf @@ -0,0 +1,33 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=json,generic + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/config_app/config_application.py b/config_app/config_application.py new file mode 100644 index 000000000..5c8835b66 --- /dev/null +++ b/config_app/config_application.py @@ 
-0,0 +1,12 @@
+import logging.config
+
+from config_app.c_app import app as application
+from config_app.config_util.log import logfile_path
+
+# Bind all of the blueprints
+import config_web
+
+
+if __name__ == '__main__':
+  logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)
+  application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')
diff --git a/config_app/config_endpoints/__init__.py b/config_app/config_endpoints/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py
new file mode 100644
index 000000000..d539639eb
--- /dev/null
+++ b/config_app/config_endpoints/api/__init__.py
@@ -0,0 +1,138 @@
+import logging
+
+from flask import Blueprint, request
+from flask_restful import Resource, Api
+from flask_restful.utils.cors import crossdomain
+from email.utils import formatdate
+from calendar import timegm
+from functools import partial, wraps
+from jsonschema import validate, ValidationError
+
+from config_app.c_app import app
+from config_app.config_endpoints.exception import InvalidResponse, InvalidRequest
+
+logger = logging.getLogger(__name__)
+api_bp = Blueprint('api', __name__)
+
+CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With']
+
+
+class ApiExceptionHandlingApi(Api):
+  @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS)
+  def handle_error(self, error):
+    return super(ApiExceptionHandlingApi, self).handle_error(error)
+
+
+api = ApiExceptionHandlingApi()
+
+api.init_app(api_bp)
+
+
+def format_date(date):
+  """ Output an RFC822 date format. """
+  if date is None:
+    return None
+  return formatdate(timegm(date.utctimetuple()))
+
+
+def resource(*urls, **kwargs):
+  def wrapper(api_resource):
+    if not api_resource:
+      return None
+
+    api_resource.registered = True
+    api.add_resource(api_resource, *urls, **kwargs)
+    return api_resource
+
+  return wrapper
+
+
+class ApiResource(Resource):
+  registered = False
+  method_decorators = []
+
+  def options(self):
+    return None, 200
+
+
+def add_method_metadata(name, value):
+  def modifier(func):
+    if func is None:
+      return None
+
+    if '__api_metadata' not in dir(func):
+      func.__api_metadata = {}
+    func.__api_metadata[name] = value
+    return func
+
+  return modifier
+
+
+def method_metadata(func, name):
+  if func is None:
+    return None
+
+  if '__api_metadata' in dir(func):
+    return func.__api_metadata.get(name, None)
+  return None
+
+
+def no_cache(f):
+  @wraps(f)
+  def add_no_cache(*args, **kwargs):
+    response = f(*args, **kwargs)
+    if response is not None:
+      response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
+    return response
+  return add_no_cache
+
+
+def define_json_response(schema_name):
+  def wrapper(func):
+    @add_method_metadata('response_schema', schema_name)
+    @wraps(func)
+    def wrapped(self, *args, **kwargs):
+      schema = self.schemas[schema_name]
+      resp = func(self, *args, **kwargs)
+
+      if app.config['TESTING']:
+        try:
+          validate(resp, schema)
+        except ValidationError as ex:
+          raise InvalidResponse(ex.message)
+
+      return resp
+    return wrapped
+  return wrapper
+
+
+def validate_json_request(schema_name, optional=False):
+  def wrapper(func):
+    @add_method_metadata('request_schema', schema_name)
+    @wraps(func)
+    def wrapped(self, *args, **kwargs):
+      schema = self.schemas[schema_name]
+      try:
+        json_data = request.get_json()
+        if json_data is None:
+          if not optional:
+            raise InvalidRequest('Missing JSON body')
+        else:
+          validate(json_data, schema)
+        return func(self, *args, **kwargs)
+      except ValidationError
as ex: + raise InvalidRequest(ex.message) + return wrapped + return wrapper + + +nickname = partial(add_method_metadata, 'nickname') + + +import config_endpoints.api +import config_endpoints.api.discovery +import config_endpoints.api.suconfig +import config_endpoints.api.superuser +import config_endpoints.api.user + diff --git a/config_app/config_endpoints/api/discovery.py b/config_app/config_endpoints/api/discovery.py new file mode 100644 index 000000000..2b0f0ceb3 --- /dev/null +++ b/config_app/config_endpoints/api/discovery.py @@ -0,0 +1,253 @@ +# TODO to extract the discovery stuff into a util at the top level and then use it both here and old discovery.py +import logging +import sys +from collections import OrderedDict + +from config_app.c_app import app +from config_app.config_endpoints.api import method_metadata +from config_app.config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER + + +logger = logging.getLogger(__name__) + + +def generate_route_data(): + include_internal = True + compact = True + + def swagger_parameter(name, description, kind='path', param_type='string', required=True, + enum=None, schema=None): + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject + parameter_info = { + 'name': name, + 'in': kind, + 'required': required + } + + if schema: + parameter_info['schema'] = { + '$ref': '#/definitions/%s' % schema + } + else: + parameter_info['type'] = param_type + + if enum is not None and len(list(enum)) > 0: + parameter_info['enum'] = list(enum) + + return parameter_info + + paths = {} + models = {} + tags = [] + tags_added = set() + operation_ids = set() + + for rule in app.url_map.iter_rules(): + endpoint_method = app.view_functions[rule.endpoint] + + # Verify that we have a view class for this API method. + if not 'view_class' in dir(endpoint_method): + continue + + view_class = endpoint_method.view_class + + # Hide the class if it is internal. + internal = method_metadata(view_class, 'internal') + if not include_internal and internal: + continue + + # Build the tag. + parts = fully_qualified_name(view_class).split('.') + tag_name = parts[-2] + if not tag_name in tags_added: + tags_added.add(tag_name) + tags.append({ + 'name': tag_name, + 'description': (sys.modules[view_class.__module__].__doc__ or '').strip() + }) + + # Build the Swagger data for the path. + swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule) + full_name = fully_qualified_name(view_class) + path_swagger = { + 'x-name': full_name, + 'x-path': swagger_path, + 'x-tag': tag_name + } + + related_user_res = method_metadata(view_class, 'related_user_resource') + if related_user_res is not None: + path_swagger['x-user-related'] = fully_qualified_name(related_user_res) + + paths[swagger_path] = path_swagger + + # Add any global path parameters. + param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {} + if param_data_map: + path_parameters_swagger = [] + for path_parameter in param_data_map: + description = param_data_map[path_parameter].get('description') + path_parameters_swagger.append(swagger_parameter(path_parameter, description)) + + path_swagger['parameters'] = path_parameters_swagger + + # Add the individual HTTP operations. 
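The discovery walk below leans on the small metadata helpers defined in config_endpoints/api/__init__.py above (add_method_metadata, method_metadata, and the nickname partial). A self-contained sketch of how that metadata round-trips:

```python
from functools import partial

def add_method_metadata(name, value):
    def modifier(func):
        # stash metadata on the function object itself
        if '__api_metadata' not in dir(func):
            func.__api_metadata = {}
        func.__api_metadata[name] = value
        return func
    return modifier

def method_metadata(func, name):
    if '__api_metadata' in dir(func):
        return func.__api_metadata.get(name, None)
    return None

nickname = partial(add_method_metadata, 'nickname')

@nickname('scGetConfig')
def get_config():
    pass

assert method_metadata(get_config, 'nickname') == 'scGetConfig'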
+ method_names = list(rule.methods.difference(['HEAD', 'OPTIONS'])) + for method_name in method_names: + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object + method = getattr(view_class, method_name.lower(), None) + if method is None: + logger.debug('Unable to find method for %s in class %s', method_name, view_class) + continue + + operationId = method_metadata(method, 'nickname') + operation_swagger = { + 'operationId': operationId, + 'parameters': [], + } + + if operationId is None: + continue + + if operationId in operation_ids: + raise Exception('Duplicate operation Id: %s' % operationId) + + operation_ids.add(operationId) + + # Mark the method as internal. + internal = method_metadata(method, 'internal') + if internal is not None: + operation_swagger['x-internal'] = True + + if include_internal: + requires_fresh_login = method_metadata(method, 'requires_fresh_login') + if requires_fresh_login is not None: + operation_swagger['x-requires-fresh-login'] = True + + # Add the path parameters. + if rule.arguments: + for path_parameter in rule.arguments: + description = param_data_map.get(path_parameter, {}).get('description') + operation_swagger['parameters'].append(swagger_parameter(path_parameter, description)) + + # Add the query parameters. + if '__api_query_params' in dir(method): + for query_parameter_info in method.__api_query_params: + name = query_parameter_info['name'] + description = query_parameter_info['help'] + param_type = TYPE_CONVERTER[query_parameter_info['type']] + required = query_parameter_info['required'] + + operation_swagger['parameters'].append( + swagger_parameter(name, description, kind='query', + param_type=param_type, + required=required, + enum=query_parameter_info['choices'])) + + # Add the OAuth security block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject + scope = method_metadata(method, 'oauth2_scope') + if scope and not compact: + operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}] + + # Add the responses block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject + response_schema_name = method_metadata(method, 'response_schema') + if not compact: + if response_schema_name: + models[response_schema_name] = view_class.schemas[response_schema_name] + + models['ApiError'] = { + 'type': 'object', + 'properties': { + 'status': { + 'type': 'integer', + 'description': 'Status code of the response.' + }, + 'type': { + 'type': 'string', + 'description': 'Reference to the type of the error.' + }, + 'detail': { + 'type': 'string', + 'description': 'Details about the specific instance of the error.' + }, + 'title': { + 'type': 'string', + 'description': 'Unique error code to identify the type of error.' 
+ }, + 'error_message': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + }, + 'error_type': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + } + }, + 'required': [ + 'status', + 'type', + 'title', + ] + } + + responses = { + '400': { + 'description': 'Bad Request', + }, + + '401': { + 'description': 'Session required', + }, + + '403': { + 'description': 'Unauthorized access', + }, + + '404': { + 'description': 'Not found', + }, + } + + for _, body in responses.items(): + body['schema'] = {'$ref': '#/definitions/ApiError'} + + if method_name == 'DELETE': + responses['204'] = { + 'description': 'Deleted' + } + elif method_name == 'POST': + responses['201'] = { + 'description': 'Successful creation' + } + else: + responses['200'] = { + 'description': 'Successful invocation' + } + + if response_schema_name: + responses['200']['schema'] = { + '$ref': '#/definitions/%s' % response_schema_name + } + + operation_swagger['responses'] = responses + + # Add the request block. + request_schema_name = method_metadata(method, 'request_schema') + if request_schema_name and not compact: + models[request_schema_name] = view_class.schemas[request_schema_name] + + operation_swagger['parameters'].append( + swagger_parameter('body', 'Request body contents.', kind='body', + schema=request_schema_name)) + + # Add the operation to the parent path. + if not internal or (internal and include_internal): + path_swagger[method_name.lower()] = operation_swagger + + tags.sort(key=lambda t: t['name']) + paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag'])) + + if compact: + return {'paths': paths} diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py new file mode 100644 index 000000000..539a6599c --- /dev/null +++ b/config_app/config_endpoints/api/suconfig.py @@ -0,0 +1,309 @@ +import logging +import os +import subprocess +import signal + +from flask import abort, request + +from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model +from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request +from config_app.c_app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY, ip_resolver + +from auth.auth_context import get_authenticated_user +from data.users import get_federated_service_name, get_users_handler +from data.database import configure +from data.runmigration import run_alembic_migration +from util.config.configutil import add_enterprise_config_defaults +from util.config.database import sync_database_with_config +from util.config.validator import validate_service_for_config, ValidatorContext + +logger = logging.getLogger(__name__) + + +def database_is_valid(): + """ Returns whether the database, as configured, is valid. """ + if app.config['TESTING']: + return False + + return model.is_valid() + + +def database_has_users(): + """ Returns whether the database has any users defined. """ + return model.has_users() + + +@resource('/v1/superuser/config') +class SuperUserConfig(ApiResource): + """ Resource for fetching and updating the current configuration, if any. 
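For reference, a request body that satisfies the UpdateConfig schema defined just below (values are illustrative, not taken from the diff):

```python
payload = {
    'config': {
        'AUTHENTICATION_TYPE': 'Database',
        'SERVER_HOSTNAME': 'quay.example.com',
    },
    'hostname': 'quay.example.com',
    'password': '',  # only consulted when AUTHENTICATION_TYPE is a federated service
}
```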
""" + schemas = { + 'UpdateConfig': { + 'type': 'object', + 'description': 'Updates the YAML config file', + 'required': [ + 'config', + 'hostname' + ], + 'properties': { + 'config': { + 'type': 'object' + }, + 'hostname': { + 'type': 'string' + }, + 'password': { + 'type': 'string' + }, + }, + }, + } + + @nickname('scGetConfig') + def get(self): + """ Returns the currently defined configuration, if any. """ + config_object = config_provider.get_config() + return { + 'config': config_object + } + + @nickname('scUpdateConfig') + @validate_json_request('UpdateConfig') + def put(self): + """ Updates the config override file. """ + # Note: This method is called to set the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. + if not config_provider.config_exists(): + config_object = request.get_json()['config'] + hostname = request.get_json()['hostname'] + + # Add any enterprise defaults missing from the config. + add_enterprise_config_defaults(config_object, app.config['SECRET_KEY'], hostname) + + # Write the configuration changes to the config override file. + config_provider.save_config(config_object) + + # If the authentication system is federated, link the superuser account to the + # the authentication system chosen. + service_name = get_federated_service_name(config_object['AUTHENTICATION_TYPE']) + if service_name is not None: + current_user = get_authenticated_user() + if current_user is None: + abort(401) + + service_name = get_federated_service_name(config_object['AUTHENTICATION_TYPE']) + if not model.has_federated_login(current_user.username, service_name): + # Verify the user's credentials and retrieve the user's external username+email. + handler = get_users_handler(config_object, config_provider, OVERRIDE_CONFIG_DIRECTORY) + (result, err_msg) = handler.verify_credentials(current_user.username, + request.get_json().get('password', '')) + if not result: + logger.error('Could not save configuration due to external auth failure: %s', err_msg) + abort(400) + + # Link the existing user to the external user. + model.attach_federated_login(current_user.username, service_name, result.username) + + # Ensure database is up-to-date with config + sync_database_with_config(config_object) + + return { + 'exists': True, + 'config': config_object + } + + abort(403) + + + +@resource('/v1/superuser/registrystatus') +class SuperUserRegistryStatus(ApiResource): + """ Resource for determining the status of the registry, such as if config exists, + if a database is configured, and if it has any defined users. + """ + @nickname('scRegistryStatus') + def get(self): + """ Returns the status of the registry. """ + + # If we have SETUP_COMPLETE, then we're ready to go! + if app.config.get('SETUP_COMPLETE', False): + return { + 'provider_id': config_provider.provider_id, + 'requires_restart': config_provider.requires_restart(app.config), + 'status': 'ready' + } + + # If there is no conf/stack volume, then report that status. + if not config_provider.volume_exists(): + return { + 'status': 'missing-config-dir' + } + + # If there is no config file, we need to setup the database. + if not config_provider.config_exists(): + return { + 'status': 'config-db' + } + + # If the database isn't yet valid, then we need to set it up. 
+ if not database_is_valid(): + return { + 'status': 'setup-db' + } + + return { + 'status': 'create-superuser' if not database_has_users() else 'config' + } + + +class _AlembicLogHandler(logging.Handler): + def __init__(self): + super(_AlembicLogHandler, self).__init__() + self.records = [] + + def emit(self, record): + self.records.append({ + 'level': record.levelname, + 'message': record.getMessage() + }) + + +@resource('/v1/superuser/setupdb') +class SuperUserSetupDatabase(ApiResource): + """ Resource for invoking alembic to setup the database. """ + @nickname('scSetupDatabase') + def get(self): + """ Invokes the alembic upgrade process. """ + # Note: This method is called after the database configured is saved, but before the + # database has any tables. Therefore, we only allow it to be run in that unique case. + if config_provider.config_exists() and not database_is_valid(): + # Note: We need to reconfigure the database here as the config has changed. + combined = dict(**app.config) + combined.update(config_provider.get_config()) + + configure(combined) + app.config['DB_URI'] = combined['DB_URI'] + + log_handler = _AlembicLogHandler() + + try: + run_alembic_migration(log_handler) + except Exception as ex: + return { + 'error': str(ex) + } + + return { + 'logs': log_handler.records + } + + abort(403) + + +@resource('/v1/superuser/config/createsuperuser') +class SuperUserCreateInitialSuperUser(ApiResource): + """ Resource for creating the initial super user. """ + schemas = { + 'CreateSuperUser': { + 'type': 'object', + 'description': 'Information for creating the initial super user', + 'required': [ + 'username', + 'password', + 'email' + ], + 'properties': { + 'username': { + 'type': 'string', + 'description': 'The username for the superuser' + }, + 'password': { + 'type': 'string', + 'description': 'The password for the superuser' + }, + 'email': { + 'type': 'string', + 'description': 'The e-mail address for the superuser' + }, + }, + }, + } + + @nickname('scCreateInitialSuperuser') + @validate_json_request('CreateSuperUser') + def post(self): + """ Creates the initial super user, updates the underlying configuration and + sets the current session to have that super user. """ + + # Special security check: This method is only accessible when: + # - There is a valid config YAML file. + # - There are currently no users in the database (clean install) + # + # We do this special security check because at the point this method is called, the database + # is clean but does not (yet) have any super users for our permissions code to check against. + if config_provider.config_exists() and not database_has_users(): + data = request.get_json() + username = data['username'] + password = data['password'] + email = data['email'] + + # Create the user in the database. + superuser_uuid = model.create_superuser(username, password, email) + + # Add the user to the config. + config_object = config_provider.get_config() + config_object['SUPER_USERS'] = [username] + config_provider.save_config(config_object) + + # Update the in-memory config for the new superuser. + # TODO(config): do we need to register a list of the superusers? If so, we can take out the entire superuser in c_app + superusers.register_superuser(username) + + # Conduct login with that user. 
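A request body that satisfies the CreateSuperUser schema above (illustrative values); per the security check in the handler, it is only honored while a config exists and the database has no users:

```python
payload = {
    'username': 'admin',
    'password': 'a-long-initial-password',
    'email': 'admin@example.com',
}
# POST payload to /v1/superuser/config/createsuperuser
```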
+ # TODO(config): figure out if we need validation for checking logged in user stuff + # common_login(superuser_uuid) + + return { + 'status': True + } + + abort(403) + + +@resource('/v1/superuser/config/validate/') +class SuperUserConfigValidate(ApiResource): + """ Resource for validating a block of configuration against an external service. """ + schemas = { + 'ValidateConfig': { + 'type': 'object', + 'description': 'Validates configuration', + 'required': [ + 'config' + ], + 'properties': { + 'config': { + 'type': 'object' + }, + 'password': { + 'type': 'string', + 'description': 'The users password, used for auth validation' + } + }, + }, + } + + @nickname('scValidateConfig') + @validate_json_request('ValidateConfig') + def post(self, service): + """ Validates the given config for the given service. """ + # Note: This method is called to validate the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. Note that + # this is also safe since this method does not access any information not given in the request. + if not config_provider.config_exists(): + config = request.get_json()['config'] + validator_context = ValidatorContext.from_app(app, config, request.get_json().get('password', ''), + ip_resolver=ip_resolver, + config_provider=config_provider) + return validate_service_for_config(service, validator_context) + + + abort(403) diff --git a/config_app/config_endpoints/api/suconfig_models_interface.py b/config_app/config_endpoints/api/suconfig_models_interface.py new file mode 100644 index 000000000..4b99170c5 --- /dev/null +++ b/config_app/config_endpoints/api/suconfig_models_interface.py @@ -0,0 +1,39 @@ +from abc import ABCMeta, abstractmethod +from six import add_metaclass + + +@add_metaclass(ABCMeta) +class SuperuserConfigDataInterface(object): + """ + Interface that represents all data store interactions required by the superuser config API. + """ + + @abstractmethod + def is_valid(self): + """ + Returns true if the configured database is valid. + """ + + @abstractmethod + def has_users(self): + """ + Returns true if there are any users defined. + """ + + @abstractmethod + def create_superuser(self, username, password, email): + """ + Creates a new superuser with the given username, password and email. Returns the user's UUID. + """ + + @abstractmethod + def has_federated_login(self, username, service_name): + """ + Returns true if the matching user has a federated login under the matching service. + """ + + @abstractmethod + def attach_federated_login(self, username, service_name, federated_username): + """ + Attaches a federatated login to the matching user, under the given service. 
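SuperuserConfigDataInterface above is implemented by PreOCIModel further down; the suconfig endpoints consume it roughly like this (a sketch using only methods declared in the interface; the service name is illustrative):

```python
from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model

if model.is_valid() and not model.has_users():
    superuser_uuid = model.create_superuser('admin', 'initial-password', 'admin@example.com')
    if not model.has_federated_login('admin', 'ldap'):
        model.attach_federated_login('admin', 'ldap', 'admin@corp')
```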
+ """ diff --git a/config_app/config_endpoints/api/suconfig_models_pre_oci.py b/config_app/config_endpoints/api/suconfig_models_pre_oci.py new file mode 100644 index 000000000..df83b8e9f --- /dev/null +++ b/config_app/config_endpoints/api/suconfig_models_pre_oci.py @@ -0,0 +1,35 @@ +from data import model +from data.database import User +from config_app.config_endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface + + +class PreOCIModel(SuperuserConfigDataInterface): + def is_valid(self): + try: + list(User.select().limit(1)) + return True + except: + return False + + def has_users(self): + return bool(list(User.select().limit(1))) + + def create_superuser(self, username, password, email): + return model.user.create_user(username, password, email, auto_verify=True).uuid + + def has_federated_login(self, username, service_name): + user = model.user.get_user(username) + if user is None: + return False + + return bool(model.user.lookup_federated_login(user, service_name)) + + def attach_federated_login(self, username, service_name, federated_username): + user = model.user.get_user(username) + if user is None: + return False + + model.user.attach_federated_login(user, service_name, federated_username) + + +pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py new file mode 100644 index 000000000..c061adacf --- /dev/null +++ b/config_app/config_endpoints/api/superuser.py @@ -0,0 +1,149 @@ +import os +import logging +import pathvalidate +from flask import request, jsonify + +from config_app.config_endpoints.exception import InvalidRequest +from config_app.config_endpoints.api import resource, ApiResource, nickname +from config_app.config_util.ssl import load_certificate, CertInvalidException +from config_app.c_app import app, config_provider + +from config_app.config_endpoints.api.superuser_models_pre_oci import pre_oci_model + +logger = logging.getLogger(__name__) +EXTRA_CA_DIRECTORY = 'extra_ca_certs' + + +@resource('/v1/superuser/customcerts/') +class SuperUserCustomCertificate(ApiResource): + """ Resource for managing a custom certificate. """ + + @nickname('uploadCustomCertificate') + def post(self, certpath): + uploaded_file = request.files['file'] + if not uploaded_file: + raise InvalidRequest('Missing certificate file') + + # Save the certificate. + certpath = pathvalidate.sanitize_filename(certpath) + if not certpath.endswith('.crt'): + raise InvalidRequest('Invalid certificate file: must have suffix `.crt`') + + logger.debug('Saving custom certificate %s', certpath) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.save_volume_file(cert_full_path, uploaded_file) + logger.debug('Saved custom certificate %s', certpath) + + # Validate the certificate. + try: + logger.debug('Loading custom certificate %s', certpath) + with config_provider.get_volume_file(cert_full_path) as f: + load_certificate(f.read()) + except CertInvalidException: + logger.exception('Got certificate invalid error for cert %s', certpath) + return '', 204 + except IOError: + logger.exception('Got IO error for cert %s', certpath) + return '', 204 + + # Call the update script to install the certificate immediately. 
+ if not app.config['TESTING']: + logger.debug('Calling certs_install.sh') + if os.system('/conf/init/certs_install.sh') != 0: + raise Exception('Could not install certificates') + + logger.debug('certs_install.sh completed') + + return '', 204 + + @nickname('deleteCustomCertificate') + def delete(self, certpath): + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.remove_volume_file(cert_full_path) + return '', 204 + + +@resource('/v1/superuser/customcerts') +class SuperUserCustomCertificates(ApiResource): + """ Resource for managing custom certificates. """ + + @nickname('getCustomCertificates') + def get(self): + has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY) + extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY) + if extra_certs_found is None: + return { + 'status': 'file' if has_extra_certs_path else 'none', + } + + cert_views = [] + for extra_cert_path in extra_certs_found: + try: + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path) + with config_provider.get_volume_file(cert_full_path) as f: + certificate = load_certificate(f.read()) + cert_views.append({ + 'path': extra_cert_path, + 'names': list(certificate.names), + 'expired': certificate.expired, + }) + except CertInvalidException as cie: + cert_views.append({ + 'path': extra_cert_path, + 'error': cie.message, + }) + except IOError as ioe: + cert_views.append({ + 'path': extra_cert_path, + 'error': ioe.message, + }) + + return { + 'status': 'directory', + 'certs': cert_views, + } + + +@resource('/v1/superuser/keys') +class SuperUserServiceKeyManagement(ApiResource): + """ Resource for managing service keys.""" + schemas = { + 'CreateServiceKey': { + 'id': 'CreateServiceKey', + 'type': 'object', + 'description': 'Description of creation of a service key', + 'required': ['service', 'expiration'], + 'properties': { + 'service': { + 'type': 'string', + 'description': 'The service authenticating with this key', + }, + 'name': { + 'type': 'string', + 'description': 'The friendly name of a service key', + }, + 'metadata': { + 'type': 'object', + 'description': 'The key/value pairs of this key\'s metadata', + }, + 'notes': { + 'type': 'string', + 'description': 'If specified, the extra notes for the key', + }, + 'expiration': { + 'description': 'The expiration date as a unix timestamp', + 'anyOf': [{'type': 'number'}, {'type': 'null'}], + }, + }, + }, + } + + @nickname('listServiceKeys') + def get(self): + keys = pre_oci_model.list_all_service_keys() + + return jsonify({ + 'keys': [key.to_dict() for key in keys], + }) + + diff --git a/config_app/config_endpoints/api/superuser_models_interface.py b/config_app/config_endpoints/api/superuser_models_interface.py new file mode 100644 index 000000000..f9971fdd1 --- /dev/null +++ b/config_app/config_endpoints/api/superuser_models_interface.py @@ -0,0 +1,170 @@ +from abc import ABCMeta, abstractmethod +from collections import namedtuple +from six import add_metaclass + +from config_app.config_endpoints.api import format_date + + +def user_view(user): + return { + 'name': user.username, + 'kind': 'user', + 'is_robot': user.robot, + } + + +class RepositoryBuild(namedtuple('RepositoryBuild', + ['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name', + 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name', + 'started', 'job_config', 'phase', 'status', 'error', 'archive_url'])): + """ + RepositoryBuild 
represents a build associated with a repository
+  :type uuid: string
+  :type logs_archived: boolean
+  :type repository_namespace_user_username: string
+  :type repository_name: string
+  :type can_write: boolean
+  :type can_read: boolean
+  :type pull_robot: User
+  :type resource_key: string
+  :type trigger: Trigger
+  :type display_name: string
+  :type started: boolean
+  :type job_config: {Any -> Any}
+  :type phase: string
+  :type status: string
+  :type error: string
+  :type archive_url: string
+  """
+
+  def to_dict(self):
+    resp = {
+      'id': self.uuid,
+      'phase': self.phase,
+      'started': format_date(self.started),
+      'display_name': self.display_name,
+      'status': self.status or {},
+      'subdirectory': self.job_config.get('build_subdir', ''),
+      'dockerfile_path': self.job_config.get('build_subdir', ''),
+      'context': self.job_config.get('context', ''),
+      'tags': self.job_config.get('docker_tags', []),
+      'manual_user': self.job_config.get('manual_user', None),
+      'is_writer': self.can_write,
+      'trigger': self.trigger.to_dict(),
+      'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None,
+      'resource_key': self.resource_key,
+      'pull_robot': user_view(self.pull_robot) if self.pull_robot else None,
+      'repository': {
+        'namespace': self.repository_namespace_user_username,
+        'name': self.repository_name
+      },
+      'error': self.error,
+    }
+
+    if self.can_write:
+      if self.resource_key is not None:
+        resp['archive_url'] = self.archive_url
+      elif self.job_config.get('archive_url', None):
+        resp['archive_url'] = self.job_config['archive_url']
+
+    return resp
+
+
+class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])):
+  """
+  Approval represents whether a key has been approved or not
+  :type approver: User
+  :type approval_type: string
+  :type approved_date: Date
+  :type notes: string
+  """
+
+  def to_dict(self):
+    return {
+      'approver': self.approver.to_dict() if self.approver else None,
+      'approval_type': self.approval_type,
+      'approved_date': self.approved_date,
+      'notes': self.notes,
+    }
+
+
+class ServiceKey(namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date',
+                                           'expiration_date', 'rotation_duration', 'approval'])):
+  """
+  ServiceKey is an apostille signing key
+  :type name: string
+  :type kid: int
+  :type service: string
+  :type jwk: string
+  :type metadata: string
+  :type created_date: Date
+  :type expiration_date: Date
+  :type rotation_duration: Date
+  :type approval: Approval
+  """
+
+  def to_dict(self):
+    return {
+      'name': self.name,
+      'kid': self.kid,
+      'service': self.service,
+      'jwk': self.jwk,
+      'metadata': self.metadata,
+      'created_date': self.created_date,
+      'expiration_date': self.expiration_date,
+      'rotation_duration': self.rotation_duration,
+      'approval': self.approval.to_dict() if self.approval is not None else None,
+    }
+
+
+class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])):
+  """
+  User represents a single user.
+  :type username: string
+  :type email: string
+  :type verified: boolean
+  :type enabled: boolean
+  :type robot: User
+  """
+
+  def to_dict(self):
+    user_data = {
+      'kind': 'user',
+      'name': self.username,
+      'username': self.username,
+      'email': self.email,
+      'verified': self.verified,
+      'enabled': self.enabled,
+    }
+
+    return user_data
+
+
+class Organization(namedtuple('Organization', ['username', 'email'])):
+  """
+  Organization represents a single org.
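These view models are plain namedtuples whose to_dict() output drives the JSON responses; e.g. a ServiceKey built by hand (field values illustrative, assuming the classes above are importable):

```python
approval = Approval(approver=None, approval_type='Superuser',
                    approved_date='2018-05-01', notes='')
key = ServiceKey(name='quay-builder', kid='kid123', service='quay', jwk={},
                 metadata={}, created_date='2018-05-01', expiration_date=None,
                 rotation_duration=None, approval=approval)
assert key.to_dict()['approval']['approval_type'] == 'Superuser'
```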
+ :type username: string + :type email: string + """ + + def to_dict(self): + return { + 'name': self.username, + 'email': self.email, + } + + + + +@add_metaclass(ABCMeta) +class SuperuserDataInterface(object): + """ + Interface that represents all data store interactions required by a superuser api. + """ + @abstractmethod + def list_all_service_keys(self): + """ + Returns a list of service keys + """ diff --git a/config_app/config_endpoints/api/superuser_models_pre_oci.py b/config_app/config_endpoints/api/superuser_models_pre_oci.py new file mode 100644 index 000000000..3002d5686 --- /dev/null +++ b/config_app/config_endpoints/api/superuser_models_pre_oci.py @@ -0,0 +1,31 @@ +from data import model + +from config_app.config_endpoints.api.superuser_models_interface import SuperuserDataInterface, User, ServiceKey, Approval + +def _create_user(user): + if user is None: + return None + return User(user.username, user.email, user.verified, user.enabled, user.robot) + + +def _create_key(key): + approval = None + if key.approval is not None: + approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, key.approval.approved_date, + key.approval.notes) + + return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date, + key.rotation_duration, approval) + + +class PreOCIModel(SuperuserDataInterface): + """ + PreOCIModel implements the data model for the SuperUser using a database schema + before it was changed to support the OCI specification. + """ + def list_all_service_keys(self): + keys = model.service_keys.list_all_keys() + return [_create_key(key) for key in keys] + + +pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py new file mode 100644 index 000000000..68d573873 --- /dev/null +++ b/config_app/config_endpoints/api/user.py @@ -0,0 +1,19 @@ +from auth.auth_context import get_authenticated_user +from config_app.config_endpoints.api import resource, ApiResource, nickname +from config_app.config_endpoints.api.superuser_models_interface import user_view + + +@resource('/v1/user/') +class User(ApiResource): + """ Operations related to users. """ + + @nickname('getLoggedInUser') + def get(self): + """ Get user information for the authenticated user. """ + user = get_authenticated_user() + # TODO(config): figure out if we need user validation + # if user is None or user.organization or not UserReadPermission(user.username).can(): + # raise InvalidToken("Requires authentication", payload={'session_required': False}) + + return user_view(user) + diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py new file mode 100644 index 000000000..2cb1c2dc8 --- /dev/null +++ b/config_app/config_endpoints/common.py @@ -0,0 +1,58 @@ +import logging +import os +import re + +from flask import make_response, render_template +from flask_restful import reqparse + +from config_app._init_config import ROOT_DIR + + +def truthy_bool(param): + return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'} + + +DEFAULT_JS_BUNDLE_NAME = 'configapp' +PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>') +logger = logging.getLogger(__name__) +TYPE_CONVERTER = { + truthy_bool: 'boolean', + str: 'string', + basestring: 'string', + reqparse.text_type: 'string', + int: 'integer', +} + + +def _list_files(path, extension, contains=""): + """ Returns a list of all the files with the given extension found under the given path. 
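truthy_bool above treats anything outside a small deny-list as true, which is how query-string flags get coerced:

```python
assert truthy_bool('true') is True    # not in the falsy set
assert truthy_bool('1') is True
assert truthy_bool('false') is False
assert truthy_bool('null') is False
```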
""" + + def matches(f): + return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0] + + def join_path(dp, f): + # Remove the static/ prefix. It is added in the template. + return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):] + + filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path) + return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] + + +def render_page_template(name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs): + """ Renders the page template with the given name as the response and returns its contents. """ + main_scripts = _list_files('build', 'js', js_bundle_name) + + contents = render_template(name, + route_data=route_data, + main_scripts=main_scripts, + **kwargs) + + resp = make_response(contents) + resp.headers['X-FRAME-OPTIONS'] = 'DENY' + return resp + + +def fully_qualified_name(method_view_class): + return '%s.%s' % (method_view_class.__module__, method_view_class.__name__) + + diff --git a/config_app/config_endpoints/exception.py b/config_app/config_endpoints/exception.py new file mode 100644 index 000000000..33cb161d2 --- /dev/null +++ b/config_app/config_endpoints/exception.py @@ -0,0 +1,67 @@ +from enum import Enum + +from flask import url_for +from werkzeug.exceptions import HTTPException + + +class ApiErrorType(Enum): + invalid_request = 'invalid_request' + + +class ApiException(HTTPException): + """ + Represents an error in the application/problem+json format. + + See: https://tools.ietf.org/html/rfc7807 + + - "type" (string) - A URI reference that identifies the + problem type. + + - "title" (string) - A short, human-readable summary of the problem + type. It SHOULD NOT change from occurrence to occurrence of the + problem, except for purposes of localization + + - "status" (number) - The HTTP status code + + - "detail" (string) - A human-readable explanation specific to this + occurrence of the problem. + + - "instance" (string) - A URI reference that identifies the specific + occurrence of the problem. It may or may not yield further + information if dereferenced. 
+ """ + + def __init__(self, error_type, status_code, error_description, payload=None): + Exception.__init__(self) + self.error_description = error_description + self.code = status_code + self.payload = payload + self.error_type = error_type + self.data = self.to_dict() + + super(ApiException, self).__init__(error_description, None) + + def to_dict(self): + rv = dict(self.payload or ()) + + if self.error_description is not None: + rv['detail'] = self.error_description + rv['error_message'] = self.error_description # TODO: deprecate + + rv['error_type'] = self.error_type.value # TODO: deprecate + rv['title'] = self.error_type.value + rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True) + rv['status'] = self.code + + return rv + + + +class InvalidRequest(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload) + + +class InvalidResponse(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload) diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py new file mode 100644 index 000000000..b9aba06c4 --- /dev/null +++ b/config_app/config_endpoints/setup_web.py @@ -0,0 +1,26 @@ +from flask import Blueprint +from cachetools import lru_cache + +from config_app.config_endpoints.common import render_page_template +from config_app.config_endpoints.api.discovery import generate_route_data +from config_app.config_endpoints.api import no_cache + + +setup_web = Blueprint('setup_web', __name__, template_folder='templates') + + +@lru_cache(maxsize=1) +def _get_route_data(): + return generate_route_data() + + +def render_page_template_with_routedata(name, *args, **kwargs): + return render_page_template(name, _get_route_data(), *args, **kwargs) + + +@no_cache +@setup_web.route('/', methods=['GET'], defaults={'path': ''}) +def index(path, **kwargs): + return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs) + + diff --git a/config_app/config_util/__init__.py b/config_app/config_util/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py new file mode 100644 index 000000000..d32c159d8 --- /dev/null +++ b/config_app/config_util/config/__init__.py @@ -0,0 +1,12 @@ +from config_app.config_util.config.fileprovider import FileConfigProvider +from config_app.config_util.config.testprovider import TestConfigProvider + + +def get_config_provider(config_volume, yaml_filename, py_filename, testing=False): + """ Loads and returns the config provider for the current environment. """ + if testing: + return TestConfigProvider() + + return FileConfigProvider(config_volume, yaml_filename, py_filename) + + diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py new file mode 100644 index 000000000..8929845c8 --- /dev/null +++ b/config_app/config_util/config/basefileprovider.py @@ -0,0 +1,71 @@ +import os +import logging + +from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, + CannotWriteConfigException) + +logger = logging.getLogger(__name__) + + +class BaseFileProvider(BaseProvider): + """ Base implementation of the config provider that reads the data from the file system. 
""" + def __init__(self, config_volume, yaml_filename, py_filename): + self.config_volume = config_volume + self.yaml_filename = yaml_filename + self.py_filename = py_filename + + self.yaml_path = os.path.join(config_volume, yaml_filename) + self.py_path = os.path.join(config_volume, py_filename) + + def update_app_config(self, app_config): + if os.path.exists(self.py_path): + logger.debug('Applying config file: %s', self.py_path) + app_config.from_pyfile(self.py_path) + + if os.path.exists(self.yaml_path): + logger.debug('Applying config file: %s', self.yaml_path) + import_yaml(app_config, self.yaml_path) + + def get_config(self): + if not self.config_exists(): + return None + + config_obj = {} + import_yaml(config_obj, self.yaml_path) + return config_obj + + def config_exists(self): + return self.volume_file_exists(self.yaml_filename) + + def volume_exists(self): + return os.path.exists(self.config_volume) + + def volume_file_exists(self, filename): + return os.path.exists(os.path.join(self.config_volume, filename)) + + def get_volume_file(self, filename, mode='r'): + return open(os.path.join(self.config_volume, filename), mode=mode) + + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) + + def list_volume_directory(self, path): + dirpath = os.path.join(self.config_volume, path) + if not os.path.exists(dirpath): + return None + + if not os.path.isdir(dirpath): + return None + + return os.listdir(dirpath) + + def requires_restart(self, app_config): + file_config = self.get_config() + if not file_config: + return False + + for key in file_config: + if app_config.get(key) != file_config[key]: + return True + + return False diff --git a/config_app/config_util/config/baseprovider.py b/config_app/config_util/config/baseprovider.py new file mode 100644 index 000000000..5a616895f --- /dev/null +++ b/config_app/config_util/config/baseprovider.py @@ -0,0 +1,128 @@ +import logging +import yaml + +from abc import ABCMeta, abstractmethod +from six import add_metaclass + +from jsonschema import validate, ValidationError + +from util.config.schema import CONFIG_SCHEMA + +logger = logging.getLogger(__name__) + + +class CannotWriteConfigException(Exception): + """ Exception raised when the config cannot be written. """ + pass + + +class SetupIncompleteException(Exception): + """ Exception raised when attempting to verify config that has not yet been setup. """ + pass + + +def import_yaml(config_obj, config_file): + with open(config_file) as f: + c = yaml.safe_load(f) + if not c: + logger.debug('Empty YAML config file') + return + + if isinstance(c, str): + raise Exception('Invalid YAML config file: ' + str(c)) + + for key in c.iterkeys(): + if key.isupper(): + config_obj[key] = c[key] + + if config_obj.get('SETUP_COMPLETE', True): + try: + validate(config_obj, CONFIG_SCHEMA) + except ValidationError: + # TODO: Change this into a real error + logger.exception('Could not validate config schema') + else: + logger.debug('Skipping config schema validation because setup is not complete') + + return config_obj + + +def get_yaml(config_obj): + return yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True) + + +def export_yaml(config_obj, config_file): + try: + with open(config_file, 'w') as f: + f.write(get_yaml(config_obj)) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + +@add_metaclass(ABCMeta) +class BaseProvider(object): + """ A configuration provider helps to load, save, and handle config override in the application. 
+ """ + + @property + def provider_id(self): + raise NotImplementedError + + @abstractmethod + def update_app_config(self, app_config): + """ Updates the given application config object with the loaded override config. """ + + @abstractmethod + def get_config(self): + """ Returns the contents of the config override file, or None if none. """ + + @abstractmethod + def save_config(self, config_object): + """ Updates the contents of the config override file to those given. """ + + @abstractmethod + def config_exists(self): + """ Returns true if a config override file exists in the config volume. """ + + @abstractmethod + def volume_exists(self): + """ Returns whether the config override volume exists. """ + + @abstractmethod + def volume_file_exists(self, filename): + """ Returns whether the file with the given name exists under the config override volume. """ + + @abstractmethod + def get_volume_file(self, filename, mode='r'): + """ Returns a Python file referring to the given name under the config override volume. """ + + @abstractmethod + def write_volume_file(self, filename, contents): + """ Writes the given contents to the config override volumne, with the given filename. """ + + @abstractmethod + def remove_volume_file(self, filename): + """ Removes the config override volume file with the given filename. """ + + @abstractmethod + def list_volume_directory(self, path): + """ Returns a list of strings representing the names of the files found in the config override + directory under the given path. If the path doesn't exist, returns None. + """ + + @abstractmethod + def save_volume_file(self, filename, flask_file): + """ Saves the given flask file to the config override volume, with the given + filename. + """ + + @abstractmethod + def requires_restart(self, app_config): + """ If true, the configuration loaded into memory for the app does not match that on disk, + indicating that this container requires a restart. + """ + + @abstractmethod + def get_volume_path(self, directory, filename): + """ Helper for constructing file paths, which may differ between providers. For example, + kubernetes can't have subfolders in configmaps """ diff --git a/config_app/config_util/config/fileprovider.py b/config_app/config_util/config/fileprovider.py new file mode 100644 index 000000000..385fe501f --- /dev/null +++ b/config_app/config_util/config/fileprovider.py @@ -0,0 +1,60 @@ +import os +import logging + +from config_app.config_util.config.baseprovider import export_yaml, CannotWriteConfigException +from config_app.config_util.config.basefileprovider import BaseFileProvider + + +logger = logging.getLogger(__name__) + + +def _ensure_parent_dir(filepath): + """ Ensures that the parent directory of the given file path exists. """ + try: + parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) + if not os.path.isdir(parentpath): + os.makedirs(parentpath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + +class FileConfigProvider(BaseFileProvider): + """ Implementation of the config provider that reads and writes the data + from/to the file system. 
""" + def __init__(self, config_volume, yaml_filename, py_filename): + super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) + + @property + def provider_id(self): + return 'file' + + def save_config(self, config_obj): + export_yaml(config_obj, self.yaml_path) + + def write_volume_file(self, filename, contents): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) + + try: + with open(filepath, mode='w') as f: + f.write(contents) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + return filepath + + def remove_volume_file(self, filename): + filepath = os.path.join(self.config_volume, filename) + os.remove(filepath) + + def save_volume_file(self, filename, flask_file): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) + + # Write the file. + try: + flask_file.save(filepath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + return filepath diff --git a/config_app/config_util/config/testprovider.py b/config_app/config_util/config/testprovider.py new file mode 100644 index 000000000..32e0127c8 --- /dev/null +++ b/config_app/config_util/config/testprovider.py @@ -0,0 +1,81 @@ +import json +import io +import os +from datetime import datetime, timedelta + +from config_app.config_util.config.baseprovider import BaseProvider + +REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem'] + + +class TestConfigProvider(BaseProvider): + """ Implementation of the config provider for testing. Everything is kept in-memory instead on + the real file system. """ + def __init__(self): + self.clear() + + def clear(self): + self.files = {} + self._config = {} + + @property + def provider_id(self): + return 'test' + + def update_app_config(self, app_config): + self._config = app_config + + def get_config(self): + if not 'config.yaml' in self.files: + return None + + return json.loads(self.files.get('config.yaml', '{}')) + + def save_config(self, config_obj): + self.files['config.yaml'] = json.dumps(config_obj) + + def config_exists(self): + return 'config.yaml' in self.files + + def volume_exists(self): + return True + + def volume_file_exists(self, filename): + if filename in REAL_FILES: + return True + + return filename in self.files + + def save_volume_file(self, filename, flask_file): + self.files[filename] = flask_file.read() + + def write_volume_file(self, filename, contents): + self.files[filename] = contents + + def get_volume_file(self, filename, mode='r'): + if filename in REAL_FILES: + return open(filename, mode=mode) + + return io.BytesIO(self.files[filename]) + + def remove_volume_file(self, filename): + self.files.pop(filename, None) + + def list_volume_directory(self, path): + paths = [] + for filename in self.files: + if filename.startswith(path): + paths.append(filename[len(path)+1:]) + + return paths + + def requires_restart(self, app_config): + return False + + def reset_for_test(self): + self._config['SUPER_USERS'] = ['devtable'] + self.files = {} + + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) + diff --git a/config_app/config_util/log.py b/config_app/config_util/log.py new file mode 100644 index 000000000..9d91b3d68 --- /dev/null +++ b/config_app/config_util/log.py @@ -0,0 +1,47 @@ +import os +from config_app._init_config import CONF_DIR + + +def logfile_path(jsonfmt=False, debug=False): + """ + Returns the a logfileconf path following this rules: + - 
conf/logging_debug_json.conf # jsonfmt=true, debug=true + - conf/logging_json.conf # jsonfmt=true, debug=false + - conf/logging_debug.conf # jsonfmt=false, debug=true + - conf/logging.conf # jsonfmt=false, debug=false + Can be parametrized via envvars: JSONLOG=true, DEBUGLOG=true + """ + _json = "" + _debug = "" + + if jsonfmt or os.getenv('JSONLOG', 'false').lower() == 'true': + _json = "_json" + + if debug or os.getenv('DEBUGLOG', 'false').lower() == 'true': + _debug = "_debug" + + return os.path.join(CONF_DIR, "logging%s%s.conf" % (_debug, _json)) + + +def filter_logs(values, filtered_fields): + """ + Takes a dict and a list of keys to filter. + eg: + with filtered_fields: + [{'key': ['k1', k2'], 'fn': lambda x: 'filtered'}] + and values: + {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'} + the returned dict is: + {'k1': {k2: 'filtered'}, 'k3': 'some-value'} + """ + for field in filtered_fields: + cdict = values + + for key in field['key'][:-1]: + if key in cdict: + cdict = cdict[key] + + last_key = field['key'][-1] + + if last_key in cdict and cdict[last_key]: + cdict[last_key] = field['fn'](cdict[last_key]) diff --git a/config_app/config_util/ssl.py b/config_app/config_util/ssl.py new file mode 100644 index 000000000..f14d2c04e --- /dev/null +++ b/config_app/config_util/ssl.py @@ -0,0 +1,81 @@ +from fnmatch import fnmatch + +import OpenSSL + +class CertInvalidException(Exception): + """ Exception raised when a certificate could not be parsed/loaded. """ + pass + +class KeyInvalidException(Exception): + """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. """ + pass + + +def load_certificate(cert_contents): + """ Loads the certificate from the given contents and returns it or raises a CertInvalidException + on failure. + """ + try: + cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents) + return SSLCertificate(cert) + except OpenSSL.crypto.Error as ex: + raise CertInvalidException(ex.message[0][2]) + + +_SUBJECT_ALT_NAME = 'subjectAltName' + +class SSLCertificate(object): + """ Helper class for easier working with SSL certificates. """ + def __init__(self, openssl_cert): + self.openssl_cert = openssl_cert + + def validate_private_key(self, private_key_path): + """ Validates that the private key found at the given file path applies to this certificate. + Raises a KeyInvalidException on failure. + """ + context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD) + context.use_certificate(self.openssl_cert) + + try: + context.use_privatekey_file(private_key_path) + context.check_privatekey() + except OpenSSL.SSL.Error as ex: + raise KeyInvalidException(ex.message[0][2]) + + def matches_name(self, check_name): + """ Returns true if this SSL certificate matches the given DNS hostname. """ + for dns_name in self.names: + if fnmatch(check_name, dns_name): + return True + + return False + + @property + def expired(self): + """ Returns whether the SSL certificate has expired. """ + return self.openssl_cert.has_expired() + + @property + def common_name(self): + """ Returns the defined common name for the certificate, if any. """ + return self.openssl_cert.get_subject().commonName + + @property + def names(self): + """ Returns all the DNS named to which the certificate applies. May be empty. """ + dns_names = set() + common_name = self.common_name + if common_name is not None: + dns_names.add(common_name) + + # Find the DNS extension, if any. 
+ for i in range(0, self.openssl_cert.get_extension_count()): + ext = self.openssl_cert.get_extension(i) + if ext.get_short_name() == _SUBJECT_ALT_NAME: + value = str(ext) + for san_name in value.split(','): + san_name_trimmed = san_name.strip() + if san_name_trimmed.startswith('DNS:'): + dns_names.add(san_name_trimmed[4:]) + + return dns_names diff --git a/config_app/config_web.py b/config_app/config_web.py new file mode 100644 index 000000000..487f8b78e --- /dev/null +++ b/config_app/config_web.py @@ -0,0 +1,8 @@ +from config_app.c_app import app as application +from config_app.config_endpoints.api import api_bp +from config_app.config_endpoints.setup_web import setup_web + + +application.register_blueprint(setup_web) +application.register_blueprint(api_bp, url_prefix='/api') + diff --git a/config_app/init/service/gunicorn_web/run b/config_app/init/service/gunicorn_web/run new file mode 100755 index 000000000..1fdf1870e --- /dev/null +++ b/config_app/init/service/gunicorn_web/run @@ -0,0 +1,11 @@ +#! /bin/bash + +echo 'Starting gunicorn' + +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYDIR/config_app/conf/gunicorn_web.py config_application:application + +echo 'Gunicorn exited' \ No newline at end of file diff --git a/config_app/js/components/cor-progress/cor-progress-bar.html b/config_app/js/components/cor-progress/cor-progress-bar.html new file mode 100644 index 000000000..6ccd75fe3 --- /dev/null +++ b/config_app/js/components/cor-progress/cor-progress-bar.html @@ -0,0 +1,4 @@ +
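To make the SSLCertificate helper in config_app/config_util/ssl.py concrete, here is a small hedged sketch of it in use; the file path and hostnames are illustrative only:

```python
# Sketch: inspecting a PEM certificate with the SSLCertificate helper.
# The path and hostnames are illustrative only.
from config_app.config_util.ssl import load_certificate

with open('/tmp/quay-conf/ssl.cert') as f:
    cert = load_certificate(f.read())  # raises CertInvalidException on bad PEM

print(cert.expired)  # True once the certificate's notAfter date has passed
print(cert.names)    # the common name plus any DNS: SAN entries

# Matching is fnmatch-based, so a '*.example.com' SAN covers subdomains.
print(cert.matches_name('registry.example.com'))
```

validate_private_key rounds this out: it loads the key into an OpenSSL context alongside the certificate and lets check_privatekey reject mismatched pairs.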
+
+
\ No newline at end of file diff --git a/config_app/js/components/cor-progress/cor-progress-components.js b/config_app/js/components/cor-progress/cor-progress-components.js new file mode 100644 index 000000000..a8bc9b3b9 --- /dev/null +++ b/config_app/js/components/cor-progress/cor-progress-components.js @@ -0,0 +1,74 @@ + + +const corStepBarUrl = require('./cor-step-bar.html'); +const corStepUrl = require('./cor-step.html'); +const corProgressBarUrl = require('./cor-progress-bar.html'); + +angular.module('quay-config') + .directive('corStepBar', () => { + const directiveDefinitionObject = { + priority: 4, + templateUrl: corStepBarUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'progress': '=progress' + }, + controller: function($rootScope, $scope, $element) { + $scope.$watch('progress', function(progress) { + if (!progress) { return; } + + var index = 0; + for (var i = 0; i < progress.length; ++i) { + if (progress[i]) { + index = i; + } + } + + $element.find('.transclude').children('.co-step-element').each(function(i, elem) { + $(elem).removeClass('active'); + if (i <= index) { + $(elem).addClass('active'); + } + }); + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('corStep', function() { + var directiveDefinitionObject = { + priority: 4, + templateUrl: corStepUrl, + replace: true, + transclude: false, + requires: '^corStepBar', + restrict: 'C', + scope: { + 'icon': '@icon', + 'title': '@title', + 'text': '@text' + }, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('corProgressBar', function() { + var directiveDefinitionObject = { + priority: 4, + templateUrl: corProgressBarUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'progress': '=progress' + }, + controller: function($rootScope, $scope, $element) { + } + }; + return directiveDefinitionObject; + }); diff --git a/config_app/js/components/cor-progress/cor-step-bar.html b/config_app/js/components/cor-progress/cor-step-bar.html new file mode 100644 index 000000000..117f8185d --- /dev/null +++ b/config_app/js/components/cor-progress/cor-step-bar.html @@ -0,0 +1,3 @@ +
+ +
diff --git a/config_app/js/components/cor-progress/cor-step.html b/config_app/js/components/cor-progress/cor-step.html new file mode 100644 index 000000000..acc9baee4 --- /dev/null +++ b/config_app/js/components/cor-progress/cor-step.html @@ -0,0 +1,6 @@ + + + {{ text }} + + + diff --git a/config_app/js/components/file-upload-box.html b/config_app/js/components/file-upload-box.html new file mode 100644 index 000000000..65cdf9d6c --- /dev/null +++ b/config_app/js/components/file-upload-box.html @@ -0,0 +1,46 @@ +
+
+
+
+ + +
+
+ +
+ +
+
+
+
+
+ + Uploading file {{ currentlyUploadingFile.name }}... +
+ +
{{ selectMessage }}
+
+ + {{ message }} +
+
+ + {{ message }} +
+
+
\ No newline at end of file diff --git a/config_app/js/components/file-upload-box.js b/config_app/js/components/file-upload-box.js new file mode 100644 index 000000000..2a48b06d0 --- /dev/null +++ b/config_app/js/components/file-upload-box.js @@ -0,0 +1,173 @@ +const templateUrl = require('./file-upload-box.html'); +/** + * An element which adds a stylize box for uploading a file. + */ +angular.module('quay-config').directive('fileUploadBox', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'selectMessage': '@selectMessage', + + 'filesSelected': '&filesSelected', + 'filesCleared': '&filesCleared', + 'filesValidated': '&filesValidated', + + 'extensions': '= $scope.selectedFiles.length) { + callback(true, fileIds); + return; + } + + // For the current file, retrieve a file-drop URL from the API for the file. + var currentFile = $scope.selectedFiles[currentIndex]; + var mimeType = currentFile.type || 'application/octet-stream'; + var data = { + 'mimeType': mimeType + }; + + $scope.currentlyUploadingFile = currentFile; + $scope.uploadProgress = 0; + + ApiService.getFiledropUrl(data).then(function(resp) { + // Perform the upload. + conductUpload(currentFile, resp.url, resp.file_id, mimeType, progressCb, doneCb); + }, function() { + callback(false, 'Could not retrieve upload URL'); + }); + }; + + // Start the uploading. + $scope.state = 'uploading'; + performFileUpload(); + }; + + $scope.handleFilesChanged = function(files) { + if ($scope.state == 'uploading') { return; } + + $scope.message = null; + $scope.selectedFiles = files; + + if (files.length == 0) { + $scope.state = 'clear'; + $scope.filesCleared(); + } else { + for (var i = 0; i < files.length; ++i) { + if (files[i].size > MAX_FILE_SIZE) { + $scope.state = 'error'; + $scope.message = 'File ' + files[i].name + ' is larger than the maximum file ' + + 'size of ' + MAX_FILE_SIZE_MB + ' MB'; + return; + } + } + + $scope.state = 'checking'; + $scope.filesSelected({ + 'files': files, + 'callback': function(status, message) { + $scope.state = status ? 
'okay' : 'error'; + $scope.message = message; + + if (status) { + $scope.filesValidated({ + 'files': files, + 'uploadFiles': uploadFiles + }); + } + } + }); + } + }; + + $scope.getAccepts = function(extensions) { + if (!extensions || !extensions.length) { + return '*'; + } + + return extensions.join(','); + }; + + $scope.$watch('reset', function(reset) { + if (reset) { + $scope.state = 'clear'; + $element.find('#file-drop-' + $scope.boxId).parent().trigger('reset'); + } + }); + } + }; + return directiveDefinitionObject; +}); \ No newline at end of file diff --git a/config_app/js/config-app.module.ts b/config_app/js/config-app.module.ts new file mode 100644 index 000000000..3f9439c42 --- /dev/null +++ b/config_app/js/config-app.module.ts @@ -0,0 +1,43 @@ +import { NgModule } from 'ng-metadata/core'; +import * as restangular from 'restangular'; + +const quayDependencies: string[] = [ + 'restangular', + 'ngCookies', + 'angularFileUpload', + 'ngSanitize' +]; + +@NgModule(({ + imports: quayDependencies, + declarations: [], + providers: [ + provideConfig, + ] +})) +class DependencyConfig{} + + +provideConfig.$inject = [ + '$provide', + '$injector', + '$compileProvider', + 'RestangularProvider', +]; + +function provideConfig($provide: ng.auto.IProvideService, + $injector: ng.auto.IInjectorService, + $compileProvider: ng.ICompileProvider, + RestangularProvider: any): void { + + // Configure the API provider. + RestangularProvider.setBaseUrl('/api/v1/'); +} + + +@NgModule({ + imports: [ DependencyConfig ], + declarations: [], + providers: [] +}) +export class ConfigAppModule {} diff --git a/config_app/js/config-field-templates/config-bool-field.html b/config_app/js/config-field-templates/config-bool-field.html new file mode 100644 index 000000000..190698290 --- /dev/null +++ b/config_app/js/config-field-templates/config-bool-field.html @@ -0,0 +1,8 @@ +
+
+ +
+
diff --git a/config_app/js/config-field-templates/config-certificates-field.html b/config_app/js/config-field-templates/config-certificates-field.html new file mode 100644 index 000000000..f20e4c459 --- /dev/null +++ b/config_app/js/config-field-templates/config-certificates-field.html @@ -0,0 +1,76 @@ +
+
+ +
+ extra_ca_certs is a single file and cannot be processed by this tool. If it contains a valid concatenated list of certificates, they will be installed on container startup. +
+ +
+
+

This section lists any custom or self-signed SSL certificates that are installed in the container on startup after being read from the extra_ca_certs directory in the configuration volume. +

+

+ Custom certificates are typically used in place of publicly signed certificates for corporate-internal services. +

+

Please make sure that all custom names used for downstream services (such as Clair) are listed in the certificates below.

+
+ + + + + + +
Upload certificates: +
+
+ + + + + + + + + + + + + + +
Certificate FilenameStatusNames Handled
{{ certificate.path }} +
+ + Error: {{ certificate.error }} +
+
+ + Certificate is expired +
+
+ + Certificate is valid +
+
+
(None)
+ {{ name }} +
+ + + Delete Certificate + + +
+
+
+ Uploading, validating and updating certificate(s) +
+
+
No custom certificates found.
+
+
+
+
\ No newline at end of file diff --git a/config_app/js/config-field-templates/config-contact-field.html b/config_app/js/config-field-templates/config-contact-field.html new file mode 100644 index 000000000..58cdea0c4 --- /dev/null +++ b/config_app/js/config-field-templates/config-contact-field.html @@ -0,0 +1,46 @@ +
+ + + + + +
+ + +
+ +
+
+
diff --git a/config_app/js/config-field-templates/config-contacts-field.html b/config_app/js/config-field-templates/config-contacts-field.html new file mode 100644 index 000000000..40762934c --- /dev/null +++ b/config_app/js/config-field-templates/config-contacts-field.html @@ -0,0 +1,4 @@ +
+
+
+
diff --git a/config_app/js/config-field-templates/config-file-field.html b/config_app/js/config-field-templates/config-file-field.html new file mode 100644 index 000000000..11c4227f7 --- /dev/null +++ b/config_app/js/config-field-templates/config-file-field.html @@ -0,0 +1,13 @@ +
+ + + /conf/stack/{{ filename }} + Select a replacement file: + + Please select a file to upload as {{ filename }}: + + + + Uploading file as {{ filename }}... {{ uploadProgress }}% + +
diff --git a/config_app/js/config-field-templates/config-list-field.html b/config_app/js/config-field-templates/config-list-field.html new file mode 100644 index 000000000..9918e9a07 --- /dev/null +++ b/config_app/js/config-field-templates/config-list-field.html @@ -0,0 +1,17 @@ +
+
    +
  • + {{ item }} + + Remove + +
  • +
+ No {{ itemTitle }}s defined +
+ + +
+
diff --git a/config_app/js/config-field-templates/config-map-field.html b/config_app/js/config-field-templates/config-map-field.html new file mode 100644 index 000000000..84f086052 --- /dev/null +++ b/config_app/js/config-field-templates/config-map-field.html @@ -0,0 +1,20 @@ +
+ + + + + + +
{{ key }}{{ value }} + Remove +
+ No entries defined +
+ Add Key-Value: + + + +
+
diff --git a/config_app/js/config-field-templates/config-numeric-field.html b/config_app/js/config-field-templates/config-numeric-field.html new file mode 100644 index 000000000..8c25a2fea --- /dev/null +++ b/config_app/js/config-field-templates/config-numeric-field.html @@ -0,0 +1,6 @@ +
+
+ +
+
diff --git a/config_app/js/config-field-templates/config-parsed-field.html b/config_app/js/config-field-templates/config-parsed-field.html new file mode 100644 index 000000000..766b0a8a2 --- /dev/null +++ b/config_app/js/config-field-templates/config-parsed-field.html @@ -0,0 +1 @@ +
diff --git a/config_app/js/config-field-templates/config-service-key-field.html b/config_app/js/config-field-templates/config-service-key-field.html new file mode 100644 index 000000000..52b7c1187 --- /dev/null +++ b/config_app/js/config-field-templates/config-service-key-field.html @@ -0,0 +1,29 @@ +
+ +
+ + +
+ Could not load service keys +
+ + +
+
+ + Valid key for service {{ serviceName }} exists +
+
+ No valid key found for service {{ serviceName }} + Create Key +
+
+ + + + +
+
diff --git a/config_app/js/config-field-templates/config-setup-tool.html b/config_app/js/config-field-templates/config-setup-tool.html new file mode 100644 index 000000000..629e3b45f --- /dev/null +++ b/config_app/js/config-field-templates/config-setup-tool.html @@ -0,0 +1,1656 @@ +
+
+
+
+ + +
+
+ Custom SSL Certificates +
+
+
+
+
+ + +
+
+ Basic Configuration +
+
+ + + + + + + + + + +
Enterprise Logo URL: + +
+ Enter the full URL to your company's logo. +
+
+ +
Contact Information: + +
+ Information to show in the Contact Page. If none specified, CoreOS contact information + is displayed. +
+
+
+
+ + +
+
+ Server Configuration +
+
+ + + + + + + + + +
Server Hostname: + +
+ The HTTP host (and optionally the port number if a non-standard HTTP/HTTPS port) of the location + where the registry will be accessible on the network +
+
TLS: + + +
+ Running without TLS should not be used for production workloads! +
+ +
+ Terminating TLS outside of Quay Enterprise can result in unusual behavior if the external load balancer is not + configured properly. This option is not recommended for simple setups. Please contact support + if you encounter problems while using this option. +
+ +
+ Enabling TLS also enables HTTP Strict Transport Security.
+ This prevents downgrade attacks and cookie theft, but browsers will reject all future insecure connections on this hostname. +
+ + + + + + + + + + +
Certificate: + +
+ The certificate must be in PEM format. +
+
Private key: + +
+
+ +
+
+ + +
+
+ Data Consistency Settings +
+
+
+

Relax constraints on consistency guarantees for specific operations + to enable higher performance and availability. +

+
+ + + + +
+
+ Allow repository pulls even if audit logging fails. +
+ If enabled, failures to write to the audit log will fall back from + the database to the standard logger for registry pulls. +
+
+
+
+
+ + +
+
+ Time Machine +
+
+
+

Time machine keeps older copies of tags within a repository for the configured period + of time, after which they are garbage collected. This allows users to + revert tags to older images in case they accidentally pushed a broken image. It is + highly recommended to have time machine enabled, but it does take a bit more space + in storage. +

+
+ + + + + + + + + + + + + + +
Allowed expiration periods: + +
+ The expiration periods allowed for configuration. The default tag expiration *must* be in this list. +
+
Default expiration period: + +
+ The default tag expiration period for all namespaces (users and organizations). Must be expressed in a duration string form: 30m, 1h, 1d, 2w. +
+
Allow users to select expiration: +
+ Enable Expiration Configuration +
+ If enabled, users will be able to select the tag expiration duration for the namespace(s) they + administrate, from the configured list of options. +
+
+
+
+
+ + +
+
+ redis +
+
+
+

A redis key-value store is required for real-time events and build logs.

+
+ + + + + + + + + + + + + + +
Redis Hostname: + +
Redis port: + +
+ Access to this port and hostname must be allowed from all hosts running + the enterprise registry +
+
Redis password: + +
+
+
+ + +
+
+ Registry Storage +
+
+
+

+ Registry images can be stored either locally or in a remote storage system. + A remote storage system is required for high-availability systems. +

+ +
+ Enable Storage Replication +
+ If enabled, replicates storage to other regions. See documentation for more information. +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Location ID: + +
+ {{ sc.location }} +
+
+ {{ storageConfigError[$index].location }} +
+ +
Set Default: +
+ Replicate to storage engine by default +
+
Storage Engine: + + +
+ {{ storageConfigError[$index].engine }} +
+
{{ field.title }}: + + + + {{ field.placeholder }} + + +
+ +
+
+ {{ field.help_text }} +
+
+ See Documentation for more information +
+
+
+ + +
+
+
+ + +
+
+ Action Log Rotation and Archiving +
+
+
+

+ All actions performed in are automatically logged. These logs are stored in a database table, which can become quite large. + Enabling log rotation and archiving will move all logs older than 30 days into storage. +

+
+
+ Enable Action Log Rotation +
+ + + + + + + + + + +
Storage location: + +
+ The storage location in which to place archived action logs. Logs will only be archived to this single location. +
+
Storage path: + +
+ The path under the configured storage engine in which to place the archived logs in JSON form. +
+
+
+ + +
+
+ Security Scanner +
+
+
+

If enabled, all images pushed to Quay will be scanned via the external security scanning service, with vulnerability information available in the UI and API, as well + as async notification support. +

+
+ +
+ Enable Security Scanning +
+
+ A scanner compliant with the Quay Security Scanning API must be running to use this feature. Documentation on running Clair can be found at Running Clair Security Scanner. +
+ + + + + + + + + + +
Authentication Key: + +
+ The security scanning service requires an authorized service key to speak to Quay. Once set up, the key + can be managed in the Service Keys panel under the Super User Admin Panel. +
+
Security Scanner Endpoint: + +
+ The HTTP URL at which the security scanner is running. +
+
+ Is the security scanner behind a domain that uses a self-signed TLS certificate? If so, please make sure to register your SSL CA in the custom certificates panel above. +
+
+
+
+ + +
+
+ Application Registry +
+
+
+

If enabled, an additional registry API will be available for managing applications (Kubernetes manifests, Helm charts) via the App Registry specification. A great place to get started is to install the Helm Registry Plugin. +

+ +
+ Enable App Registry +
+
+
+ + +
+
+ BitTorrent-based download +
+
+
+

If enabled, all images in the registry can be downloaded using the quayctl tool via the BitTorrent protocol. A JWT-compatible BitTorrent tracker such as Chihaya must be run. +

+ +
+ Enable BitTorrent downloads +
+ + + + + + +
Announce URL: + +
+ The HTTP URL at which the torrents should be announced. A JWT-compatible tracker such as Chihaya must be run to ensure proper security. Documentation on running Chihaya with + this support can be found at Running Chihaya for Quay Enterprise. +
+
+
+
+ + +
+
+ rkt Conversion +
+
+
+

If enabled, all images in the registry can be fetched via rkt fetch or any other AppC discovery-compliant implementation.

+
+ +
+ Enable ACI Conversion +
+ +
+ Documentation on generating these keys can be found at Generating ACI Signing Keys. +
+ + + + + + + + + + + + + + +
GPG2 Public Key File: + +
+ The certificate must be in PEM format. +
+
GPG2 Private Key File: + +
GPG2 Private Key Name: + +
+
+
+ + +
+
+ E-mail +
+
+
+

Valid e-mail server configuration is required for notification e-mails and the ability of + users to reset their passwords.

+
+ +
+ Enable E-mails +
+ + + + + + + + + + + + + + + + + + + + + + + +
SMTP Server: + > +
SMTP Server Port: + +
TLS: +
+ Require TLS +
+
Mail Sender: + +
+ E-mail address from which all e-mails are sent. If not specified, + support@quay.io will be used. +
+
Authentication: +
+ Requires Authentication +
+ + + + + + + + + + +
Username: + +
Password: + +
+
+
+
+ + +
+
+ Internal Authentication +
+
+
+

+ Authentication for the registry can be handled by the registry itself, LDAP, Keystone, or an external JWT endpoint. +

+

+ Additional external authentication providers (such as GitHub) can also be used for logging into the UI. +

+
+ +
+
+ It is highly recommended to require encrypted client passwords. External passwords used in the Docker client will be stored in plaintext! + Enable this requirement now. +
+ +
+ Note: The "Require Encrypted Client Passwords" feature is currently enabled which will + prevent passwords from being saved as plaintext by the Docker client. +
+
+ + + + + + + + + + + + + + + + + + + +
Authentication: + +
Team synchronization: +
+ Enable Team Synchronization Support +
+
+ If enabled, organization administrators who are also superusers can set teams to have their membership synchronized with a backing group in {{ config.AUTHENTICATION_TYPE }}. +
+
Resynchronization duration: + +
+ The duration before a team must be re-synchronized. Must be expressed in a duration string form: 30m, 1h, 1d. +
+
Self-service team syncing setup: +
If enabled, this feature will allow *any organization administrator* to read the membership of any {{ config.AUTHENTICATION_TYPE }} group.
+
+ Allow non-superusers to enable and manage team syncing +
+
+ If enabled, non-superusers will be able to enable and manage team syncing on teams under organizations in which they are administrators. +
+
+ + + + + + + + + + + + + + + + + + + + + + + +
Keystone API Version: + +
Keystone Authentication URL: + +
+ The URL (starting with http or https) of the Keystone Server endpoint for auth. +
+
Keystone Administrator Username: + +
+ The username for the Keystone admin. +
+
Keystone Administrator Password: + +
+ The password for the Keystone admin. +
+
Keystone Administrator Tenant: + +
+ The tenant (project/group) that contains the administrator user. +
+
+ + +
+ JSON Web Token authentication allows your organization to provide an HTTP endpoint that + verifies user credentials on behalf of . +
+ Documentation + on the API required can be found here: https://github.com/coreos/jwt-auth-example. +
+ + + + + + + + + + + + + + + + + + + + + + +
Authentication Issuer: + +
+ The id of the issuer signing the JWT token. Must be unique to your organization. +
+
Public Key: + +
+ A certificate containing the public key portion of the key pair used to sign + the JSON Web Tokens. This file must be in PEM format. +
+
User Verification Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for verifying username and password credentials. +
+ +
+ Credentials will be sent in the Authorization header as Basic Auth, and this endpoint should return 200 OK on success (or a 4** otherwise). +
+
User Query Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + users based on a prefix query. This is optional. +
+ +
+ The prefix query will be sent as a query parameter with name query. +
+
User Lookup Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + a user by username or email address. +
+ +
+ The username or email address will be sent as a query parameter with name username. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LDAP URI: + +
+ The full LDAP URI, including the ldap:// or ldaps:// prefix. +
+
Base DN: + +
+ A Distinguished Name path which forms the base path for looking up all LDAP records. +
+
+ Example: dc=my,dc=domain,dc=com +
+
User Relative DN: + +
+ A Distinguished Name path which forms the base path for looking up all user LDAP records, + relative to the Base DN defined above. +
+
+ Example: ou=employees +
+
Secondary User Relative DNs: + +
+ A list of Distinguished Name path(s) which form the secondary base path(s) for + looking up all user LDAP records, relative to the Base DN defined above. These path(s) + will be tried if the user is not found via the primary relative DN. +
+
+ Example: [ou=employees] +
+
Administrator DN: +
+ The Distinguished Name for the Administrator account. This account must be able to log in and view the records for all user accounts. +
+
+ Example: uid=admin,ou=employees,dc=my,dc=domain,dc=com +
+
Administrator DN Password: +
+ Note: This will be stored in + plaintext inside the config.yaml, so setting up a dedicated account or using + a password hash is highly recommended. +
+ +
+ The password for the Administrator DN. +
+
UID Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' username. Typically "uid". +
+
Mail Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' e-mail address(es). Typically "mail". +
+
Custom TLS Certificate: + +
+ If specified, the certificate (in PEM format) for the LDAP TLS connection. +
+
Allow insecure: +
+ Allow fallback to non-TLS connections +
+
+ If enabled, LDAP will fall back to insecure non-TLS connections if TLS does not succeed. +
+
+
+
+ +
+
+ External Authorization (OAuth) +
+
+ +
+
+ GitHub (Enterprise) Authentication +
+
+
+

+ If enabled, users can use GitHub or GitHub Enterprise to authenticate to the registry. +

+

+ Note: A registered GitHub (Enterprise) OAuth application is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
Organization Filtering: +
+ Restrict By Organization Membership +
+ +
+ If enabled, only members of specified GitHub + Enterprise organizations will be allowed to log in via GitHub + Enterprise. +
+ + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ Google Authentication +
+
+
+

+ If enabled, users can use Google to authenticate to the registry. +

+

+ Note: A registered Google OAuth application is required. + Visit the + + Google Developer Console + + to register an application. +

+
+ +
+ Enable Google Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + +
OAuth Client ID: + + +
OAuth Client Secret: + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ + {{ config[provider]['SERVICE_NAME'] || (getOIDCProviderId(provider) + ' Authentication') }} + (Delete) +
+
+
+ Warning: This OIDC provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Service ID: + {{ getOIDCProviderId(provider) }} +
OIDC Server: + + +
+ The URL of an OIDC-compliant server. +
+
Client ID: + +
Client Secret: + +
Service Name: + + +
+ The user-friendly name to display for the service on the login page. +
+
Service Icon (optional): + + +
+ If specified, the icon to display for this login service on the login page. Can be either a URL to an icon or a CSS class name from Font Awesome +
+
Binding Field: + +
+ If selected, when a user logs in via this OIDC provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the OIDC provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this OIDC provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this OIDC provider. This is not the recommended setup. +
+
Login Scopes: + +
+ If specified, the scopes to send to the OIDC provider when performing the login flow. Note that, if specified, these scopes will + override those set by default, so this list must include a scope for OpenID Connect + (typically the openid scope) or this provider will fail. +
+
+
+

Callback URLs for this service:

+
    +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback
  • +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/attach
  • +
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/cli
  • +
+
+
+
+ + + Add OIDC Provider + What is OIDC? +
+
+ + +
+
+ Access Settings +
+
+
+

Various settings around access and authentication to the registry.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Basic Credentials Login: +
+ Login to User Interface via credentials +
+
+
+ Login to User Interface via credentials must be enabled. Click here to enable. +
+
+ Login to User Interface via credentials is enabled (requires at least one OIDC provider to disable) +
+
+
+ If enabled, users will be able to log in to the user interface via their username and password credentials. +
+
+ If disabled, users will only be able to log in to the user interface via one of the configured External Authentication providers. +
+
External Application tokens +
+ Allow external application tokens +
+
+ If enabled, users will be able to generate external application tokens for use on the Docker and rkt CLI. Note + that these tokens will not be required unless "App Token" is chosen as the Internal Authentication method above. +
+
External application token expiration + +
+ The expiration time for user-generated external application tokens. If none, tokens will never expire. +
+
Anonymous Access: +
+ Enable Anonymous Access +
+
+ If enabled, public repositories and search can be accessed by anyone that can + reach the registry, even if they are not authenticated. Disable to only allow + authenticated users to view and pull "public" resources. +
+
User Creation: +
+ Enable Open User Creation +
+
+ If enabled, user accounts can be created by anyone (unless restricted below to invited users). + Users can always be created in the users panel in this superuser tool, even if this feature is disabled. +
+
Invite-only User Creation: +
+ Enable Invite-only User Creation +
+
+ If enabled, user accounts can only be created when a user has been invited, by e-mail address, to join a team. + Users can always be created in the users panel in this superuser tool, even if this feature is enabled. +
+
Encrypted Client Password: +
+ Require Encrypted Client Passwords +
+
+ If enabled, users will not be able to log in from the Docker command + line with a non-encrypted password and must generate an encrypted + password to use. +
+
+ This feature is highly recommended for setups with external authentication, as Docker currently stores passwords in plaintext on users' machines. +
+
Prefix username autocompletion: +
+ Allow prefix username autocompletion +
+
+ If disabled, autocompletion for users will only match on exact usernames. +
+
Team Invitations: +
+ Require Team Invitations +
+
+ If enabled, when adding a new user to a team, they will receive an invitation to join the team, with the option to decline. + Otherwise, users will be immediately part of a team when added by a team administrator. +
+
+
+
+ + +
+
+ Dockerfile Build Support +
+
+
+ If enabled, users can submit Dockerfiles to be built and pushed by . +
+ +
+ Enable Dockerfile Build +
+ +
+ Note: Build workers are required for this feature. + See Adding Build Workers for instructions on how to set up build workers. +
+
+
+ + +
+
+ GitHub (Enterprise) Build Triggers +
+
+
+

+ If enabled, users can set up GitHub or GitHub Enterprise triggers to invoke Registry builds. +

+

+ Note: A registered GitHub (Enterprise) OAuth application (separate from GitHub Authentication) is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Triggers +
+ + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
+
+
+ + +
+
+ BitBucket Build Triggers +
+
+
+

+ If enabled, users can set up BitBucket triggers to invoke Registry builds. +

+

+ Note: A registered BitBucket OAuth application is required. + View instructions on how to + + Create an OAuth Application in BitBucket + +

+
+ +
+ Enable BitBucket Triggers +
+ + + + + + + + + + +
OAuth Consumer Key: + + +
OAuth Consumer Secret: + + +
+
+
+ + +
+
+ GitLab Build Triggers +
+
+
+

+ If enabled, users can set up GitLab triggers to invoke Registry builds. +

+

+ Note: A registered GitLab OAuth application is required. + Visit the + + GitLab applications admin panel + + to create a new application. +

+

The callback URL to use is:   + {{ config.PREFERRED_URL_SCHEME || 'http' }}://{{ config.SERVER_HOSTNAME || 'localhost' }}/oauth2/gitlab/callback/trigger +

+
+ +
+ Enable GitLab Triggers +
+ + + + + + + + + + + + + + + + + + +
GitLab: + +
GitLab Endpoint: + + +
+ The GitLab Enterprise endpoint. Must start with http:// or https://. +
+
Application Id: + + +
Secret: + + +
+
+
+ + + + +
+ + +
+ + + + +
+
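Every field in the setup-tool template above ultimately lands in the same config.yaml that import_yaml later validates, so a candidate config can be sanity-checked against the shared schema before being saved. A hedged sketch, with illustrative candidate values:

```python
# Sketch: checking a candidate config against the shared schema.
# The candidate values are illustrative only.
from jsonschema import validate, ValidationError

from util.config.schema import CONFIG_SCHEMA

candidate = {
    'SERVER_HOSTNAME': 'registry.example.com',
    'PREFERRED_URL_SCHEME': 'https',
}

try:
    validate(candidate, CONFIG_SCHEMA)
except ValidationError as ve:
    print('Config rejected: %s' % ve.message)
```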
diff --git a/config_app/js/config-field-templates/config-string-field.html b/config_app/js/config-field-templates/config-string-field.html new file mode 100644 index 000000000..703891f89 --- /dev/null +++ b/config_app/js/config-field-templates/config-string-field.html @@ -0,0 +1,10 @@ +
+
+ +
+ {{ errorMessage }} +
+
+
diff --git a/config_app/js/config-field-templates/config-string-list-field.html b/config_app/js/config-field-templates/config-string-list-field.html new file mode 100644 index 000000000..de29dfb91 --- /dev/null +++ b/config_app/js/config-field-templates/config-string-list-field.html @@ -0,0 +1,6 @@ +
+
+ +
+
diff --git a/config_app/js/config-field-templates/config-variable-field.html b/config_app/js/config-field-templates/config-variable-field.html new file mode 100644 index 000000000..9236469cd --- /dev/null +++ b/config_app/js/config-field-templates/config-variable-field.html @@ -0,0 +1,10 @@ +
+
+ +
+ + +
diff --git a/config_app/js/core-config-setup/config-setup-tool.html b/config_app/js/core-config-setup/config-setup-tool.html new file mode 100644 index 000000000..ec3faa1c7 --- /dev/null +++ b/config_app/js/core-config-setup/config-setup-tool.html @@ -0,0 +1,1657 @@ +
+
+
+ +
+ + +
+
+ Custom SSL Certificates +
+
+
+
+
+ + +
+
+ Basic Configuration +
+
+ + + + + + + + + + +
Enterprise Logo URL: + +
+ Enter the full URL to your company's logo. +
+
+ +
Contact Information: + +
+ Information to show in the Contact Page. If none specified, CoreOS contact information + is displayed. +
+
+
+
+ + +
+
+ Server Configuration +
+
+ + + + + + + + + +
Server Hostname: + +
+ The HTTP host (and optionally the port number if a non-standard HTTP/HTTPS port) of the location + where the registry will be accessible on the network +
+
TLS: + + +
+ Running without TLS should not be used for production workloads! +
+ +
+ Terminating TLS outside of Quay Enterprise can result in unusual behavior if the external load balancer is not + configured properly. This option is not recommended for simple setups. Please contact support + if you encounter problems while using this option. +
+ +
+ Enabling TLS also enables HTTP Strict Transport Security.
+ This prevents downgrade attacks and cookie theft, but browsers will reject all future insecure connections on this hostname. +
+ + + + + + + + + + +
Certificate: + +
+ The certificate must be in PEM format. +
+
Private key: + +
+
+ +
+
+ + +
+
+ Data Consistency Settings +
+
+
+

Relax constraints on consistency guarantees for specific operations + to enable higher performance and availability. +

+
+ + + + +
+
+ Allow repository pulls even if audit logging fails. +
+ If enabled, failures to write to the audit log will fall back from + the database to the standard logger for registry pulls. +
+
+
+
+
+ + +
+
+ Time Machine +
+
+
+

Time machine keeps older copies of tags within a repository for the configured period + of time, after which they are garbage collected. This allows users to + revert tags to older images in case they accidentally pushed a broken image. It is + highly recommended to have time machine enabled, but it does take a bit more space + in storage. +

+
+ + + + + + + + + + + + + + +
Allowed expiration periods: + +
+ The expiration periods allowed for configuration. The default tag expiration *must* be in this list. +
+
Default expiration period: + +
+ The default tag expiration period for all namespaces (users and organizations). Must be expressed in a duration string form: 30m, 1h, 1d, 2w. +
+
Allow users to select expiration: +
+ Enable Expiration Configuration +
+ If enabled, users will be able to select the tag expiration duration for the namespace(s) they + administrate, from the configured list of options. +
+
+
+
+
+ + +
+
+ redis +
+
+
+

A redis key-value store is required for real-time events and build logs.

+
+ + + + + + + + + + + + + + +
Redis Hostname: + +
Redis port: + +
+ Access to this port and hostname must be allowed from all hosts running + the enterprise registry +
+
Redis password: + +
+
+
+ + +
+
+ Registry Storage +
+
+
+

+ Registry images can be stored either locally or in a remote storage system. + A remote storage system is required for high-availability systems. +

+ +
+ Enable Storage Replication +
+ If enabled, replicates storage to other regions. See documentation for more information. +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Location ID: + +
+ {{ sc.location }} +
+
+ {{ storageConfigError[$index].location }} +
+ +
Set Default: +
+ Replicate to storage engine by default +
+
Storage Engine: + + +
+ {{ storageConfigError[$index].engine }} +
+
{{ field.title }}: + + + + {{ field.placeholder }} + + +
+ +
+
+ {{ field.help_text }} +
+
+ See Documentation for more information +
+
+
+ + +
+
+
+ + +
+
+ Action Log Rotation and Archiving +
+
+
+

+ All actions performed in are automatically logged. These logs are stored in a database table, which can become quite large. + Enabling log rotation and archiving will move all logs older than 30 days into storage. +

+
+
+ Enable Action Log Rotation +
+ + + + + + + + + + +
Storage location: + +
+ The storage location in which to place archived action logs. Logs will only be archived to this single location. +
+
Storage path: + +
+ The path under the configured storage engine in which to place the archived logs in JSON form. +
+
+
+ + +
+
+ Security Scanner +
+
+
+

If enabled, all images pushed to Quay will be scanned via the external security scanning service, with vulnerability information available in the UI and API, as well + as async notification support. +

+
+ +
+ Enable Security Scanning +
+
+ A scanner compliant with the Quay Security Scanning API must be running to use this feature. Documentation on running Clair can be found at Running Clair Security Scanner. +
+ + + + + + + + + + +
Authentication Key: + +
+ The security scanning service requires an authorized service key to speak to Quay. Once set up, the key + can be managed in the Service Keys panel under the Super User Admin Panel. +
+
Security Scanner Endpoint: + +
+ The HTTP URL at which the security scanner is running. +
+
+ Is the security scanner behind a domain that uses a self-signed TLS certificate? If so, please make sure to register your SSL CA in the custom certificates panel above. +
+
+
+
+ + +
+
+ Application Registry +
+
+
+

If enabled, an additional registry API will be available for managing applications (Kubernetes manifests, Helm charts) via the App Registry specification. A great place to get started is to install the Helm Registry Plugin. +

+ +
+ Enable App Registry +
+
+
+ + +
+
+ BitTorrent-based download +
+
+
+

If enabled, all images in the registry can be downloaded using the quayctl tool via the BitTorrent protocol. A JWT-compatible BitTorrent tracker such as Chihaya must be run. +

+ +
+ Enable BitTorrent downloads +
+ + + + + + +
Announce URL: + +
+ The HTTP URL at which the torrents should be announced. A JWT-compatible tracker such as Chihaya must be run to ensure proper security. Documentation on running Chihaya with + this support can be found at Running Chihaya for Quay Enterprise. +
+
+
+
+ + +
+
+ rkt Conversion +
+
+
+

If enabled, all images in the registry can be fetched via rkt fetch or any other AppC discovery-compliant implementation.

+
+ +
+ Enable ACI Conversion +
+ +
+ Documentation on generating these keys can be found at Generating ACI Signing Keys. +
+ + + + + + + + + + + + + + +
GPG2 Public Key File: + +
+ The certificate must be in PEM format. +
+
GPG2 Private Key File: + +
GPG2 Private Key Name: + +
+
+
+ + +
+
+ E-mail +
+
+
+

Valid e-mail server configuration is required for notification e-mails and the ability of + users to reset their passwords.

+
+ +
+ Enable E-mails +
+ + + + + + + + + + + + + + + + + + + + + + + +
SMTP Server: + > +
SMTP Server Port: + +
TLS: +
+ Require TLS +
+
Mail Sender: + +
+ E-mail address from which all e-mails are sent. If not specified, + support@quay.io will be used. +
+
Authentication: +
+ Requires Authentication +
+ + + + + + + + + + +
Username: + +
Password: + +
+
+
+
+ + +
+
+ Internal Authentication +
+
+
+

+ Authentication for the registry can be handled by the registry itself, LDAP, Keystone, or an external JWT endpoint. +

+

+ Additional external authentication providers (such as GitHub) can also be used for logging into the UI. +

+
+ +
+
+ It is highly recommended to require encrypted client passwords. External passwords used in the Docker client will be stored in plaintext! + Enable this requirement now. +
+ +
+ Note: The "Require Encrypted Client Passwords" feature is currently enabled which will + prevent passwords from being saved as plaintext by the Docker client. +
+
+ + + + + + + + + + + + + + + + + + + +
Authentication: + +
Team synchronization: +
+ Enable Team Synchronization Support +
+
+ If enabled, organization administrators who are also superusers can set teams to have their membership synchronized with a backing group in {{ config.AUTHENTICATION_TYPE }}. +
+
Resynchronization duration: + +
+ The duration before a team must be re-synchronized. Must be expressed in a duration string form: 30m, 1h, 1d. +
+
Self-service team syncing setup: +
If enabled, this feature will allow *any organization administrator* to read the membership of any {{ config.AUTHENTICATION_TYPE }} group.
+
+ Allow non-superusers to enable and manage team syncing +
+
+ If enabled, non-superusers will be able to enable and manage team syncing on teams under organizations in which they are administrators. +
+
+ + + + + + + + + + + + + + + + + + + + + + + +
Keystone API Version: + +
Keystone Authentication URL: + +
+ The URL (starting with http or https) of the Keystone Server endpoint for auth. +
+
Keystone Administrator Username: + +
+ The username for the Keystone admin. +
+
Keystone Administrator Password: + +
+ The password for the Keystone admin. +
+
Keystone Administrator Tenant: + +
+ The tenant (project/group) that contains the administrator user. +
+
+ + +
+ JSON Web Token authentication allows your organization to provide an HTTP endpoint that + verifies user credentials on behalf of . +
+ Documentation + on the API required can be found here: https://github.com/coreos/jwt-auth-example. +
+ + + + + + + + + + + + + + + + + + + + + + +
Authentication Issuer: + +
+ The id of the issuer signing the JWT token. Must be unique to your organization. +
+
Public Key: + +
+ A certificate containing the public key portion of the key pair used to sign + the JSON Web Tokens. This file must be in PEM format. +
+
User Verification Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for verifying username and password credentials. +
+ +
+ Credentials will be sent in the Authorization header as Basic Auth, and this endpoint should return 200 OK on success (or a 4** otherwise). +
+
User Query Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + users based on a prefix query. This is optional. +
+ +
+ The prefix query will be sent as a query parameter with name query. +
+
User Lookup Endpoint: + +
+ The URL (starting with http or https) on the JWT authentication server for looking up + a user by username or email address. +
+ +
+ The username or email address will be sent as a query parameter with name username. +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LDAP URI: + +
+ The full LDAP URI, including the ldap:// or ldaps:// prefix. +
+
Base DN: + +
+ A Distinguished Name path which forms the base path for looking up all LDAP records. +
+
+ Example: dc=my,dc=domain,dc=com +
+
User Relative DN: + +
+ A Distinguished Name path which forms the base path for looking up all user LDAP records, + relative to the Base DN defined above. +
+
+ Example: ou=employees +
+
Secondary User Relative DNs: + +
+ A list of Distinguished Name path(s) which form the secondary base path(s) for + looking up all user LDAP records, relative to the Base DN defined above. These path(s) + will be tried if the user is not found via the primary relative DN. +
+
+ Example: [ou=employees] +
+
Administrator DN: +
+ The Distinguished Name for the Administrator account. This account must be able to log in and view the records for all user accounts. +
+
+ Example: uid=admin,ou=employees,dc=my,dc=domain,dc=com +
+
Administrator DN Password: +
+ Note: This will be stored in + plaintext inside the config.yaml, so setting up a dedicated account or using + a password hash is highly recommended. +
+ +
+ The password for the Administrator DN. +
+
UID Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' username. Typically "uid". +
+
Mail Attribute: + +
+ The name of the property field in your LDAP user records that stores your + users' e-mail address(es). Typically "mail". +
+
Custom TLS Certificate: + +
+ If specified, the certificate (in PEM format) for the LDAP TLS connection. +
+
Allow insecure: +
+ Allow fallback to non-TLS connections +
+
+ If enabled, LDAP will fall back to insecure non-TLS connections if TLS does not succeed. +
+
+
+
+ +
+
+ External Authorization (OAuth) +
+
+ +
+
+ GitHub (Enterprise) Authentication +
+
+
+

+ If enabled, users can use GitHub or GitHub Enterprise to authenticate to the registry. +

+

+ Note: A registered GitHub (Enterprise) OAuth application is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
Organization Filtering: +
+ Restrict By Organization Membership +
+ +
+ If enabled, only members of specified GitHub + Enterprise organizations will be allowed to log in via GitHub + Enterprise. +
+ + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
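As a sketch of what enabling this provider produces on the config object: FEATURE_GITHUB_LOGIN, GITHUB_LOGIN_CONFIG, and GITHUB_ENDPOINT all appear in the setup code later in this diff, while the remaining key names are assumptions based on the field labels above.

    // Illustrative only: approximate shape of the GitHub (Enterprise) login config.
    config.FEATURE_GITHUB_LOGIN = true;
    config.GITHUB_LOGIN_CONFIG = {
      GITHUB_ENDPOINT: 'https://github.example.com/',  // or 'https://github.com/' for hosted
      CLIENT_ID: 'oauth-client-id-here',               // assumed key for "OAuth Client ID"
      CLIENT_SECRET: 'oauth-client-secret-here',       // assumed key for "OAuth Client Secret"
      ORG_RESTRICT: true,                              // assumed key: restrict by org membership
      ALLOWED_ORGANIZATIONS: ['my-org'],               // assumed key: permitted organizations
      LOGIN_BINDING_FIELD: 'sub'                       // assumed key for the "Binding Field" above
    };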
+ + +
+
+ Google Authentication +
+
+
+

+ If enabled, users can use Google to authenticate to the registry. +

+

+ Note: A registered Google OAuth application is required. + Visit the + + Google Developer Console + + to register an application. +

+
+ +
+ Enable Google Authentication +
+ +
+ Warning: This provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
OAuth Client ID: + + +
OAuth Client Secret: + + +
Binding Field: + +
+ If selected, when a user logs in via this provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this provider. This is not the recommended setup. +
+
+
+
+ + +
+
+ + {{ config[provider]['SERVICE_NAME'] || (getOIDCProviderId(provider) + ' Authentication') }} + (Delete) +
+
+
+ Warning: This OIDC provider is not bound to your {{ config.AUTHENTICATION_TYPE }} authentication. Logging in via this provider will create a -only user, which is not the recommended approach. It is highly recommended to choose a "Binding Field" below. +
Service ID: + {{ getOIDCProviderId(provider) }} +
OIDC Server: + + +
+ The URL of an OIDC-compliant server. +
+
Client ID: + +
Client Secret: + +
Service Name: + + +
+ The user-friendly name to display for the service on the login page. +
+
Service Icon (optional): + + +
+ If specified, the icon to display for this login service on the login page. Can be either a URL to an icon or a CSS class name from Font Awesome. +
+
Binding Field: + +
+ If selected, when a user logs in via this OIDC provider, they will be automatically bound to their user in {{ config.AUTHENTICATION_TYPE }} by matching the selected field from the OIDC provider to the associated user in {{ config.AUTHENTICATION_TYPE }}. +
+
+ For example, selecting Subject here with a backing authentication system of LDAP means that a user logging in via this OIDC provider will also be bound to their user in LDAP by username. +
+
+ If none selected, a user unique to will be created on initial login with this OIDC provider. This is not the recommended setup. +
+
Login Scopes: + +
+ If specified, the scopes to send to the OIDC provider when performing the login flow. Note that, if specified, these scopes will + override those set by default, so this list must include a scope for OpenID Connect + (typically the openid scope) or this provider will fail. +
+
+
+

Callback URLs for this service:

+
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/attach
  • {{ mapped.TLS_SETTING == 'none' ? 'http' : 'https' }}://{{ config.SERVER_HOSTNAME || '(configure server hostname)' }}/oauth2/{{ getOIDCProviderId(provider).toLowerCase() }}/callback/cli
+
+
+
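The service ID shown above is derived from the provider's config key (any key of the form PREFIX_LOGIN_CONFIG), and the callback URLs follow directly from it. A small sketch mirroring the getOIDCProviderId() helper defined later in this diff; 'MYIDP' is a hypothetical provider:

    // Sketch: deriving the service ID and the three callback URLs listed above.
    var key = 'MYIDP_LOGIN_CONFIG';  // hypothetical OIDC provider key
    var providerId = key.substr(0, key.indexOf('_LOGIN_CONFIG')).toLowerCase();  // 'myidp'

    var scheme = mapped.TLS_SETTING == 'none' ? 'http' : 'https';
    var base = scheme + '://' + config.SERVER_HOSTNAME + '/oauth2/' + providerId;
    var callbacks = [
      base + '/callback',
      base + '/callback/attach',
      base + '/callback/cli'
    ];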
+ + + Add OIDC Provider + What is OIDC? +
+
+ + +
+
+ Access Settings +
+
+
+

Various settings around access and authentication to the registry.

+
Basic Credentials Login: +
+ Login to User Interface via credentials +
+
+
+ Login to User Interface via credentials must be enabled. Click here to enable. +
+
+ Login to User Interface via credentials is enabled (requires at least one OIDC provider to disable) +
+
+
+ If enabled, users will be able to log in to the user interface via their username and password credentials. +
+
+ If disabled, users will only be able to log in to the user interface via one of the configured External Authentication providers. +
+
External Application Tokens: +
+ Allow external application tokens +
+
+ If enabled, users will be able to generate external application tokens for use with the Docker and rkt CLIs. Note + that these tokens will not be required unless "App Token" is chosen as the Internal Authentication method above. +
+
External application token expiration + +
+ The expiration time for user generated external application tokens. If none, tokens will never expire. +
+
Anonymous Access: +
+ Enable Anonymous Access +
+
+ If enabled, public repositories and search can be accessed by anyone who can + reach the registry, even if they are not authenticated. Disable to only allow + authenticated users to view and pull "public" resources. +
+
User Creation: +
+ Enable Open User Creation +
+
+ If enabled, user accounts can be created by anyone (unless restricted below to invited users). + Users can always be created in the users panel in this superuser tool, even if this feature is disabled. +
+
Invite-only User Creation: +
+ Enable Invite-only User Creation +
+
+ If enabled, user accounts can only be created when a user has been invited, by e-mail address, to join a team. + Users can always be created in the users panel in this superuser tool, even if this feature is enabled. +
+
Encrypted Client Password: +
+ Require Encrypted Client Passwords +
+
+ If enabled, users will not be able to log in from the Docker command + line with a non-encrypted password and must generate an encrypted + password to use. +
+
+ This feature is highly recommended for setups with external authentication, as Docker currently stores passwords in plaintext on users' machines. +
+
Prefix username autocompletion: +
+ Allow prefix username autocompletion +
+
+ If disabled, autocompletion for users will only match on exact usernames. +
+
Team Invitations: +
+ Require Team Invitations +
+
+ If enabled, when adding a new user to a team, they will receive an invitation to join the team, with the option to decline. + Otherwise, users will immediately become part of the team when added by a team administrator. +
+
+
+
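Each toggle above maps to a simple key on the config object. A condensed sketch: FEATURE_DIRECT_LOGIN appears in the setup code later in this diff, while the remaining key names are assumptions based on the row labels above.

    // Illustrative only: approximate access-settings keys.
    config.FEATURE_DIRECT_LOGIN = true;                   // Basic Credentials Login
    config.FEATURE_APP_SPECIFIC_TOKENS = true;            // assumed: external application tokens
    config.APP_SPECIFIC_TOKEN_EXPIRATION = null;          // assumed: null means tokens never expire
    config.FEATURE_ANONYMOUS_ACCESS = true;               // assumed: Anonymous Access
    config.FEATURE_USER_CREATION = true;                  // assumed: Open User Creation
    config.FEATURE_INVITE_ONLY_USER_CREATION = false;     // assumed: Invite-only User Creation
    config.FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = false;  // assumed: Encrypted Client Passwords
    config.FEATURE_PARTIAL_USER_AUTOCOMPLETE = true;      // assumed: prefix username autocompletion
    config.FEATURE_REQUIRE_TEAM_INVITE = true;            // assumed: Team Invitations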
+ + +
+
+ Dockerfile Build Support +
+
+
+ If enabled, users can submit Dockerfiles to be built and pushed by . +
+ +
+ Enable Dockerfile Build +
+ +
+ Note: Build workers are required for this feature. + See Adding Build Workers for instructions on how to set up build workers. +
+
+
+ + +
+
+ GitHub (Enterprise) Build Triggers +
+
+
+

+ If enabled, users can set up GitHub or GitHub Enterprise triggers to invoke Registry builds. +

+

+ Note: A registered GitHub (Enterprise) OAuth application (separate from GitHub Authentication) is required. + View instructions on how to + + Create an OAuth Application in GitHub + +

+
+ +
+ Enable GitHub Triggers +
GitHub: + +
GitHub Endpoint: + + +
+ The GitHub Enterprise endpoint. Must start with http:// or https://. +
+
OAuth Client ID: + + +
OAuth Client Secret: + + +
+
+
+ + +
+
+ BitBucket Build Triggers +
+
+
+

+ If enabled, users can set up BitBucket triggers to invoke Registry builds. +

+

+ Note: A registered BitBucket OAuth application is required. + View instructions on how to + + Create an OAuth Application in BitBucket + +

+
+ +
+ Enable BitBucket Triggers +
OAuth Consumer Key: + + +
OAuth Consumer Secret: + + +
+
+
+ + +
+
+ GitLab Build Triggers +
+
+
+

+ If enabled, users can set up GitLab triggers to invoke Registry builds. +

+

+ Note: A registered GitLab OAuth application is required. + Visit the + + GitLab applications admin panel + + to create a new application. +

+

The callback URL to use is:   + {{ config.PREFERRED_URL_SCHEME || 'http' }}://{{ config.SERVER_HOSTNAME || 'localhost' }}/oauth2/gitlab/callback/trigger +

+
+ +
+ Enable GitLab Triggers +
GitLab: + +
GitLab Endpoint: + + +
+ The GitLab Enterprise endpoint. Must start with http:// or https://. +
+
Application Id: + + +
Secret: + + +
+
+
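A sketch of the resulting trigger section: FEATURE_GITLAB_BUILD and GITLAB_ENDPOINT both appear in the setup code later in this diff, while the container key and the inner credential keys are assumed names for the "Application Id" and "Secret" fields above.

    // Illustrative only: approximate shape of the GitLab trigger config.
    config.FEATURE_GITLAB_BUILD = true;
    config.GITLAB_TRIGGER_CONFIG = {              // assumed container key
      GITLAB_ENDPOINT: 'https://gitlab.com/',     // or a self-hosted GitLab endpoint
      CLIENT_ID: 'application-id-here',           // assumed key for "Application Id"
      CLIENT_SECRET: 'secret-here'                // assumed key for "Secret"
    };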
diff --git a/config_app/js/core-config-setup/core-config-setup.js b/config_app/js/core-config-setup/core-config-setup.js new file mode 100644 index 000000000..d7f91d9bf --- /dev/null +++ b/config_app/js/core-config-setup/core-config-setup.js @@ -0,0 +1,1452 @@ +import * as URI from 'urijs'; +import * as angular from 'angular'; +const templateUrl = require('./config-setup-tool.html'); +const urlParsedField = require('../config-field-templates/config-parsed-field.html'); +const urlVarField = require('../config-field-templates/config-variable-field.html'); +const urlListField = require('../config-field-templates/config-list-field.html'); +const urlFileField = require('../config-field-templates/config-file-field.html'); +const urlBoolField = require('../config-field-templates/config-bool-field.html'); +const urlNumericField = require('../config-field-templates/config-numeric-field.html'); +const urlContactsField = require('../config-field-templates/config-contacts-field.html'); +const urlMapField = require('../config-field-templates/config-map-field.html'); +const urlServiceKeyField = require('../config-field-templates/config-service-key-field.html'); +const urlStringField = require('../config-field-templates/config-string-field.html'); + +const urlStringListField = require('../config-field-templates/config-string-list-field.html'); +const urlCertField = require('../config-field-templates/config-certificates-field.html'); + + +angular.module("quay-config") + .directive('configSetupTool', () => { + var directiveDefinitionObject = { + priority: 1, + templateUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'isActive': '=isActive', + 'configurationSaved': '&configurationSaved' + }, + controller: function($rootScope, $scope, $element, $timeout, ApiService) { + var authPassword = null; + + $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9\.]+(:[0-9]+)?$'; + $scope.GITHOST_REGEX = '^https?://([a-zA-Z0-9]+\.?\/?)+$'; + + $scope.SERVICES = [ + {'id': 'redis', 'title': 'Redis'}, + + {'id': 'registry-storage', 'title': 'Registry Storage'}, + + {'id': 'time-machine', 'title': 'Time Machine'}, + + {'id': 'access', 'title': 'Access Settings'}, + + {'id': 'ssl', 'title': 'SSL certificate and key', 'condition': function(config) { + return config.PREFERRED_URL_SCHEME == 'https'; + }}, + + {'id': 'ldap', 'title': 'LDAP Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'LDAP'; + }, 'password': true}, + + {'id': 'jwt', 'title': 'JWT Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'JWT'; + }, 'password': true}, + + {'id': 'keystone', 'title': 'Keystone Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'Keystone'; + }, 'password': true}, + + {'id': 'apptoken-auth', 'title': 'App Token Authentication', 'condition': function(config) { + return config.AUTHENTICATION_TYPE == 'AppToken'; + }}, + + {'id': 'signer', 'title': 'ACI Signing', 'condition': function(config) { + return config.FEATURE_ACI_CONVERSION; + }}, + + {'id': 'mail', 'title': 'E-mail Support', 'condition': function(config) { + return config.FEATURE_MAILING; + }}, + + {'id': 'github-login', 'title': 'Github (Enterprise) Authentication', 'condition': function(config) { + return config.FEATURE_GITHUB_LOGIN; + }}, + + {'id': 'google-login', 'title': 'Google Authentication', 'condition': function(config) { + return config.FEATURE_GOOGLE_LOGIN; + }}, + + {'id': 'github-trigger', 'title': 'GitHub (Enterprise) Build Triggers', 
'condition': function(config) { + return config.FEATURE_GITHUB_BUILD; + }}, + + {'id': 'bitbucket-trigger', 'title': 'BitBucket Build Triggers', 'condition': function(config) { + return config.FEATURE_BITBUCKET_BUILD; + }}, + + {'id': 'gitlab-trigger', 'title': 'GitLab Build Triggers', 'condition': function(config) { + return config.FEATURE_GITLAB_BUILD; + }}, + + {'id': 'security-scanner', 'title': 'Quay Security Scanner', 'condition': function(config) { + return config.FEATURE_SECURITY_SCANNER; + }}, + + {'id': 'bittorrent', 'title': 'BitTorrent downloads', 'condition': function(config) { + return config.FEATURE_BITTORRENT; + }}, + + {'id': 'oidc-login', 'title': 'OIDC Login(s)', 'condition': function(config) { + return $scope.getOIDCProviders(config).length > 0; + }}, + + {'id': 'actionlogarchiving', 'title': 'Action Log Rotation', 'condition': function(config) { + return config.FEATURE_ACTION_LOG_ROTATION; + }}, + ]; + + $scope.STORAGE_CONFIG_FIELDS = { + 'LocalStorage': [ + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/some/directory', 'kind': 'text'} + ], + + 'S3Storage': [ + {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}, + {'name': 's3_access_key', 'title': 'AWS Access Key (optional if using IAM)', 'placeholder': 'accesskeyhere', 'kind': 'text', 'optional': true}, + {'name': 's3_secret_key', 'title': 'AWS Secret Key (optional if using IAM)', 'placeholder': 'secretkeyhere', 'kind': 'text', 'optional': true}, + {'name': 'host', 'title': 'S3 Host (optional)', 'placeholder': 's3.amazonaws.com', 'kind': 'text', 'optional': true}, + {'name': 'port', 'title': 'S3 Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true} + ], + + 'AzureStorage': [ + {'name': 'azure_container', 'title': 'Azure Storage Container', 'placeholder': 'container', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/container', 'kind': 'text'}, + {'name': 'azure_account_name', 'title': 'Azure Account Name', 'placeholder': 'accountnamehere', 'kind': 'text'}, + {'name': 'azure_account_key', 'title': 'Azure Account Key', 'placeholder': 'accountkeyhere', 'kind': 'text', 'optional': true}, + {'name': 'sas_token', 'title': 'Azure SAS Token', 'placeholder': 'sastokenhere', 'kind': 'text', 'optional': true}, + ], + + 'GoogleCloudStorage': [ + {'name': 'access_key', 'title': 'Cloud Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text'}, + {'name': 'secret_key', 'title': 'Cloud Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, + {'name': 'bucket_name', 'title': 'GCS Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ], + + 'RadosGWStorage': [ + {'name': 'hostname', 'title': 'Rados Server Hostname', 'placeholder': 'my.rados.hostname', 'kind': 'text'}, + {'name': 'port', 'title': 'Custom Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true}, + {'name': 'is_secure', 'title': 'Is Secure', 'placeholder': 'Require SSL', 'kind': 'bool'}, + {'name': 'access_key', 'title': 'Access Key', 'placeholder': 'accesskeyhere', 'kind': 'text', 'help_url': 'http://ceph.com/docs/master/radosgw/admin/'}, + {'name': 'secret_key', 'title': 'Secret Key', 'placeholder': 'secretkeyhere', 'kind': 'text'}, 
+ {'name': 'bucket_name', 'title': 'Bucket Name', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'} + ], + + 'SwiftStorage': [ + {'name': 'auth_version', 'title': 'Swift Auth Version', 'kind': 'option', 'values': [1, 2, 3]}, + {'name': 'auth_url', 'title': 'Swift Auth URL', 'placeholder': 'http://swiftdomain/auth/v1.0', 'kind': 'text'}, + {'name': 'swift_container', 'title': 'Swift Container Name', 'placeholder': 'mycontainer', 'kind': 'text', + 'help_text': 'The swift container for all objects. Must already exist inside Swift.'}, + + {'name': 'storage_path', 'title': 'Storage Path', 'placeholder': '/path/inside/container', 'kind': 'text'}, + + {'name': 'swift_user', 'title': 'Username', 'placeholder': 'accesskeyhere', 'kind': 'text', + 'help_text': 'Note: For Swift V1, this is "username:password" (-U on the CLI).'}, + {'name': 'swift_password', 'title': 'Key/Password', 'placeholder': 'secretkeyhere', 'kind': 'text', + 'help_text': 'Note: For Swift V1, this is the API token (-K on the CLI).'}, + + {'name': 'ca_cert_path', 'title': 'CA Cert Filename', 'placeholder': 'conf/stack/swift.cert', 'kind': 'text', 'optional': true}, + + {'name': 'temp_url_key', 'title': 'Temp URL Key (optional)', 'placholder': 'key-here', 'kind': 'text', 'optional': true, + 'help_url': 'https://coreos.com/products/enterprise-registry/docs/latest/swift-temp-url.html', + 'help_text': 'If enabled, will allow for faster pulls directly from Swift.'}, + + {'name': 'os_options', 'title': 'OS Options', 'kind': 'map', + 'keys': ['tenant_id', 'auth_token', 'service_type', 'endpoint_type', 'tenant_name', 'object_storage_url', 'region_name', + 'project_id', 'project_name', 'project_domain_name', 'user_domain_name', 'user_domain_id']} + ], + + 'CloudFrontedS3Storage': [ + {'name': 's3_bucket', 'title': 'S3 Bucket', 'placeholder': 'my-cool-bucket', 'kind': 'text'}, + {'name': 'storage_path', 'title': 'Storage Directory', 'placeholder': '/path/inside/bucket', 'kind': 'text'}, + {'name': 's3_access_key', 'title': 'AWS Access Key (optional if using IAM)', 'placeholder': 'accesskeyhere', 'kind': 'text', 'optional': true}, + {'name': 's3_secret_key', 'title': 'AWS Secret Key (optional if using IAM)', 'placeholder': 'secretkeyhere', 'kind': 'text', 'optional': true}, + {'name': 'host', 'title': 'S3 Host (optional)', 'placeholder': 's3.amazonaws.com', 'kind': 'text', 'optional': true}, + {'name': 'port', 'title': 'S3 Port (optional)', 'placeholder': '443', 'kind': 'text', 'pattern': '^[0-9]+$', 'optional': true}, + + {'name': 'cloudfront_distribution_domain', 'title': 'CloudFront Distribution Domain Name', 'placeholder': 'somesubdomain.cloudfront.net', 'pattern': '^([0-9a-zA-Z]+\\.)+[0-9a-zA-Z]+$', 'kind': 'text'}, + {'name': 'cloudfront_key_id', 'title': 'CloudFront Key ID', 'placeholder': 'APKATHISISAKEYID', 'kind': 'text'}, + {'name': 'cloudfront_privatekey_filename', 'title': 'CloudFront Private Key', 'filesuffix': 'cloudfront-signing-key.pem', 'kind': 'file'}, + ], + }; + + $scope.enableFeature = function(config, feature) { + config[feature] = true; + }; + + $scope.validateHostname = function(hostname) { + if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) { + return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.' 
+ } + + return null; + }; + + $scope.config = null; + $scope.mapped = { + '$hasChanges': false + }; + + $scope.hasfile = {}; + $scope.validating = null; + $scope.savingConfiguration = false; + + $scope.removeOIDCProvider = function(provider) { + delete $scope.config[provider]; + }; + + $scope.addOIDCProvider = () => { + bootbox.prompt('Enter an ID for the OIDC provider', function(result) { + if (!result) { + return; + } + + result = result.toUpperCase(); + + if (!result.match(/^[A-Z0-9]+$/)) { + bootbox.alert('Invalid ID for OIDC provider: must be alphanumeric'); + return; + } + + if (result == 'GITHUB' || result == 'GOOGLE') { + bootbox.alert('Invalid ID for OIDC provider: cannot be a reserved name'); + return; + } + + var key = result + '_LOGIN_CONFIG'; + if ($scope.config[key]) { + bootbox.alert('Invalid ID for OIDC provider: already exists'); + return; + } + + $scope.config[key] = {}; + }); + }; + + $scope.getOIDCProviderId = function(key) { + var index = key.indexOf('_LOGIN_CONFIG'); + if (index <= 0) { + return null; + } + + return key.substr(0, index).toLowerCase(); + }; + + $scope.getOIDCProviders = function(config) { + var keys = Object.keys(config || {}); + return keys.filter(function(key) { + if (key == 'GITHUB_LOGIN_CONFIG' || key == 'GOOGLE_LOGIN_CONFIG') { + // Has custom UI and config. + return false; + } + + return !!$scope.getOIDCProviderId(key); + }); + }; + + $scope.getServices = function(config) { + var services = []; + if (!config) { return services; } + + for (var i = 0; i < $scope.SERVICES.length; ++i) { + var service = $scope.SERVICES[i]; + if (!service.condition || service.condition(config)) { + services.push({ + 'service': service, + 'status': 'validating' + }); + } + } + + return services; + }; + + $scope.validationStatus = function(serviceInfos) { + if (!serviceInfos) { return 'validating'; } + + var hasError = false; + for (var i = 0; i < serviceInfos.length; ++i) { + if (serviceInfos[i].status == 'validating') { + return 'validating'; + } + if (serviceInfos[i].status == 'error') { + hasError = true; + } + } + + return hasError ? 'failed' : 'success'; + }; + + $scope.cancelValidation = function() { + $('#validateAndSaveModal').modal('hide'); + $scope.validating = null; + $scope.savingConfiguration = false; + }; + + $scope.validateService = function(serviceInfo, opt_password) { + var params = { + 'service': serviceInfo.service.id + }; + + var data = { + 'config': $scope.config, + 'password': opt_password || '' + }; + + var errorDisplay = ApiService.errorDisplay( + 'Could not validate configuration. Please report this error.', + function() { + authPassword = null; + }); + + ApiService.scValidateConfig(data, params).then(function(resp) { + serviceInfo.status = resp.status ? 
'success' : 'error'; + serviceInfo.errorMessage = $.trim(resp.reason || ''); + + if (!resp.status) { + authPassword = null; + } + + }, errorDisplay); + }; + + $scope.checkValidateAndSave = function() { + if ($scope.configform.$valid) { + saveStorageConfig(); + $scope.validateAndSave(); + return; + } + + var query = $element.find("input.ng-invalid:first"); + + if (query && query.length) { + query[0].scrollIntoView(); + query.focus(); + } + }; + + $scope.validateAndSave = function() { + $scope.validating = $scope.getServices($scope.config); + + var requirePassword = false; + for (var i = 0; i < $scope.validating.length; ++i) { + var serviceInfo = $scope.validating[i]; + if (serviceInfo.service.password) { + requirePassword = true; + break; + } + } + + if (!requirePassword) { + $scope.performValidateAndSave(); + return; + } + + var box = bootbox.dialog({ + "message": 'Please enter your superuser password to validate your auth configuration:' + + '
' + + '' + + '
', + "title": 'Enter Password', + "buttons": { + "success": { + "label": "Validate Config", + "className": "btn-success btn-continue", + "callback": function() { + $scope.performValidateAndSave($('#validatePassword').val()); + } + }, + "close": { + "label": "Cancel", + "className": "btn-default", + "callback": function() { + } + } + } + }); + + box.bind('shown.bs.modal', function(){ + box.find("input").focus(); + box.find("form").submit(function() { + if (!$('#validatePassword').val()) { return; } + box.modal('hide'); + }); + }); + }; + + $scope.performValidateAndSave = function(opt_password) { + $scope.savingConfiguration = false; + $scope.validating = $scope.getServices($scope.config); + + authPassword = opt_password; + + $('#validateAndSaveModal').modal({ + keyboard: false, + backdrop: 'static' + }); + + for (var i = 0; i < $scope.validating.length; ++i) { + var serviceInfo = $scope.validating[i]; + $scope.validateService(serviceInfo, opt_password); + } + }; + + $scope.saveConfiguration = function() { + $scope.savingConfiguration = true; + + // Make sure to note that fully verified setup is completed. We use this as a signal + // in the setup tool. + $scope.config['SETUP_COMPLETE'] = true; + + var data = { + 'config': $scope.config, + 'hostname': window.location.host, + 'password': authPassword || '' + }; + + var errorDisplay = ApiService.errorDisplay( + 'Could not save configuration. Please report this error.', + function() { + authPassword = null; + }); + + ApiService.scUpdateConfig(data).then(function(resp) { + authPassword = null; + + $scope.savingConfiguration = false; + $scope.mapped.$hasChanges = false; + + $('#validateAndSaveModal').modal('hide'); + + $scope.configurationSaved({'config': $scope.config}); + }, errorDisplay); + }; + + // Convert storage config to an array + var initializeStorageConfig = function($scope) { + var config = $scope.config.DISTRIBUTED_STORAGE_CONFIG || {}; + var defaultLocations = $scope.config.DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS || []; + var preference = $scope.config.DISTRIBUTED_STORAGE_PREFERENCE || []; + + $scope.serverStorageConfig = angular.copy(config); + $scope.storageConfig = []; + + Object.keys(config).forEach(function(location) { + $scope.storageConfig.push({ + location: location, + defaultLocation: defaultLocations.indexOf(location) >= 0, + data: angular.copy(config[location]), + error: {}, + }); + }); + + if (!$scope.storageConfig.length) { + $scope.addStorageConfig('default'); + return; + } + + // match DISTRIBUTED_STORAGE_PREFERENCE order first, remaining are + // ordered by unicode point value + $scope.storageConfig.sort(function(a, b) { + var indexA = preference.indexOf(a.location); + var indexB = preference.indexOf(b.location); + + if (indexA > -1 && indexB > -1) return indexA < indexB ? -1 : 1; + if (indexA > -1) return -1; + if (indexB > -1) return 1; + + return a.location < b.location ? 
-1 : 1; + }); + }; + + $scope.allowChangeLocationStorageConfig = function(location) { + if (!$scope.serverStorageConfig[location]) { return true }; + + // allow user to change location ID if another exists with the same ID + return $scope.storageConfig.filter(function(sc) { + return sc.location === location; + }).length >= 2; + }; + + $scope.allowRemoveStorageConfig = function(location) { + return $scope.storageConfig.length > 1 && $scope.allowChangeLocationStorageConfig(location); + }; + + $scope.canAddStorageConfig = function() { + return $scope.config && + $scope.config.FEATURE_STORAGE_REPLICATION && + $scope.storageConfig && + (!$scope.storageConfig.length || $scope.storageConfig.length < 10); + }; + + $scope.addStorageConfig = function(location) { + var storageType = 'LocalStorage'; + + // Use last storage type by default + if ($scope.storageConfig.length) { + storageType = $scope.storageConfig[$scope.storageConfig.length-1].data[0]; + } + + $scope.storageConfig.push({ + location: location || '', + defaultLocation: false, + data: [storageType, {}], + error: {}, + }); + }; + + $scope.removeStorageConfig = function(sc) { + $scope.storageConfig.splice($scope.storageConfig.indexOf(sc), 1); + }; + + var saveStorageConfig = function() { + var config = {}; + var defaultLocations = []; + var preference = []; + + $scope.storageConfig.forEach(function(sc) { + config[sc.location] = sc.data; + if (sc.defaultLocation) defaultLocations.push(sc.location); + preference.push(sc.location); + }); + + $scope.config.DISTRIBUTED_STORAGE_CONFIG = config; + $scope.config.DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = defaultLocations; + $scope.config.DISTRIBUTED_STORAGE_PREFERENCE = preference; + }; + + var gitlabSelector = function(key) { + return function(value) { + if (!value || !$scope.config) { return; } + + if (!$scope.config[key]) { + $scope.config[key] = {}; + } + + if (value == 'enterprise') { + if ($scope.config[key]['GITLAB_ENDPOINT'] == 'https://gitlab.com/') { + $scope.config[key]['GITLAB_ENDPOINT'] = ''; + } + } else if (value == 'hosted') { + $scope.config[key]['GITLAB_ENDPOINT'] = 'https://gitlab.com/'; + } + }; + }; + + var githubSelector = function(key) { + return function(value) { + if (!value || !$scope.config) { return; } + + if (!$scope.config[key]) { + $scope.config[key] = {}; + } + + if (value == 'enterprise') { + if ($scope.config[key]['GITHUB_ENDPOINT'] == 'https://github.com/') { + $scope.config[key]['GITHUB_ENDPOINT'] = ''; + } + delete $scope.config[key]['API_ENDPOINT']; + } else if (value == 'hosted') { + $scope.config[key]['GITHUB_ENDPOINT'] = 'https://github.com/'; + $scope.config[key]['API_ENDPOINT'] = 'https://api.github.com/'; + } + }; + }; + + var getKey = function(config, path) { + if (!config) { + return null; + } + + var parts = path.split('.'); + var current = config; + for (var i = 0; i < parts.length; ++i) { + var part = parts[i]; + if (!current[part]) { return null; } + current = current[part]; + } + return current; + }; + + var initializeMappedLogic = function(config) { + var gle = getKey(config, 'GITHUB_LOGIN_CONFIG.GITHUB_ENDPOINT'); + var gte = getKey(config, 'GITHUB_TRIGGER_CONFIG.GITHUB_ENDPOINT'); + + $scope.mapped['GITHUB_LOGIN_KIND'] = gle == 'https://github.com/' ? 'hosted' : 'enterprise'; + $scope.mapped['GITHUB_TRIGGER_KIND'] = gte == 'https://github.com/' ? 'hosted' : 'enterprise'; + + var glabe = getKey(config, 'GITLAB_TRIGGER_KIND.GITHUB_ENDPOINT'); + $scope.mapped['GITLAB_TRIGGER_KIND'] = glabe == 'https://gitlab.com/' ? 
'hosted' : 'enterprise'; + + $scope.mapped['redis'] = {}; + $scope.mapped['redis']['host'] = getKey(config, 'BUILDLOGS_REDIS.host') || getKey(config, 'USER_EVENTS_REDIS.host'); + $scope.mapped['redis']['port'] = getKey(config, 'BUILDLOGS_REDIS.port') || getKey(config, 'USER_EVENTS_REDIS.port'); + $scope.mapped['redis']['password'] = getKey(config, 'BUILDLOGS_REDIS.password') || getKey(config, 'USER_EVENTS_REDIS.password'); + + $scope.mapped['TLS_SETTING'] = 'none'; + if (config['PREFERRED_URL_SCHEME'] == 'https') { + if (config['EXTERNAL_TLS_TERMINATION'] === true) { + $scope.mapped['TLS_SETTING'] = 'external-tls'; + } else { + $scope.mapped['TLS_SETTING'] = 'internal-tls'; + } + } + }; + + var tlsSetter = function(value) { + if (value == null || !$scope.config) { return; } + + switch (value) { + case 'none': + $scope.config['PREFERRED_URL_SCHEME'] = 'http'; + delete $scope.config['EXTERNAL_TLS_TERMINATION']; + return; + + case 'external-tls': + $scope.config['PREFERRED_URL_SCHEME'] = 'https'; + $scope.config['EXTERNAL_TLS_TERMINATION'] = true; + return; + + case 'internal-tls': + $scope.config['PREFERRED_URL_SCHEME'] = 'https'; + delete $scope.config['EXTERNAL_TLS_TERMINATION']; + return; + } + }; + + var redisSetter = function(keyname) { + return function(value) { + if (value == null || !$scope.config) { return; } + + if (!$scope.config['BUILDLOGS_REDIS']) { + $scope.config['BUILDLOGS_REDIS'] = {}; + } + + if (!$scope.config['USER_EVENTS_REDIS']) { + $scope.config['USER_EVENTS_REDIS'] = {}; + } + + if (!value) { + delete $scope.config['BUILDLOGS_REDIS'][keyname]; + delete $scope.config['USER_EVENTS_REDIS'][keyname]; + return; + } + + $scope.config['BUILDLOGS_REDIS'][keyname] = value; + $scope.config['USER_EVENTS_REDIS'][keyname] = value; + }; + }; + + // Add mapped logic. + $scope.$watch('mapped.GITHUB_LOGIN_KIND', githubSelector('GITHUB_LOGIN_CONFIG')); + $scope.$watch('mapped.GITHUB_TRIGGER_KIND', githubSelector('GITHUB_TRIGGER_CONFIG')); + $scope.$watch('mapped.GITLAB_TRIGGER_KIND', gitlabSelector('GITLAB_TRIGGER_KIND')); + $scope.$watch('mapped.TLS_SETTING', tlsSetter); + + $scope.$watch('mapped.redis.host', redisSetter('host')); + $scope.$watch('mapped.redis.port', redisSetter('port')); + $scope.$watch('mapped.redis.password', redisSetter('password')); + + // Remove extra extra fields (which are not allowed) from storage config. + var updateFields = function(sc) { + var type = sc.data[0]; + var configObject = sc.data[1]; + var allowedFields = $scope.STORAGE_CONFIG_FIELDS[type]; + + // Remove any fields not allowed. + for (var fieldName in configObject) { + if (!configObject.hasOwnProperty(fieldName)) { + continue; + } + + var isValidField = $.grep(allowedFields, function(field) { + return field.name == fieldName; + }).length > 0; + + if (!isValidField) { + delete configObject[fieldName]; + } + } + + // Set any missing boolean fields to false. + for (var i = 0; i < allowedFields.length; ++i) { + if (allowedFields[i].kind == 'bool') { + configObject[allowedFields[i].name] = configObject[allowedFields[i].name] || false; + } + } + }; + + // Validate and update storage config on update. 
+ var refreshStorageConfig = function() { + if (!$scope.config || !$scope.storageConfig) return; + + var locationCounts = {}; + var errors = []; + var valid = true; + + $scope.storageConfig.forEach(function(sc) { + // remove extra fields from storage config + updateFields(sc); + + if (!locationCounts[sc.location]) locationCounts[sc.location] = 0; + locationCounts[sc.location]++; + }); + + // validate storage config + $scope.storageConfig.forEach(function(sc) { + var error = {}; + + if ($scope.config.FEATURE_STORAGE_REPLICATION && sc.data[0] === 'LocalStorage') { + error.engine = 'Replication to a locally mounted directory is unsupported as it is only accessible on a single machine.'; + valid = false; + } + + if (locationCounts[sc.location] > 1) { + error.location = 'Location ID must be unique.'; + valid = false; + } + + errors.push(error); + }); + + $scope.storageConfigError = errors; + $scope.configform.$setValidity('storageConfig', valid); + }; + + $scope.$watch('config.INTERNAL_OIDC_SERVICE_ID', function(service_id) { + if (service_id) { + $scope.config['FEATURE_DIRECT_LOGIN'] = false; + } + }); + + $scope.$watch('config.FEATURE_STORAGE_REPLICATION', function() { + refreshStorageConfig(); + }); + + $scope.$watch('storageConfig', function() { + refreshStorageConfig(); + }, true); + + $scope.$watch('config', function(value) { + $scope.mapped['$hasChanges'] = true; + }, true); + + $scope.$watch('isActive', function(value) { + if (!value) { return; } + + ApiService.scGetConfig().then(function(resp) { + $scope.config = resp['config'] || {}; + initializeMappedLogic($scope.config); + initializeStorageConfig($scope); + $scope.mapped['$hasChanges'] = false; + }, ApiService.errorDisplay('Could not load config')); + }); + } + }; + + return directiveDefinitionObject; + }) + + .directive('configParsedField', function ($timeout) { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlParsedField, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding', + 'parser': '&parser', + 'serializer': '&serializer' + }, + controller: function($scope, $element, $transclude) { + $scope.childScope = null; + + $transclude(function(clone, scope) { + $scope.childScope = scope; + $scope.childScope['fields'] = {}; + $element.append(clone); + }); + + $scope.childScope.$watch('fields', function(value) { + // Note: We need the timeout here because Angular starts the digest of the + // parent scope AFTER the child scope, which means it can end up one action + // behind. The timeout ensures that the parent scope will be fully digest-ed + // and then we update the binding. Yes, this is a hack :-/. 
+ $timeout(function() { + $scope.binding = $scope.serializer({'fields': value}); + }); + }, true); + + $scope.$watch('binding', function(value) { + var parsed = $scope.parser({'value': value}); + for (var key in parsed) { + if (parsed.hasOwnProperty(key)) { + $scope.childScope['fields'][key] = parsed[key]; + } + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configVariableField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlVarField, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + $scope.sections = {}; + $scope.currentSection = null; + + $scope.setSection = function(section) { + $scope.binding = section.value; + }; + + this.addSection = function(section, element) { + $scope.sections[section.value] = { + 'title': section.valueTitle, + 'value': section.value, + 'element': element + }; + + element.hide(); + + if (!$scope.binding) { + $scope.binding = section.value; + } + }; + + $scope.$watch('binding', function(binding) { + if (!binding) { return; } + + if ($scope.currentSection) { + $scope.currentSection.element.hide(); + } + + if ($scope.sections[binding]) { + $scope.sections[binding].element.show(); + $scope.currentSection = $scope.sections[binding]; + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('variableSection', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlVarField, + priority: 1, + require: '^configVariableField', + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'value': '@value', + 'valueTitle': '@valueTitle' + }, + controller: function($scope, $element) { + var parentCtrl = $element.parent().controller('configVariableField'); + parentCtrl.addSection($scope, $element); + } + }; + return directiveDefinitionObject; + }) + + .directive('configListField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlListField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'placeholder': '@placeholder', + 'defaultValue': '@defaultValue', + 'itemTitle': '@itemTitle', + 'itemPattern': '@itemPattern' + }, + controller: function($scope, $element) { + $scope.removeItem = function(item) { + var index = $scope.binding.indexOf(item); + if (index >= 0) { + $scope.binding.splice(index, 1); + } + }; + + $scope.addItem = function() { + if (!$scope.newItemName) { + return; + } + + if (!$scope.binding) { + $scope.binding = []; + } + + if ($scope.binding.indexOf($scope.newItemName) >= 0) { + return; + } + + $scope.binding.push($scope.newItemName); + $scope.newItemName = null; + }; + + $scope.patternMap = {}; + + $scope.getRegexp = function(pattern) { + if (!pattern) { + pattern = '.*'; + } + + if ($scope.patternMap[pattern]) { + return $scope.patternMap[pattern]; + } + + return $scope.patternMap[pattern] = new RegExp(pattern); + }; + + $scope.$watch('binding', function(binding) { + if (!binding && $scope.defaultValue) { + $scope.binding = eval($scope.defaultValue); + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configFileField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlFileField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'filename': '@filename', + 'skipCheckFile': '@skipCheckFile', + 'hasFile': '=hasFile', + 'binding': '=?binding' + }, + controller: function($scope, $element, Restangular, $upload) { + 
$scope.hasFile = false; + + var setHasFile = function(hasFile) { + $scope.hasFile = hasFile; + $scope.binding = hasFile ? $scope.filename : null; + }; + + $scope.onFileSelect = function(files) { + if (files.length < 1) { + setHasFile(false); + return; + } + + $scope.uploadProgress = 0; + $scope.upload = $upload.upload({ + url: '/api/v1/superuser/config/file/' + $scope.filename, + method: 'POST', + data: {'_csrf_token': window.__token}, + file: files[0], + }).progress(function(evt) { + $scope.uploadProgress = parseInt(100.0 * evt.loaded / evt.total); + if ($scope.uploadProgress == 100) { + $scope.uploadProgress = null; + setHasFile(true); + } + }).success(function(data, status, headers, config) { + $scope.uploadProgress = null; + setHasFile(true); + }); + }; + + var loadStatus = function(filename) { + Restangular.one('superuser/config/file/' + filename).get().then(function(resp) { + setHasFile(false); + }); + }; + + if ($scope.filename && $scope.skipCheckFile != "true") { + loadStatus($scope.filename); + } + } + }; + return directiveDefinitionObject; + }) + + .directive('configBoolField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlBoolField, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + } + }; + return directiveDefinitionObject; + }) + + .directive('configNumericField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlNumericField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'placeholder': '@placeholder', + 'defaultValue': '@defaultValue', + }, + controller: function($scope, $element) { + $scope.bindinginternal = 0; + + $scope.$watch('binding', function(binding) { + if ($scope.binding == 0 && $scope.defaultValue) { + $scope.binding = $scope.defaultValue * 1; + } + + $scope.bindinginternal = $scope.binding; + }); + + $scope.$watch('bindinginternal', function(binding) { + var newValue = $scope.bindinginternal * 1; + if (isNaN(newValue)) { + newValue = 0; + } + $scope.binding = newValue; + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configContactsField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlContactsField, + priority: 1, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + var padItems = function(items) { + // Remove the last item if both it and the second to last items are empty. + if (items.length > 1 && !items[items.length - 2].value && !items[items.length - 1].value) { + items.splice(items.length - 1, 1); + return; + } + + // If the last item is non-empty, add a new item. 
+ if (items.length == 0 || items[items.length - 1].value) { + items.push({'value': ''}); + return; + } + }; + + $scope.itemHash = null; + $scope.$watch('items', function(items) { + if (!items) { return; } + padItems(items); + + var itemHash = ''; + var binding = []; + for (var i = 0; i < items.length; ++i) { + var item = items[i]; + if (item.value && (URI(item.value).host() || URI(item.value).path())) { + binding.push(item.value); + itemHash += item.value; + } + } + + $scope.itemHash = itemHash; + $scope.binding = binding; + }, true); + + $scope.$watch('binding', function(binding) { + var current = binding || []; + var items = []; + var itemHash = ''; + for (var i = 0; i < current.length; ++i) { + items.push({'value': current[i]}) + itemHash += current[i]; + } + + if ($scope.itemHash != itemHash) { + $scope.items = items; + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configContactField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlContactsField, + priority: 1, + replace: false, + transclude: true, + restrict: 'C', + scope: { + 'binding': '=binding' + }, + controller: function($scope, $element) { + $scope.kind = null; + $scope.value = null; + + var updateBinding = function() { + if ($scope.value == null) { return; } + var value = $scope.value || ''; + + switch ($scope.kind) { + case 'mailto': + $scope.binding = 'mailto:' + value; + return; + + case 'tel': + $scope.binding = 'tel:' + value; + return; + + case 'irc': + $scope.binding = 'irc://' + value; + return; + + default: + $scope.binding = value; + return; + } + }; + + $scope.$watch('kind', updateBinding); + $scope.$watch('value', updateBinding); + + $scope.$watch('binding', function(value) { + if (!value) { + $scope.kind = null; + $scope.value = null; + return; + } + + var uri = URI(value); + $scope.kind = uri.scheme(); + + switch ($scope.kind) { + case 'mailto': + case 'tel': + $scope.value = uri.path(); + break; + + case 'irc': + $scope.value = value.substr('irc://'.length); + break; + + default: + $scope.kind = 'http'; + $scope.value = value; + break; + } + }); + + $scope.getPlaceholder = function(kind) { + switch (kind) { + case 'mailto': + return 'some@example.com'; + + case 'tel': + return '555-555-5555'; + + case 'irc': + return 'myserver:port/somechannel'; + + default: + return 'http://some/url'; + } + }; + } + }; + return directiveDefinitionObject; + }) + + .directive('configMapField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlMapField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'keys': '=keys' + }, + controller: function($scope, $element) { + $scope.newKey = null; + $scope.newValue = null; + + $scope.hasValues = function(binding) { + return binding && Object.keys(binding).length; + }; + + $scope.removeKey = function(key) { + delete $scope.binding[key]; + }; + + $scope.addEntry = function() { + if (!$scope.newKey || !$scope.newValue) { return; } + + $scope.binding = $scope.binding || {}; + $scope.binding[$scope.newKey] = $scope.newValue; + $scope.newKey = null; + $scope.newValue = null; + } + } + }; + return directiveDefinitionObject; + }) + + .directive('configServiceKeyField', function (ApiService) { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlServiceKeyField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'serviceName': '@serviceName', + }, + controller: function($scope, $element) { + $scope.foundKeys = []; + 
$scope.loading = false; + $scope.loadError = false; + $scope.hasValidKey = false; + $scope.hasValidKeyStr = null; + + $scope.updateKeys = function() { + $scope.foundKeys = []; + $scope.loading = true; + + ApiService.listServiceKeys().then(function(resp) { + $scope.loading = false; + $scope.loadError = false; + + resp['keys'].forEach(function(key) { + if (key['service'] == $scope.serviceName) { + $scope.foundKeys.push(key); + } + }); + + $scope.hasValidKey = checkValidKey($scope.foundKeys); + $scope.hasValidKeyStr = $scope.hasValidKey ? 'true' : ''; + }, function() { + $scope.loading = false; + $scope.loadError = true; + }); + }; + + // Perform initial loading of the keys. + $scope.updateKeys(); + + $scope.isKeyExpired = function(key) { + if (key.expiration_date != null) { + var expiration_date = moment(key.expiration_date); + return moment().isAfter(expiration_date); + } + return false; + }; + + $scope.showRequestServiceKey = function() { + $scope.requestKeyInfo = { + 'service': $scope.serviceName + }; + }; + + $scope.handleKeyCreated = function() { + $scope.updateKeys(); + }; + + var checkValidKey = function(keys) { + for (var i = 0; i < keys.length; ++i) { + var key = keys[i]; + if (!key.approval) { + continue; + } + + if ($scope.isKeyExpired(key)) { + continue; + } + + return true; + } + + return false; + }; + } + }; + return directiveDefinitionObject; + }) + + .directive('configStringField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlStringField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'placeholder': '@placeholder', + 'pattern': '@pattern', + 'defaultValue': '@defaultValue', + 'validator': '&validator', + 'isOptional': '=isOptional' + }, + controller: function($scope, $element) { + var firstSet = true; + + $scope.patternMap = {}; + + $scope.getRegexp = function(pattern) { + if (!pattern) { + pattern = '.*'; + } + + if ($scope.patternMap[pattern]) { + return $scope.patternMap[pattern]; + } + + return $scope.patternMap[pattern] = new RegExp(pattern); + }; + + $scope.$watch('binding', function(binding) { + if (firstSet && !binding && $scope.defaultValue) { + $scope.binding = $scope.defaultValue; + firstSet = false; + } + + $scope.errorMessage = $scope.validator({'value': binding || ''}); + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configStringListField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlStringListField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + 'binding': '=binding', + 'itemTitle': '@itemTitle', + 'itemDelimiter': '@itemDelimiter', + 'placeholder': '@placeholder', + 'isOptional': '=isOptional' + }, + controller: function($scope, $element) { + $scope.$watch('internalBinding', function(value) { + if (value) { + $scope.binding = value.split($scope.itemDelimiter); + } + }); + + $scope.$watch('binding', function(value) { + if (value) { + $scope.internalBinding = value.join($scope.itemDelimiter); + } + }); + } + }; + return directiveDefinitionObject; + }) + + .directive('configCertificatesField', function () { + var directiveDefinitionObject = { + priority: 0, + templateUrl: urlCertField, + replace: false, + transclude: false, + restrict: 'C', + scope: { + }, + controller: function($scope, $element, $upload, ApiService, UserService) { + $scope.resetUpload = 0; + $scope.certsUploading = false; + + var loadCertificates = function() { + $scope.certificatesResource = 
ApiService.getCustomCertificatesAsResource().get(function(resp) { + $scope.certInfo = resp; + $scope.certsUploading = false; + }); + }; + + // UserService.updateUserIn($scope, function(user) { + // console.log(user) + // no need to check for user, since it's all local + loadCertificates(); + // }); + + $scope.handleCertsSelected = function(files, callback) { + $scope.certsUploading = true; + $upload.upload({ + url: '/api/v1/superuser/customcerts/' + files[0].name, + method: 'POST', + data: {'_csrf_token': window.__token}, + file: files[0] + }).success(function() { + callback(true); + $scope.resetUpload++; + loadCertificates(); + }).error(function(r) { + bootbox.alert('Could not upload certificate') + callback(false); + $scope.resetUpload++; + loadCertificates(); + }); + }; + + $scope.deleteCert = function(path) { + var errorDisplay = ApiService.errorDisplay('Could not delete certificate'); + var params = { + 'certpath': path + }; + + ApiService.deleteCustomCertificate(null, params).then(loadCertificates, errorDisplay); + }; + } + }; + return directiveDefinitionObject; + }); diff --git a/config_app/js/main.ts b/config_app/js/main.ts new file mode 100644 index 000000000..7be3b1163 --- /dev/null +++ b/config_app/js/main.ts @@ -0,0 +1,35 @@ +// imports shims, etc +import 'core-js'; + +import * as angular from 'angular'; +import { ConfigAppModule } from './config-app.module'; +import { bundle } from 'ng-metadata/core'; + +// load all app dependencies +require('../static/lib/angular-file-upload.min.js'); +require('../../static/js/tar'); + +const ng1QuayModule: string = bundle(ConfigAppModule, []).name; +angular.module('quay-config', [ng1QuayModule]) + .run(() => { + }); + +declare var require: any; +function requireAll(r) { + r.keys().forEach(r); +} + +// load all services +// require('./services/api-service'); +requireAll(require.context('./services', true, /\.js$/)); + + +// load all the components after services +requireAll(require.context('./setup', true, /\.js$/)); +requireAll(require.context('./core-config-setup', true, /\.js$/)); +requireAll(require.context('./components', true, /\.js$/)); + + +// Load all the main quay css +requireAll(require.context('../../static/css', true, /\.css$/)); +requireAll(require.context('../../static/lib', true, /\.css$/)); diff --git a/config_app/js/services/api-service.js b/config_app/js/services/api-service.js new file mode 100644 index 000000000..5163f1cba --- /dev/null +++ b/config_app/js/services/api-service.js @@ -0,0 +1,327 @@ +/** + * Service which exposes the server-defined API as a nice set of helper methods and automatic + * callbacks. Any method defined on the server is exposed here as an equivalent method. Also + * defines some helper functions for working with API responses. 
+ */ +angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilService', function(Restangular, $q, UtilService) { + var apiService = {}; + + if (!window.__endpoints) { + return apiService; + } + + var getResource = function(getMethod, operation, opt_parameters, opt_background) { + var resource = {}; + resource.withOptions = function(options) { + this.options = options; + return this; + }; + + resource.get = function(processor, opt_errorHandler) { + var options = this.options; + var result = { + 'loading': true, + 'value': null, + 'hasError': false + }; + + getMethod(options, opt_parameters, opt_background, true).then(function(resp) { + result.value = processor(resp); + result.loading = false; + }, function(resp) { + result.hasError = true; + result.loading = false; + if (opt_errorHandler) { + opt_errorHandler(resp); + } + }); + + return result; + }; + + return resource; + }; + + var buildUrl = function(path, parameters) { + // We already have /api/v1/ on the URLs, so remove them from the paths. + path = path.substr('/api/v1/'.length, path.length); + + // Build the path, adjusted with the inline parameters. + var used = {}; + var url = ''; + for (var i = 0; i < path.length; ++i) { + var c = path[i]; + if (c == '{') { + var end = path.indexOf('}', i); + var varName = path.substr(i + 1, end - i - 1); + + if (!parameters[varName]) { + throw new Error('Missing parameter: ' + varName); + } + + used[varName] = true; + url += parameters[varName]; + i = end; + continue; + } + + url += c; + } + + // Append any query parameters. + var isFirst = true; + for (var paramName in parameters) { + if (!parameters.hasOwnProperty(paramName)) { continue; } + if (used[paramName]) { continue; } + + var value = parameters[paramName]; + if (value) { + url += isFirst ? '?' : '&'; + url += paramName + '=' + encodeURIComponent(value) + isFirst = false; + } + } + + return url; + }; + + var getGenericOperationName = function(userOperationName) { + return userOperationName.replace('User', ''); + }; + + var getMatchingUserOperationName = function(orgOperationName, method, userRelatedResource) { + if (userRelatedResource) { + if (userRelatedResource[method.toLowerCase()]) { + return userRelatedResource[method.toLowerCase()]['operationId']; + } + } + + throw new Error('Could not find user operation matching org operation: ' + orgOperationName); + }; + + var freshLoginInProgress = []; + var reject = function(msg) { + for (var i = 0; i < freshLoginInProgress.length; ++i) { + freshLoginInProgress[i].deferred.reject({'data': {'message': msg}}); + } + freshLoginInProgress = []; + }; + + var retry = function() { + for (var i = 0; i < freshLoginInProgress.length; ++i) { + freshLoginInProgress[i].retry(); + } + freshLoginInProgress = []; + }; + + var freshLoginFailCheck = function(opName, opArgs) { + return function(resp) { + var deferred = $q.defer(); + + // If the error is a fresh login required, show the dialog. 
+ // TODO: remove error_type (old style error) + var fresh_login_required = resp.data['title'] == 'fresh_login_required' || resp.data['error_type'] == 'fresh_login_required'; + if (resp.status == 401 && fresh_login_required) { + var retryOperation = function() { + apiService[opName].apply(apiService, opArgs).then(function(resp) { + deferred.resolve(resp); + }, function(resp) { + deferred.reject(resp); + }); + }; + + var verifyNow = function() { + if (!$('#freshPassword').val()) { + return; + } + + var info = { + 'password': $('#freshPassword').val() + }; + + $('#freshPassword').val(''); + + // Conduct the sign in of the user. + apiService.verifyUser(info).then(function() { + // On success, retry the operations. if it succeeds, then resolve the + // deferred promise with the result. Otherwise, reject the same. + retry(); + }, function(resp) { + // Reject with the sign in error. + reject('Invalid verification credentials'); + }); + }; + + // Add the retry call to the in progress list. If there is more than a single + // in progress call, we skip showing the dialog (since it has already been + // shown). + freshLoginInProgress.push({ + 'deferred': deferred, + 'retry': retryOperation + }) + + if (freshLoginInProgress.length > 1) { + return deferred.promise; + } + + var box = bootbox.dialog({ + "message": 'It has been more than a few minutes since you last logged in, ' + + 'so please verify your password to perform this sensitive operation:' + + '
' + + '' + + '
', + "title": 'Please Verify', + "buttons": { + "verify": { + "label": "Verify", + "className": "btn-success btn-continue", + "callback": verifyNow + }, + "close": { + "label": "Cancel", + "className": "btn-default", + "callback": function() { + reject('Verification canceled') + } + } + } + }); + + box.bind('shown.bs.modal', function(){ + box.find("input").focus(); + box.find("form").submit(function() { + if (!$('#freshPassword').val()) { return; } + + box.modal('hide'); + verifyNow(); + }); + }); + + // Return a new promise. We'll accept or reject it based on the result + // of the login. + return deferred.promise; + } + + // Otherwise, we just 'raise' the error via the reject method on the promise. + return $q.reject(resp); + }; + }; + + var buildMethodsForOperation = function(operation, method, path, resourceMap) { + var operationName = operation['operationId']; + var urlPath = path['x-path']; + + // Add the operation itself. + apiService[operationName] = function(opt_options, opt_parameters, opt_background, opt_forceget) { + var one = Restangular.one(buildUrl(urlPath, opt_parameters)); + if (opt_background) { + one.withHttpConfig({ + 'ignoreLoadingBar': true + }); + } + + var opObj = one[opt_forceget ? 'get' : 'custom' + method.toUpperCase()](opt_options); + + // If the operation requires_fresh_login, then add a specialized error handler that + // will defer the operation's result if sudo is requested. + if (operation['x-requires-fresh-login']) { + opObj = opObj.catch(freshLoginFailCheck(operationName, arguments)); + } + return opObj; + }; + + // If the method for the operation is a GET, add an operationAsResource method. + if (method == 'get') { + apiService[operationName + 'AsResource'] = function(opt_parameters, opt_background) { + var getMethod = apiService[operationName]; + return getResource(getMethod, operation, opt_parameters, opt_background); + }; + } + + // If the operation has a user-related operation, then make a generic operation for this operation + // that can call both the user and the organization versions of the operation, depending on the + // parameters given. + if (path['x-user-related']) { + var userOperationName = getMatchingUserOperationName(operationName, method, resourceMap[path['x-user-related']]); + var genericOperationName = getGenericOperationName(userOperationName); + apiService[genericOperationName] = function(orgname, opt_options, opt_parameters, opt_background) { + if (orgname) { + if (orgname.name) { + orgname = orgname.name; + } + + var params = jQuery.extend({'orgname' : orgname}, opt_parameters || {}, opt_background); + return apiService[operationName](opt_options, params); + } else { + return apiService[userOperationName](opt_options, opt_parameters, opt_background); + } + }; + } + }; + + + var allowedMethods = ['get', 'post', 'put', 'delete']; + var resourceMap = {}; + var forEachOperation = function(callback) { + for (var path in window.__endpoints) { + if (!window.__endpoints.hasOwnProperty(path)) { + continue; + } + + for (var method in window.__endpoints[path]) { + if (!window.__endpoints[path].hasOwnProperty(method)) { + continue; + } + + if (allowedMethods.indexOf(method.toLowerCase()) < 0) { continue; } + callback(window.__endpoints[path][method], method, window.__endpoints[path]); + } + } + }; + + // Build the map of resource names to their objects. + forEachOperation(function(operation, method, path) { + resourceMap[path['x-name']] = path; + }); + + // Construct the methods for each API endpoint. 
+ forEachOperation(function(operation, method, path) { + buildMethodsForOperation(operation, method, path, resourceMap); + }); + + apiService.getErrorMessage = function(resp, defaultMessage) { + var message = defaultMessage; + if (resp && resp['data']) { + //TODO: remove error_message and error_description (old style error) + message = resp['data']['detail'] || resp['data']['error_message'] || resp['data']['message'] || resp['data']['error_description'] || message; + } + + return message; + }; + + apiService.errorDisplay = function(defaultMessage, opt_handler) { + return function(resp) { + var message = apiService.getErrorMessage(resp, defaultMessage); + if (opt_handler) { + var handlerMessage = opt_handler(resp); + if (handlerMessage) { + message = handlerMessage; + } + } + + message = UtilService.stringToHTML(message); + bootbox.dialog({ + "message": message, + "title": defaultMessage || 'Request Failure', + "buttons": { + "close": { + "label": "Close", + "className": "btn-primary" + } + } + }); + }; + }; + + return apiService; +}]); diff --git a/config_app/js/services/container-service.js b/config_app/js/services/container-service.js new file mode 100644 index 000000000..0ce3d106b --- /dev/null +++ b/config_app/js/services/container-service.js @@ -0,0 +1,43 @@ +/** + * Helper service for working with the registry's container. Only works in enterprise. + */ +angular.module('quay-config') + .factory('ContainerService', ['ApiService', '$timeout', 'Restangular', + function(ApiService, $timeout, Restangular) { + var containerService = {}; + containerService.restartContainer = function(callback) { + ApiService.errorDisplay('Removed Endpoint. This error should never be seen.') + }; + + containerService.scheduleStatusCheck = function(callback, opt_config) { + $timeout(function() { + containerService.checkStatus(callback, opt_config); + }, 2000); + }; + + containerService.checkStatus = function(callback, opt_config) { + var errorHandler = function(resp) { + if (resp.status == 404 || resp.status == 502 || resp.status == -1) { + // Container has not yet come back up, so we schedule another check. + containerService.scheduleStatusCheck(callback, opt_config); + return; + } + + return ApiService.errorDisplay('Cannot load status. Please report this to support')(resp); + }; + + // If config is specified, override the API base URL from this point onward. + // TODO(jschorr): Find a better way than this. This is safe, since this will only be called + // for a restart, but it is still ugly. + if (opt_config && opt_config['SERVER_HOSTNAME']) { + var scheme = opt_config['PREFERRED_URL_SCHEME'] || 'http'; + var baseUrl = scheme + '://' + opt_config['SERVER_HOSTNAME'] + '/api/v1/'; + Restangular.setBaseUrl(baseUrl); + } + + ApiService.scRegistryStatus(null, null, /* background */true) + .then(callback, errorHandler); + }; + + return containerService; + }]); diff --git a/config_app/js/services/cookie-service.js b/config_app/js/services/cookie-service.js new file mode 100644 index 000000000..af904124a --- /dev/null +++ b/config_app/js/services/cookie-service.js @@ -0,0 +1,23 @@ +/** + * Helper service for working with cookies. 
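+ * Session values are delegated to $cookies; putPermanent writes document.cookie
+ * directly so it can attach a far-future expiration date.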
+ */ +angular.module('quay-config').factory('CookieService', ['$cookies', function($cookies) { + var cookieService = {}; + cookieService.putPermanent = function(name, value) { + document.cookie = escape(name) + "=" + escape(value) + "; expires=Fri, 31 Dec 9999 23:59:59 GMT; path=/"; + }; + + cookieService.putSession = function(name, value) { + $cookies.put(name, value); + }; + + cookieService.clear = function(name) { + $cookies.remove(name); + }; + + cookieService.get = function(name) { + return $cookies.get(name); + }; + + return cookieService; +}]); diff --git a/config_app/js/services/features-config.js b/config_app/js/services/features-config.js new file mode 100644 index 000000000..e655f32bf --- /dev/null +++ b/config_app/js/services/features-config.js @@ -0,0 +1,91 @@ +/** + * Feature flags. + */ +angular.module('quay-config').factory('Features', [function() { + if (!window.__features) { + return {}; + } + + var features = window.__features; + features.getFeature = function(name, opt_defaultValue) { + var value = features[name]; + if (value == null) { + return opt_defaultValue; + } + return value; + }; + + features.hasFeature = function(name) { + return !!features.getFeature(name); + }; + + features.matchesFeatures = function(list) { + for (var i = 0; i < list.length; ++i) { + var value = features.getFeature(list[i]); + if (!value) { + return false; + } + } + return true; + }; + + return features; +}]); + +/** + * Application configuration. + */ +angular.module('quay-config').factory('Config', ['Features', function(Features) { + if (!window.__config) { + return {}; + } + + var config = window.__config; + config.getDomain = function() { + return config['SERVER_HOSTNAME']; + }; + + config.getHost = function(opt_auth) { + var auth = opt_auth; + if (auth) { + auth = auth + '@'; + } + + return config['PREFERRED_URL_SCHEME'] + '://' + auth + config['SERVER_HOSTNAME']; + }; + + config.getHttp = function() { + return config['PREFERRED_URL_SCHEME']; + }; + + config.getUrl = function(opt_path) { + var path = opt_path || ''; + return config['PREFERRED_URL_SCHEME'] + '://' + config['SERVER_HOSTNAME'] + path; + }; + + config.getValue = function(name, opt_defaultValue) { + var value = config[name]; + if (value == null) { + return opt_defaultValue; + } + return value; + }; + + config.getEnterpriseLogo = function(opt_defaultValue) { + if (!config.ENTERPRISE_LOGO_URL) { + if (opt_defaultValue) { + return opt_defaultValue; + } + + if (Features.BILLING) { + return '/static/img/quay-horizontal-color.svg'; + } else { + return '/static/img/QuayEnterprise_horizontal_color.svg'; + } + } + + return config.ENTERPRISE_LOGO_URL; + }; + + return config; +}]); \ No newline at end of file diff --git a/config_app/js/services/user-service.js b/config_app/js/services/user-service.js new file mode 100644 index 000000000..3203e9185 --- /dev/null +++ b/config_app/js/services/user-service.js @@ -0,0 +1,193 @@ +import * as Raven from 'raven-js'; + + +/** + * Service which monitors the current user session and provides methods for returning information + * about the user. 
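+ * The current user is also watched onto $rootScope (via updateUserIn below),
+ * so views can bind to it directly.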
+ */ +angular.module('quay-config') + .factory('UserService', ['ApiService', 'CookieService', '$rootScope', 'Config', '$location', '$timeout', + +function(ApiService, CookieService, $rootScope, Config, $location, $timeout) { + var userResponse = { + verified: false, + anonymous: true, + username: null, + email: null, + organizations: [], + logins: [], + beforeload: true + }; + + var userService = {}; + var _EXTERNAL_SERVICES = ['ldap', 'jwtauthn', 'keystone', 'dex']; + + userService.hasEverLoggedIn = function() { + return CookieService.get('quay.loggedin') == 'true'; + }; + + userService.updateUserIn = function(scope, opt_callback) { + scope.$watch(function () { return userService.currentUser(); }, function (currentUser) { + if (currentUser) { + $timeout(function(){ + scope.user = currentUser; + if (opt_callback) { + opt_callback(currentUser); + } + }, 0, false); + }; + }, true); + }; + + userService.load = function(opt_callback) { + var handleUserResponse = function(loadedUser) { + userResponse = loadedUser; + + if (!userResponse.anonymous) { + if (Config.MIXPANEL_KEY) { + try { + mixpanel.identify(userResponse.username); + mixpanel.people.set({ + '$email': userResponse.email, + '$username': userResponse.username, + 'verified': userResponse.verified + }); + mixpanel.people.set_once({ + '$created': new Date() + }) + } catch (e) { + window.console.log(e); + } + } + + if (Config.MARKETO_MUNCHKIN_ID && userResponse['marketo_user_hash']) { + var associateLeadBody = {'Email': userResponse.email}; + if (window.Munchkin !== undefined) { + try { + Munchkin.munchkinFunction( + 'associateLead', + associateLeadBody, + userResponse['marketo_user_hash'] + ); + } catch (e) { + } + } else { + window.__quay_munchkin_queue.push([ + 'associateLead', + associateLeadBody, + userResponse['marketo_user_hash'] + ]); + } + } + + if (window.Raven !== undefined) { + try { + Raven.setUser({ + email: userResponse.email, + id: userResponse.username + }); + } catch (e) { + window.console.log(e); + } + } + + CookieService.putPermanent('quay.loggedin', 'true'); + } else { + if (window.Raven !== undefined) { + Raven.setUser(); + } + } + + // If the loaded user has a prompt, redirect them to the update page. 
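+ // (When prompts are pending, the redirect replaces the normal opt_callback flow.)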
+ if (loadedUser.prompts && loadedUser.prompts.length) {
+ $location.path('/updateuser');
+ return;
+ }
+
+ if (opt_callback) {
+ opt_callback(loadedUser);
+ }
+ };
+
+ ApiService.getLoggedInUser().then(function(loadedUser) {
+ handleUserResponse(loadedUser);
+ }, function() {
+ handleUserResponse({'anonymous': true});
+ });
+ };
+
+ userService.isOrganization = function(name) {
+ return !!userService.getOrganization(name);
+ };
+
+ userService.getOrganization = function(name) {
+ if (!userResponse || !userResponse.organizations) { return null; }
+ for (var i = 0; i < userResponse.organizations.length; ++i) {
+ var org = userResponse.organizations[i];
+ if (org.name == name) {
+ return org;
+ }
+ }
+
+ return null;
+ };
+
+ userService.isNamespaceAdmin = function(namespace) {
+ if (namespace == userResponse.username) {
+ return true;
+ }
+
+ var org = userService.getOrganization(namespace);
+ if (!org) {
+ return false;
+ }
+
+ return org.is_org_admin;
+ };
+
+ userService.isKnownNamespace = function(namespace) {
+ if (namespace == userResponse.username) {
+ return true;
+ }
+
+ var org = userService.getOrganization(namespace);
+ return !!org;
+ };
+
+ userService.getNamespace = function(namespace) {
+ var org = userService.getOrganization(namespace);
+ if (org) {
+ return org;
+ }
+
+ if (namespace == userResponse.username) {
+ return userResponse;
+ }
+
+ return null;
+ };
+
+ userService.getCLIUsername = function() {
+ if (!userResponse) {
+ return null;
+ }
+
+ var externalUsername = null;
+ userResponse.logins.forEach(function(login) {
+ if (_EXTERNAL_SERVICES.indexOf(login.service) >= 0) {
+ externalUsername = login.service_identifier;
+ }
+ });
+
+ return externalUsername || userResponse.username;
+ };
+
+ userService.currentUser = function() {
+ return userResponse;
+ };
+
+ // Update the user in the root scope.
+ userService.updateUserIn($rootScope);
+
+ return userService;
+}]); diff --git a/config_app/js/services/util-service.js b/config_app/js/services/util-service.js new file mode 100644 index 000000000..34f0a4191 --- /dev/null +++ b/config_app/js/services/util-service.js @@ -0,0 +1,83 @@ +/**
+ * Service which exposes various utility methods.
+ */
+angular.module('quay-config').factory('UtilService', ['$sanitize',
+ function($sanitize) {
+ var utilService = {};
+
+ var adBlockEnabled = null;
+
+ utilService.isAdBlockEnabled = function(callback) {
+ if (adBlockEnabled !== null) {
+ callback(adBlockEnabled);
+ return;
+ }
+
+ if (typeof blockAdBlock === 'undefined') {
+ callback(true);
+ return;
+ }
+
+ var bab = new BlockAdBlock({
+ checkOnLoad: false,
+ resetOnEnd: true
+ });
+
+ bab.onDetected(function() { adBlockEnabled = true; callback(true); });
+ bab.onNotDetected(function() { adBlockEnabled = false; callback(false); });
+ bab.check();
+ };
+
+ utilService.isEmailAddress = function(val) {
+ var emailRegex = /^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/;
+ return emailRegex.test(val);
+ };
+
+ utilService.escapeHtmlString = function(text) {
+ var textStr = (text || '').toString();
+ var adjusted = textStr.replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;")
+ .replace(/"/g, "&quot;")
+ .replace(/'/g, "&#39;");
+
+ return adjusted;
+ };
+
+ utilService.stringToHTML = function(text) {
+ text = utilService.escapeHtmlString(text);
+ text = text.replace(/\n/g, '<br>');
+ return text;
+ };
+
+ utilService.getRestUrl = function(args) {
+ var url = '';
+ for (var i = 0; i < arguments.length; ++i) {
+ if (i > 0) {
+ url += '/';
+ }
+ url += encodeURI(arguments[i])
+ }
+ return url;
+ };
+
+ utilService.textToSafeHtml = function(text) {
+ return $sanitize(utilService.escapeHtmlString(text));
+ };
+
+ return utilService;
+ }])
+ .factory('CoreDialog', [() => {
+ var service = {};
+ service['fatal'] = function(title, message) {
+ bootbox.dialog({
+ "title": title,
+ "message": "
" + message, + "buttons": {}, + "className": "co-dialog fatal-error", + "closeButton": false + }); + }; + + return service; + }]); diff --git a/config_app/js/setup/setup.component.js b/config_app/js/setup/setup.component.js new file mode 100644 index 000000000..3e828214c --- /dev/null +++ b/config_app/js/setup/setup.component.js @@ -0,0 +1,332 @@ +import * as URI from 'urijs'; +const templateUrl = require('./setup.html'); + +(function() { + /** + * The Setup page provides a nice GUI walkthrough experience for setting up Quay Enterprise. + */ + + angular.module('quay-config').directive('setup', () => { + const directiveDefinitionObject = { + priority: 1, + templateUrl, + replace: true, + transclude: true, + restrict: 'C', + scope: { + 'isActive': '=isActive', + 'configurationSaved': '&configurationSaved' + }, + controller: SetupCtrl, + }; + + return directiveDefinitionObject; + }) + + function SetupCtrl($scope, $timeout, ApiService, Features, UserService, ContainerService, CoreDialog) { + // if (!Features.SUPER_USERS) { + // return; + // } + + $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9_\.\-]+(:[0-9]+)?$'; + + $scope.validateHostname = function(hostname) { + if (hostname.indexOf('127.0.0.1') == 0 || hostname.indexOf('localhost') == 0) { + return 'Please specify a non-localhost hostname. "localhost" will refer to the container, not your machine.' + } + + return null; + }; + + // Note: The values of the enumeration are important for isStepFamily. For example, + // *all* states under the "configuring db" family must start with "config-db". + $scope.States = { + // Loading the state of the product. + 'LOADING': 'loading', + + // The configuration directory is missing. + 'MISSING_CONFIG_DIR': 'missing-config-dir', + + // The config.yaml exists but it is invalid. + 'INVALID_CONFIG': 'config-invalid', + + // DB is being configured. + 'CONFIG_DB': 'config-db', + + // DB information is being validated. + 'VALIDATING_DB': 'config-db-validating', + + // DB information is being saved to the config. + 'SAVING_DB': 'config-db-saving', + + // A validation error occurred with the database. + 'DB_ERROR': 'config-db-error', + + // Database is being setup. + 'DB_SETUP': 'setup-db', + + // Database setup has succeeded. + 'DB_SETUP_SUCCESS': 'setup-db-success', + + // An error occurred when setting up the database. + 'DB_SETUP_ERROR': 'setup-db-error', + + // The container is being restarted for the database changes. + 'DB_RESTARTING': 'setup-db-restarting', + + // A superuser is being configured. + 'CREATE_SUPERUSER': 'create-superuser', + + // The superuser is being created. + 'CREATING_SUPERUSER': 'create-superuser-creating', + + // An error occurred when setting up the superuser. + 'SUPERUSER_ERROR': 'create-superuser-error', + + // The superuser was created successfully. + 'SUPERUSER_CREATED': 'create-superuser-created', + + // General configuration is being setup. + 'CONFIG': 'config', + + // The configuration is fully valid. + 'VALID_CONFIG': 'valid-config', + + // The container is being restarted for the configuration changes. + 'CONFIG_RESTARTING': 'config-restarting', + + // The product is ready for use. 
+ 'READY': 'ready' + } + + $scope.csrf_token = window.__token; + $scope.currentStep = $scope.States.LOADING; + $scope.errors = {}; + $scope.stepProgress = []; + $scope.hasSSL = false; + $scope.hostname = null; + $scope.currentConfig = null; + + $scope.currentState = { + 'hasDatabaseSSLCert': false + }; + + $scope.$watch('currentStep', function(currentStep) { + $scope.stepProgress = $scope.getProgress(currentStep); + + switch (currentStep) { + case $scope.States.CONFIG: + $('#setupModal').modal('hide'); + break; + + case $scope.States.MISSING_CONFIG_DIR: + $scope.showMissingConfigDialog(); + break; + + case $scope.States.INVALID_CONFIG: + $scope.showInvalidConfigDialog(); + break; + + case $scope.States.DB_SETUP: + $scope.performDatabaseSetup(); + // Fall-through. + + case $scope.States.CREATE_SUPERUSER: + case $scope.States.DB_RESTARTING: + case $scope.States.CONFIG_DB: + case $scope.States.VALID_CONFIG: + case $scope.States.READY: + $('#setupModal').modal({ + keyboard: false, + backdrop: 'static' + }); + break; + } + }); + + $scope.restartContainer = function(state) { + $scope.currentStep = state; + ContainerService.restartContainer(function() { + $scope.checkStatus() + }); + }; + + $scope.showSuperuserPanel = function() { + $('#setupModal').modal('hide'); + var prefix = $scope.hasSSL ? 'https' : 'http'; + var hostname = $scope.hostname; + if (!hostname) { + hostname = document.location.hostname; + if (document.location.port) { + hostname = hostname + ':' + document.location.port; + } + } + + window.location = prefix + '://' + hostname + '/superuser'; + }; + + $scope.configurationSaved = function(config) { + $scope.hasSSL = config['PREFERRED_URL_SCHEME'] == 'https'; + $scope.hostname = config['SERVER_HOSTNAME']; + $scope.currentConfig = config; + + $scope.currentStep = $scope.States.VALID_CONFIG; + }; + + $scope.getProgress = function(step) { + var isStep = $scope.isStep; + var isStepFamily = $scope.isStepFamily; + var States = $scope.States; + + return [ + isStepFamily(step, States.CONFIG_DB), + isStepFamily(step, States.DB_SETUP), + isStep(step, States.DB_RESTARTING), + isStepFamily(step, States.CREATE_SUPERUSER), + isStep(step, States.CONFIG), + isStep(step, States.VALID_CONFIG), + isStep(step, States.CONFIG_RESTARTING), + isStep(step, States.READY) + ]; + }; + + $scope.isStepFamily = function(step, family) { + if (!step) { return false; } + return step.indexOf(family) == 0; + }; + + $scope.isStep = function(step) { + for (var i = 1; i < arguments.length; ++i) { + if (arguments[i] == step) { + return true; + } + } + return false; + }; + + $scope.beginSetup = function() { + $scope.currentStep = $scope.States.CONFIG_DB; + }; + + $scope.showInvalidConfigDialog = function() { + var message = "The config.yaml file found in conf/stack could not be parsed." + var title = "Invalid configuration file"; + CoreDialog.fatal(title, message); + }; + + + $scope.showMissingConfigDialog = function() { + var message = "A volume should be mounted into the container at /conf/stack: " + + "

<br><pre>docker run -v /path/to/config:/conf/stack</pre>" +
+ "<br>
Once fixed, restart the container. For more information, " + + "" + + "Read the Setup Guide" + + var title = "Missing configuration volume"; + CoreDialog.fatal(title, message); + }; + + $scope.parseDbUri = function(value) { + if (!value) { return null; } + + // Format: mysql+pymysql://:@/ + var uri = URI(value); + return { + 'kind': uri.protocol(), + 'username': uri.username(), + 'password': uri.password(), + 'server': uri.host(), + 'database': uri.path() ? uri.path().substr(1) : '' + }; + }; + + $scope.serializeDbUri = function(fields) { + if (!fields['server']) { return ''; } + if (!fields['database']) { return ''; } + + var uri = URI(); + try { + uri = uri && uri.host(fields['server']); + uri = uri && uri.protocol(fields['kind']); + uri = uri && uri.username(fields['username']); + uri = uri && uri.password(fields['password']); + uri = uri && uri.path('/' + (fields['database'] || '')); + uri = uri && uri.toString(); + } catch (ex) { + return ''; + } + + return uri; + }; + + $scope.createSuperUser = function() { + $scope.currentStep = $scope.States.CREATING_SUPERUSER; + ApiService.scCreateInitialSuperuser($scope.superUser, null).then(function(resp) { + UserService.load(); + $scope.checkStatus(); + }, function(resp) { + $scope.currentStep = $scope.States.SUPERUSER_ERROR; + $scope.errors.SuperuserCreationError = ApiService.getErrorMessage(resp, 'Could not create superuser'); + }); + }; + + $scope.performDatabaseSetup = function() { + $scope.currentStep = $scope.States.DB_SETUP; + ApiService.scSetupDatabase(null, null).then(function(resp) { + if (resp['error']) { + $scope.currentStep = $scope.States.DB_SETUP_ERROR; + $scope.errors.DatabaseSetupError = resp['error']; + } else { + $scope.currentStep = $scope.States.DB_SETUP_SUCCESS; + } + }, ApiService.errorDisplay('Could not setup database. Please report this to support.')) + }; + + $scope.validateDatabase = function() { + $scope.currentStep = $scope.States.VALIDATING_DB; + $scope.databaseInvalid = null; + + var data = { + 'config': { + 'DB_URI': $scope.databaseUri + }, + 'hostname': window.location.host + }; + + if ($scope.currentState.hasDatabaseSSLCert) { + data['config']['DB_CONNECTION_ARGS'] = { + 'ssl': { + 'ca': 'conf/stack/database.pem' + } + }; + } + + var params = { + 'service': 'database' + }; + + ApiService.scValidateConfig(data, params).then(function(resp) { + var status = resp.status; + + if (status) { + $scope.currentStep = $scope.States.SAVING_DB; + ApiService.scUpdateConfig(data, null).then(function(resp) { + $scope.checkStatus(); + }, ApiService.errorDisplay('Cannot update config. Please report this to support')); + } else { + $scope.currentStep = $scope.States.DB_ERROR; + $scope.errors.DatabaseValidationError = resp.reason; + } + }, ApiService.errorDisplay('Cannot validate database. Please report this to support')); + }; + + $scope.checkStatus = function() { + ContainerService.checkStatus(function(resp) { + $scope.currentStep = resp['status']; + }, $scope.currentConfig); + }; + + // Load the initial status. + $scope.checkStatus(); + }; +})(); diff --git a/config_app/js/setup/setup.html b/config_app/js/setup/setup.html new file mode 100644 index 000000000..b40e1d78d --- /dev/null +++ b/config_app/js/setup/setup.html @@ -0,0 +1,311 @@ +
+<!-- setup.html: Quay Enterprise Setup wizard modal. Markup not recoverable here;
+     surviving text: header "Quay Enterprise Setup" and completion message
+     "Almost done! Configure your Redis database and other settings below". -->
diff --git a/config_app/static/lib/angular-file-upload.min.js b/config_app/static/lib/angular-file-upload.min.js new file mode 100644 index 000000000..b9d0196f7 --- /dev/null +++ b/config_app/static/lib/angular-file-upload.min.js @@ -0,0 +1,2 @@ +/*! 1.4.0 */ +!function(){var a=angular.module("angularFileUpload",[]);a.service("$upload",["$http","$timeout",function(a,b){function c(c){c.method=c.method||"POST",c.headers=c.headers||{},c.transformRequest=c.transformRequest||function(b,c){return window.ArrayBuffer&&b instanceof window.ArrayBuffer?b:a.defaults.transformRequest[0](b,c)},window.XMLHttpRequest.__isShim&&(c.headers.__setXHR_=function(){return function(a){a&&(c.__XHR=a,c.xhrFn&&c.xhrFn(a),a.upload.addEventListener("progress",function(a){c.progress&&b(function(){c.progress&&c.progress(a)})},!1),a.upload.addEventListener("load",function(a){a.lengthComputable&&c.progress&&c.progress(a)},!1))}});var d=a(c);return d.progress=function(a){return c.progress=a,d},d.abort=function(){return c.__XHR&&b(function(){c.__XHR.abort()}),d},d.xhr=function(a){return c.xhrFn=a,d},d.then=function(a,b){return function(d,e,f){c.progress=f||c.progress;var g=b.apply(a,[d,e,f]);return g.abort=a.abort,g.progress=a.progress,g.xhr=a.xhr,g.then=a.then,g}}(d,d.then),d}this.upload=function(b){b.headers=b.headers||{},b.headers["Content-Type"]=void 0,b.transformRequest=b.transformRequest||a.defaults.transformRequest;var d=new FormData,e=b.transformRequest,f=b.data;return b.transformRequest=function(a,c){if(f)if(b.formDataAppender)for(var d in f){var g=f[d];b.formDataAppender(a,d,g)}else for(var d in f){var g=f[d];if("function"==typeof e)g=e(g,c);else for(var h=0;h0||navigator.msMaxTouchPoints>0)&&d.bind("touchend",function(a){a.preventDefault(),a.target.click()})}}]),a.directive("ngFileDropAvailable",["$parse","$timeout",function(a,b){return function(c,d,e){if("draggable"in document.createElement("span")){var f=a(e.ngFileDropAvailable);b(function(){f(c)})}}}]),a.directive("ngFileDrop",["$parse","$timeout",function(a,b){return function(c,d,e){function f(a,b){if(b.isDirectory){var c=b.createReader();i++,c.readEntries(function(b){for(var c=0;c0&&j[0].webkitGetAsEntry)for(var k=0;k + + + + + + + + + + + + + + + + + + + + + + + {% for script_path in main_scripts %} + + {% endfor %} + Config app + + +
+<!-- index.html body: markup not recoverable here; surviving placeholder copy:
+     "What is my purpose" / "You make tarballs" -->
+ + diff --git a/config_app/webpack.config.js b/config_app/webpack.config.js new file mode 100644 index 000000000..7ba392c27 --- /dev/null +++ b/config_app/webpack.config.js @@ -0,0 +1,61 @@ +const webpack = require('webpack'); +const path = require('path'); + +let config = { + entry: { + configapp: "./js/main.ts" + }, + output: { + path: path.resolve(__dirname, "static/build"), + filename: '[name]-quay-frontend.bundle.js', + }, + resolve: { + extensions: [".ts", ".js"], + modules: [ + // Allows us to use the top-level node modules + path.resolve(__dirname, '../node_modules'), + path.resolve(__dirname, '../static/css/') + ] + }, + externals: { + angular: "angular", + jquery: "$", + // moment: "moment", + // "raven-js": "Raven", + }, + module: { + rules: [ + { + test: /\.ts$/, + use: ["ts-loader"], + exclude: /node_modules/ + }, + { + test: /\.css$/, + use: [ + "style-loader", + "css-loader?minimize=true", + ], + }, + { + test: /\.html$/, + use: [ + 'ngtemplate-loader?relativeTo=' + (path.resolve(__dirname)), + 'html-loader', + ] + }, + ] + }, + plugins: [ + // Replace references to global variables with associated modules + new webpack.ProvidePlugin({ + FileSaver: 'file-saver', + angular: "angular", + $: "jquery", + // moment: "moment", + }), + ], + devtool: "cheap-module-source-map", +}; + +module.exports = config; diff --git a/endpoints/api/discovery.py b/endpoints/api/discovery.py index 001396888..66e7c74a3 100644 --- a/endpoints/api/discovery.py +++ b/endpoints/api/discovery.py @@ -1,3 +1,4 @@ +# TODO to extract the discovery stuff into a util at the top level and then use it both here and config_app discovery.py """ API discovery information. """ import re diff --git a/local-config-app.sh b/local-config-app.sh new file mode 100755 index 000000000..9c6192200 --- /dev/null +++ b/local-config-app.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +cat << "EOF" + __ __ + / \ / \ ______ _ _ __ __ __ _____ ____ _ _ _____ _____ _____ + / /\ / /\ \ / __ \ | | | | / \ \ \ / / / ____| / __ \ | \ | | | ___| |_ _| / ____| +/ / / / \ \ | | | | | | | | / /\ \ \ / | | | | | | | \| | | |__ | | | | _ +\ \ \ \ / / | |__| | | |__| | / ____ \ | | | |____ | |__| | | . ` | | __| _| |_ | |__| | + \ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| \_____| \____/ |_| \_| |_| |_____| \_____| + \__/ \__/ \ \__ + \___\ by Red Hat + + Build, Store, and Distribute your Containers + + +EOF + +goreman -basedir "config_app" start diff --git a/package.json b/package.json index 5ed245cb3..6462e2d76 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,11 @@ "watch": "npm run clean && webpack --watch", "lint": "tslint --type-check -p tsconfig.json -e **/*.spec.ts", "analyze": "NODE_ENV=production webpack --profile --json | awk '{if(NR>1)print}' > static/build/stats.json && webpack-bundle-analyzer --mode static -r static/build/report.html static/build/stats.json", - "clean": "rm -f static/build/*" + "clean": "rm -f static/build/*", + + "clean-config-app": "rm -f config_app/static/build/*", + "watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch", + "build-config-app": "npm run clean-config-app && cd config_app && NODE_ENV=production webpack --progress" }, "repository": { "type": "git", diff --git a/quay-entrypoint.sh b/quay-entrypoint.sh index a67c2c5eb..24c58b500 100755 --- a/quay-entrypoint.sh +++ b/quay-entrypoint.sh @@ -3,8 +3,8 @@ MODE="$1" display_usage() { - echo "This script takes one arguments." - echo -e "\nUsage: ${0} \n" + echo "This script takes one argument." 
+ echo -e "\nUsage: ${0} \n" } if [[ "${MODE}" = "help" ]] @@ -32,6 +32,15 @@ EOF venv/bin/python -m displayversion case "$MODE" in + "shell") + echo "Entering shell mode" + /bin/bash + exit 0 + ;; + "config") + echo "Entering config mode, only copying config-app entrypoints" + cp -r ${QUAYDIR}/config_app/init/service/* /etc/service + ;; "interactive") echo "Copying $MODE files" cp -r ${QUAYCONF}/init/service/interactive/* /etc/service diff --git a/scripts/ci b/scripts/ci index 681e76faa..38b04afea 100755 --- a/scripts/ci +++ b/scripts/ci @@ -45,7 +45,7 @@ clean_cache() { quay_run() { - docker run --net=host -e TEST_DATABASE_URI -ti "${IMAGE}:${IMAGE_TAG}" "$@" + docker run --net=host --entrypoint "/bin/bash" -e TEST_DATABASE_URI -ti "${IMAGE}:${IMAGE_TAG}" -c "$*" } diff --git a/util/config/validators/validate_secscan.py b/util/config/validators/validate_secscan.py index 9f7c2d67f..c690e68d4 100644 --- a/util/config/validators/validate_secscan.py +++ b/util/config/validators/validate_secscan.py @@ -1,6 +1,6 @@ import time -from boot import setup_jwt_proxy +# from boot import setup_jwt_proxy from util.secscan.api import SecurityScannerAPI from util.config.validators import BaseValidator, ConfigValidationException @@ -23,9 +23,9 @@ class SecurityScannerValidator(BaseValidator): api = SecurityScannerAPI(config, None, server_hostname, client=client, skip_validation=True, uri_creator=uri_creator) - if not is_testing: + # if not is_testing: # Generate a temporary Quay key to use for signing the outgoing requests. - setup_jwt_proxy() + # setup_jwt_proxy() # We have to wait for JWT proxy to restart with the newly generated key. max_tries = 5 diff --git a/util/config/validators/validate_torrent.py b/util/config/validators/validate_torrent.py index d8137e12c..567285f0b 100644 --- a/util/config/validators/validate_torrent.py +++ b/util/config/validators/validate_torrent.py @@ -3,7 +3,9 @@ import logging from hashlib import sha1 from util.config.validators import BaseValidator, ConfigValidationException -from util.registry.torrent import jwt_from_infohash +# Temporarily removed because registry.torrent imports from app, add encoded_jwt back once extracted +# TODO(jschorr): extract app from following package and re-enable jwt_from_infohash in validator +# from util.registry.torrent import jwt_from_infohash logger = logging.getLogger(__name__) @@ -31,8 +33,8 @@ class BittorrentValidator(BaseValidator): 'port': 80, } - encoded_jwt = jwt_from_infohash(params['info_hash']) - params['jwt'] = encoded_jwt + # encoded_jwt = jwt_from_infohash(params['info_hash']) + # params['jwt'] = encoded_jwt resp = client.get(announce_url, timeout=5, params=params) logger.debug('Got tracker response: %s: %s', resp.status_code, resp.text)
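
Usage sketch for the new entrypoint modes above (the image tag and port mapping
are illustrative assumptions, not part of the diff):

    # start only the config-app services copied from config_app/init/service
    docker run -it -p 8080:80 quay:dev config

    # drop into an interactive shell inside the image
    docker run -it quay:dev shell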