Add some basic endpoints to the config app backend
rename files to avoid overlap with quay app
This commit is contained in:
parent
d080ca2cc6
commit
c378e408ef
39 changed files with 3095 additions and 384 deletions
16
config_app/config_util/config/__init__.py
Normal file
16
config_app/config_util/config/__init__.py
Normal file
|
@ -0,0 +1,16 @@
|
|||
from config_util.config.fileprovider import FileConfigProvider
|
||||
from config_util.config.testprovider import TestConfigProvider
|
||||
from config_util.config.k8sprovider import KubernetesConfigProvider
|
||||
|
||||
|
||||
def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False):
  """ Loads and returns the config provider for the current environment.

  Selection order: the in-memory test provider wins when `testing` is set,
  then the Kubernetes-secret-backed provider when `kubernetes` is set,
  otherwise the plain filesystem-backed provider.
  """
  if testing:
    return TestConfigProvider()

  provider_cls = KubernetesConfigProvider if kubernetes else FileConfigProvider
  return provider_cls(config_volume, yaml_filename, py_filename)
|
71
config_app/config_util/config/basefileprovider.py
Normal file
71
config_app/config_util/config/basefileprovider.py
Normal file
|
@ -0,0 +1,71 @@
|
|||
import os
|
||||
import logging
|
||||
|
||||
from config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml,
|
||||
CannotWriteConfigException)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseFileProvider(BaseProvider):
  """ Base implementation of the config provider that reads the data from the file system. """
  def __init__(self, config_volume, yaml_filename, py_filename):
    self.config_volume = config_volume
    self.yaml_filename = yaml_filename
    self.py_filename = py_filename

    # Pre-computed absolute locations of the two override files.
    self.yaml_path = os.path.join(config_volume, yaml_filename)
    self.py_path = os.path.join(config_volume, py_filename)

  def update_app_config(self, app_config):
    """ Applies the .py override (if present) and then the YAML override (if present). """
    if os.path.exists(self.py_path):
      logger.debug('Applying config file: %s', self.py_path)
      app_config.from_pyfile(self.py_path)

    if os.path.exists(self.yaml_path):
      logger.debug('Applying config file: %s', self.yaml_path)
      import_yaml(app_config, self.yaml_path)

  def get_config(self):
    """ Returns the parsed YAML override config as a dict, or None when none exists. """
    if not self.config_exists():
      return None

    parsed = {}
    import_yaml(parsed, self.yaml_path)
    return parsed

  def config_exists(self):
    """ True when the YAML override file is present in the volume. """
    return self.volume_file_exists(self.yaml_filename)

  def volume_exists(self):
    """ True when the config volume directory itself exists. """
    return os.path.exists(self.config_volume)

  def volume_file_exists(self, filename):
    """ True when `filename` exists under the config volume. """
    return os.path.exists(os.path.join(self.config_volume, filename))

  def get_volume_file(self, filename, mode='r'):
    """ Opens and returns the named file under the config volume. """
    return open(os.path.join(self.config_volume, filename), mode=mode)

  def get_volume_path(self, directory, filename):
    """ Joins `directory` and `filename`; overridable for flat stores. """
    return os.path.join(directory, filename)

  def list_volume_directory(self, path):
    """ Lists filenames under `path` in the volume; None when missing or not a directory. """
    dirpath = os.path.join(self.config_volume, path)
    # os.path.isdir is False for nonexistent paths, so this covers both the
    # "missing" and "not a directory" cases of the contract.
    if not os.path.isdir(dirpath):
      return None

    return os.listdir(dirpath)

  def requires_restart(self, app_config):
    """ True when any on-disk override value differs from the in-memory app config. """
    file_config = self.get_config()
    if not file_config:
      return False

    return any(app_config.get(key) != value for key, value in file_config.items())
128
config_app/config_util/config/baseprovider.py
Normal file
128
config_app/config_util/config/baseprovider.py
Normal file
|
@ -0,0 +1,128 @@
|
|||
import logging
|
||||
import yaml
|
||||
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from six import add_metaclass
|
||||
|
||||
from jsonschema import validate, ValidationError
|
||||
|
||||
from config_util.config.schema import CONFIG_SCHEMA
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CannotWriteConfigException(Exception):
  """ Raised when the configuration cannot be persisted to its backing store. """
||||
|
||||
class SetupIncompleteException(Exception):
  """ Raised when attempting to verify config that has not yet been set up. """
||||
def import_yaml(config_obj, config_file):
  """ Loads the YAML document at `config_file` into `config_obj`.

  Only uppercase top-level keys are copied, matching Flask's config
  convention. When setup is marked complete (or the flag is absent), the
  resulting config is validated against CONFIG_SCHEMA; validation failures
  are currently logged rather than raised.

  Returns the (mutated) `config_obj`, or None for an empty file.
  Raises Exception when the file parses to a bare string (invalid config).
  """
  with open(config_file) as f:
    c = yaml.safe_load(f)
    if not c:
      logger.debug('Empty YAML config file')
      return

    if isinstance(c, str):
      raise Exception('Invalid YAML config file: ' + str(c))

    # Iterate the mapping directly rather than via the Python 2-only
    # `iterkeys()`, so this works under both Python 2 and Python 3.
    for key in c:
      if key.isupper():
        config_obj[key] = c[key]

  if config_obj.get('SETUP_COMPLETE', True):
    try:
      validate(config_obj, CONFIG_SCHEMA)
    except ValidationError:
      # TODO: Change this into a real error
      logger.exception('Could not validate config schema')
  else:
    logger.debug('Skipping config schema validation because setup is not complete')

  return config_obj
||||
|
||||
def get_yaml(config_obj):
  """ Serializes `config_obj` as a UTF-8 encoded YAML document. """
  serialized = yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True)
  return serialized
||||
|
||||
def export_yaml(config_obj, config_file):
  """ Writes `config_obj` as YAML to `config_file`.

  Raises CannotWriteConfigException when the file cannot be written.
  """
  try:
    with open(config_file, 'w') as out:
      out.write(get_yaml(config_obj))
  except IOError as write_error:
    raise CannotWriteConfigException(str(write_error))
||||
|
||||
@add_metaclass(ABCMeta)
class BaseProvider(object):
  """ A configuration provider helps to load, save, and handle config override in the application.

  Concrete subclasses back the config with a specific store (filesystem,
  Kubernetes secret, in-memory for tests) and must implement every
  abstract method below.
  """

  @property
  def provider_id(self):
    # Short string identifying the concrete provider (e.g. 'file', 'k8s').
    raise NotImplementedError

  @abstractmethod
  def update_app_config(self, app_config):
    """ Updates the given application config object with the loaded override config. """

  @abstractmethod
  def get_config(self):
    """ Returns the contents of the config override file, or None if none. """

  @abstractmethod
  def save_config(self, config_object):
    """ Updates the contents of the config override file to those given. """

  @abstractmethod
  def config_exists(self):
    """ Returns true if a config override file exists in the config volume. """

  @abstractmethod
  def volume_exists(self):
    """ Returns whether the config override volume exists. """

  @abstractmethod
  def volume_file_exists(self, filename):
    """ Returns whether the file with the given name exists under the config override volume. """

  @abstractmethod
  def get_volume_file(self, filename, mode='r'):
    """ Returns a Python file referring to the given name under the config override volume. """

  @abstractmethod
  def write_volume_file(self, filename, contents):
    """ Writes the given contents to the config override volume, with the given filename. """

  @abstractmethod
  def remove_volume_file(self, filename):
    """ Removes the config override volume file with the given filename. """

  @abstractmethod
  def list_volume_directory(self, path):
    """ Returns a list of strings representing the names of the files found in the config override
        directory under the given path. If the path doesn't exist, returns None.
    """

  @abstractmethod
  def save_volume_file(self, filename, flask_file):
    """ Saves the given flask file to the config override volume, with the given
        filename.
    """

  @abstractmethod
  def requires_restart(self, app_config):
    """ If true, the configuration loaded into memory for the app does not match that on disk,
        indicating that this container requires a restart.
    """

  @abstractmethod
  def get_volume_path(self, directory, filename):
    """ Helper for constructing file paths, which may differ between providers. For example,
        kubernetes can't have subfolders in configmaps """
60
config_app/config_util/config/fileprovider.py
Normal file
60
config_app/config_util/config/fileprovider.py
Normal file
|
@ -0,0 +1,60 @@
|
|||
import os
|
||||
import logging
|
||||
|
||||
from config_util.config.baseprovider import export_yaml, CannotWriteConfigException
|
||||
from config_util.config.basefileprovider import BaseFileProvider
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _ensure_parent_dir(filepath):
|
||||
""" Ensures that the parent directory of the given file path exists. """
|
||||
try:
|
||||
parentpath = os.path.abspath(os.path.join(filepath, os.pardir))
|
||||
if not os.path.isdir(parentpath):
|
||||
os.makedirs(parentpath)
|
||||
except IOError as ioe:
|
||||
raise CannotWriteConfigException(str(ioe))
|
||||
|
||||
|
||||
class FileConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes the data
      from/to the file system. """
  def __init__(self, config_volume, yaml_filename, py_filename):
    super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)

  @property
  def provider_id(self):
    return 'file'

  def save_config(self, config_obj):
    """ Persists the given config dict as YAML at the volume's YAML path. """
    export_yaml(config_obj, self.yaml_path)

  def write_volume_file(self, filename, contents):
    """ Writes `contents` to `filename` under the config volume; returns the full path. """
    target = os.path.join(self.config_volume, filename)
    _ensure_parent_dir(target)

    try:
      with open(target, mode='w') as out:
        out.write(contents)
    except IOError as write_error:
      raise CannotWriteConfigException(str(write_error))

    return target

  def remove_volume_file(self, filename):
    """ Deletes `filename` from the config volume. """
    os.remove(os.path.join(self.config_volume, filename))

  def save_volume_file(self, filename, flask_file):
    """ Saves an uploaded flask file as `filename` under the config volume;
        returns the full path. """
    target = os.path.join(self.config_volume, filename)
    _ensure_parent_dir(target)

    # Write the uploaded file to disk.
    try:
      flask_file.save(target)
    except IOError as write_error:
      raise CannotWriteConfigException(str(write_error))

    return target
170
config_app/config_util/config/k8sprovider.py
Normal file
170
config_app/config_util/config/k8sprovider.py
Normal file
|
@ -0,0 +1,170 @@
|
|||
import os
|
||||
import logging
|
||||
import json
|
||||
import base64
|
||||
import time
|
||||
|
||||
from requests import Request, Session
|
||||
|
||||
from config_util.config.baseprovider import CannotWriteConfigException, get_yaml
|
||||
from config_util.config.basefileprovider import BaseFileProvider
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Host (and optional port) of the Kubernetes API server, as injected into the
# pod's environment by Kubernetes itself.
KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
port = os.environ.get('KUBERNETES_SERVICE_PORT')
if port:
  KUBERNETES_API_HOST += ':' + port

# Mounted service-account token used to authenticate against the API server.
SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

# Namespace and secret name under which the Quay Enterprise config is stored;
# overridable via the environment.
QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')
class KubernetesConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes configuration
      data from a Kubernetes Secret.

  Reads go through the locally mounted copy of the secret (via
  BaseFileProvider); writes go through the Kubernetes API and then poll the
  mount until it reflects the change.
  """
  def __init__(self, config_volume, yaml_filename, py_filename):
    super(KubernetesConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)

    # Load the service account token from the local store.
    if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
      raise Exception('Cannot load Kubernetes service account token')

    with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
      self._service_token = f.read()

  @property
  def provider_id(self):
    return 'k8s'

  def get_volume_path(self, directory, filename):
    # NOTE: Overridden to ensure we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    return "_".join([directory.rstrip('/'), filename])

  def volume_file_exists(self, filename):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()
    if not secret or not secret.get('data'):
      return False
    return filename in secret['data']

  def list_volume_directory(self, path):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()

    if not secret:
      return []

    # Treat keys prefixed with `path` as the directory's contents; the `+ 1`
    # skips the separator character introduced by get_volume_path.
    paths = []
    for filename in secret.get('data', {}):
      if filename.startswith(path):
        paths.append(filename[len(path) + 1:])
    return paths

  def save_config(self, config_obj):
    # Serialize and store the YAML config as a key inside the secret.
    self._update_secret_file(self.yaml_filename, get_yaml(config_obj))

  def write_volume_file(self, filename, contents):
    try:
      self._update_secret_file(filename, contents)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def remove_volume_file(self, filename):
    try:
      self._update_secret_file(filename, None)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def save_volume_file(self, filename, flask_file):
    # Save to the local mount first (via the base class), then mirror the
    # written contents into the secret.
    filepath = super(KubernetesConfigProvider, self).save_volume_file(filename, flask_file)
    with open(filepath, 'r') as f:
      self.write_volume_file(filename, f.read())

  def _assert_success(self, response):
    # Raises CannotWriteConfigException unless the API call returned HTTP 200.
    if response.status_code != 200:
      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
                   response.text)
      raise CannotWriteConfigException('Kubernetes API call failed: %s' % response.text)

  def _update_secret_file(self, filename, value=None):
    """ Sets (or, when value is None, removes) `filename` inside the config
        secret, then blocks until the local mount reflects the change. """
    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (QE_NAMESPACE)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % QE_NAMESPACE
      raise CannotWriteConfigException(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    secret = self._lookup_secret()
    if secret is None:
      self._assert_success(self._execute_k8s_api('POST', secret_url, {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": QE_CONFIG_SECRET
        },
        "data": {}
      }))

    # NOTE(review): when the secret was just created above, `secret` is still
    # None here, so the next line would fail; presumably it should be
    # re-looked-up or initialized to {'data': {}} — confirm.
    # Update the secret to reflect the file change.
    secret['data'] = secret.get('data', {})

    if value is not None:
      # NOTE(review): Python 2 b64encode accepts str; under Python 3 this
      # would require bytes.
      secret['data'][filename] = base64.b64encode(value)
    else:
      # NOTE(review): pop() raises KeyError when the key is absent — confirm
      # callers only remove files that exist in the secret.
      secret['data'].pop(filename)

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

    # Wait until the local mounted copy of the secret has been updated, as
    # this is an eventual consistency operation, but the caller expects immediate
    # consistency.
    while True:
      matching_files = set()
      # NOTE(review): iteritems() is Python 2-only.
      for secret_filename, encoded_value in secret['data'].iteritems():
        expected_value = base64.b64decode(encoded_value)
        try:
          with self.get_volume_file(secret_filename) as f:
            contents = f.read()

          if contents == expected_value:
            matching_files.add(secret_filename)
        except IOError:
          # Mount not updated yet for this file; keep polling.
          continue

      if matching_files == set(secret['data'].keys()):
        break

      # Sleep for a second and then try again.
      time.sleep(1)

  def _lookup_secret(self):
    # Returns the parsed secret object, or None when it does not exist.
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    response = self._execute_k8s_api('GET', secret_url)
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def _execute_k8s_api(self, method, relative_url, data=None):
    # Performs an authenticated call against the Kubernetes v1 API; `data`
    # (when given) is sent as a JSON body.
    headers = {
      'Authorization': 'Bearer ' + self._service_token
    }

    if data:
      headers['Content-Type'] = 'application/json'

    data = json.dumps(data) if data else None
    session = Session()
    url = 'https://%s/api/v1/%s' % (KUBERNETES_API_HOST, relative_url)

    request = Request(method, url, data=data, headers=headers)
    # NOTE(review): verify=False disables TLS certificate verification for the
    # API server — presumably intentional for in-cluster self-signed certs,
    # but worth confirming.
    return session.send(request.prepare(), verify=False, timeout=2)
914
config_app/config_util/config/schema.py
Normal file
914
config_app/config_util/config/schema.py
Normal file
|
@ -0,0 +1,914 @@
|
|||
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
# not be documented for external users. These will generally be used for internal test or only
# given to customers when they have been briefed on the side effects of using them.
INTERNAL_ONLY_PROPERTIES = {
  # Python/module artifacts picked up when the config class is introspected.
  '__module__',
  '__doc__',
  'create_transaction',

  'TESTING',
  'SEND_FILE_MAX_AGE_DEFAULT',

  # Internal work-queue names.
  'REPLICATION_QUEUE_NAME',
  'DOCKERFILE_BUILD_QUEUE_NAME',
  'CHUNK_CLEANUP_QUEUE_NAME',
  'SECSCAN_NOTIFICATION_QUEUE_NAME',
  'SECURITY_SCANNER_ISSUER_NAME',
  'NOTIFICATION_QUEUE_NAME',
  'NAMESPACE_GC_QUEUE_NAME',

  'FEATURE_BILLING',
  'FEATURE_SUPPORT_CHAT',
  'BILLING_TYPE',

  # Service-key lifecycle internals.
  'INSTANCE_SERVICE_KEY_LOCATION',
  'INSTANCE_SERVICE_KEY_REFRESH',
  'INSTANCE_SERVICE_KEY_SERVICE',
  'INSTANCE_SERVICE_KEY_KID_LOCATION',
  'INSTANCE_SERVICE_KEY_EXPIRATION',
  'UNAPPROVED_SERVICE_KEY_TTL_SEC',
  'EXPIRED_SERVICE_KEY_TTL_SEC',
  'REGISTRY_JWT_AUTH_MAX_FRESH_S',

  'BITTORRENT_FILENAME_PEPPER',
  'BITTORRENT_WEBSEED_LIFETIME',

  'SERVICE_LOG_ACCOUNT_ID',
  'BUILDLOGS_OPTIONS',
  'LIBRARY_NAMESPACE',
  'STAGGER_WORKERS',
  'QUEUE_WORKER_METRICS_REFRESH_SECONDS',
  'PUSH_TEMP_TAG_EXPIRATION_SEC',
  'GARBAGE_COLLECTION_FREQUENCY',
  'PAGE_TOKEN_KEY',
  'BUILD_MANAGER',
  'JWTPROXY_AUDIENCE',
  'SYSTEM_SERVICE_BLACKLIST',
  'JWTPROXY_SIGNER',
  'SECURITY_SCANNER_INDEXING_MIN_ID',
  'STATIC_SITE_BUCKET',
  'LABEL_KEY_RESERVED_PREFIXES',
  'TEAM_SYNC_WORKER_FREQUENCY',
  'DOCUMENTATION_METADATA',
  'DOCUMENTATION_LOCATION',
  'JSONIFY_PRETTYPRINT_REGULAR',
  'SYSTEM_LOGS_FILE',
  'SYSTEM_LOGS_PATH',
  'SYSTEM_SERVICES_PATH',
  'TUF_GUN_PREFIX',
  'LOGGING_LEVEL',
  'SIGNED_GRANT_EXPIRATION_SEC',
  'PROMETHEUS_AGGREGATOR_URL',
  'DB_TRANSACTION_FACTORY',
  'NOTIFICATION_SEND_TIMEOUT',
  'QUEUE_METRICS_TYPE',
  'MAIL_FAIL_SILENTLY',
  'LOCAL_OAUTH_HANDLER',
  'USE_CDN',
  'ANALYTICS_TYPE',
  'LAST_ACCESSED_UPDATE_THRESHOLD_S',

  # Error-reporting internals.
  'EXCEPTION_LOG_TYPE',
  'SENTRY_DSN',
  'SENTRY_PUBLIC_DSN',

  'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT',
  'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT',

  # Security-scanner tuning internals.
  'SECURITY_SCANNER_ENDPOINT_BATCH',
  'SECURITY_SCANNER_API_TIMEOUT_SECONDS',
  'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS',
  'SECURITY_SCANNER_ENGINE_VERSION_TARGET',
  'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS',
  'SECURITY_SCANNER_API_VERSION',

  'DATA_MODEL_CACHE_CONFIG',

  # TODO: move this into the schema once we support signing in QE.
  'FEATURE_SIGNING',
  'TUF_SERVER',
}
|
||||
CONFIG_SCHEMA = {
|
||||
'type': 'object',
|
||||
'description': 'Schema for Quay configuration',
|
||||
'required': [
|
||||
'PREFERRED_URL_SCHEME',
|
||||
'SERVER_HOSTNAME',
|
||||
'DB_URI',
|
||||
'AUTHENTICATION_TYPE',
|
||||
'DISTRIBUTED_STORAGE_CONFIG',
|
||||
'BUILDLOGS_REDIS',
|
||||
'USER_EVENTS_REDIS',
|
||||
'DISTRIBUTED_STORAGE_PREFERENCE',
|
||||
'DEFAULT_TAG_EXPIRATION',
|
||||
'TAG_EXPIRATION_OPTIONS',
|
||||
],
|
||||
'properties': {
|
||||
# Hosting.
|
||||
'PREFERRED_URL_SCHEME': {
|
||||
'type': 'string',
|
||||
'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`',
|
||||
'enum': ['http', 'https'],
|
||||
'x-example': 'https',
|
||||
},
|
||||
'SERVER_HOSTNAME': {
|
||||
'type': 'string',
|
||||
'description': 'The URL at which Quay is accessible, without the scheme.',
|
||||
'x-example': 'quay.io',
|
||||
},
|
||||
'EXTERNAL_TLS_TERMINATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# User-visible configuration.
|
||||
'REGISTRY_TITLE': {
|
||||
'type': 'string',
|
||||
'description': 'If specified, the long-form title for the registry. Defaults to `Quay Enterprise`.',
|
||||
'x-example': 'Corp Container Service',
|
||||
},
|
||||
'REGISTRY_TITLE_SHORT': {
|
||||
'type': 'string',
|
||||
'description': 'If specified, the short-form title for the registry. Defaults to `Quay Enterprise`.',
|
||||
'x-example': 'CCS',
|
||||
},
|
||||
'CONTACT_INFO': {
|
||||
'type': 'array',
|
||||
'minItems': 1,
|
||||
'uniqueItems': True,
|
||||
'description': 'If specified, contact information to display on the contact page. ' +
|
||||
'If only a single piece of contact information is specified, the contact footer will link directly.',
|
||||
'items': [
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^mailto:(.)+$',
|
||||
'x-example': 'mailto:support@quay.io',
|
||||
'description': 'Adds a link to send an e-mail',
|
||||
},
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^irc://(.)+$',
|
||||
'x-example': 'irc://chat.freenode.net:6665/quay',
|
||||
'description': 'Adds a link to visit an IRC chat room',
|
||||
},
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^tel:(.)+$',
|
||||
'x-example': 'tel:+1-888-930-3475',
|
||||
'description': 'Adds a link to call a phone number',
|
||||
},
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^http(s)?://(.)+$',
|
||||
'x-example': 'https://twitter.com/quayio',
|
||||
'description': 'Adds a link to a defined URL',
|
||||
},
|
||||
],
|
||||
},
|
||||
'SEARCH_RESULTS_PER_PAGE' : {
|
||||
'type': 'number',
|
||||
'description': 'Number of results returned per page by search page. Defaults to 10',
|
||||
'x-example': 10,
|
||||
},
|
||||
'SEARCH_MAX_RESULT_PAGE_COUNT' : {
|
||||
'type': 'number',
|
||||
'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10',
|
||||
'x-example': 10,
|
||||
},
|
||||
|
||||
# E-mail.
|
||||
'FEATURE_MAILING': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether emails are enabled. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
'MAIL_SERVER': {
|
||||
'type': 'string',
|
||||
'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.',
|
||||
'x-example': 'smtp.somedomain.com',
|
||||
},
|
||||
'MAIL_USE_TLS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If specified, whether to use TLS for sending e-mails.',
|
||||
'x-example': True,
|
||||
},
|
||||
'MAIL_PORT': {
|
||||
'type': 'number',
|
||||
'description': 'The SMTP port to use. If not specified, defaults to 587.',
|
||||
'x-example': 588,
|
||||
},
|
||||
'MAIL_USERNAME': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'The SMTP username to use when sending e-mails.',
|
||||
'x-example': 'myuser',
|
||||
},
|
||||
'MAIL_PASSWORD': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'The SMTP password to use when sending e-mails.',
|
||||
'x-example': 'mypassword',
|
||||
},
|
||||
'MAIL_DEFAULT_SENDER': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `support@quay.io`.',
|
||||
'x-example': 'support@myco.com',
|
||||
},
|
||||
|
||||
# Database.
|
||||
'DB_URI': {
|
||||
'type': 'string',
|
||||
'description': 'The URI at which to access the database, including any credentials.',
|
||||
'x-example': 'mysql+pymysql://username:password@dns.of.database/quay',
|
||||
'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495',
|
||||
},
|
||||
'DB_CONNECTION_ARGS': {
|
||||
'type': 'object',
|
||||
'description': 'If specified, connection arguments for the database such as timeouts and SSL.',
|
||||
'properties': {
|
||||
'threadlocals': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`'
|
||||
},
|
||||
'autorollback': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`'
|
||||
},
|
||||
'ssl': {
|
||||
'type': 'object',
|
||||
'description': 'SSL connection configuration',
|
||||
'properties': {
|
||||
'ca': {
|
||||
'type': 'string',
|
||||
'description': '*Absolute container path* to the CA certificate to use for SSL connections',
|
||||
'x-example': 'conf/stack/ssl-ca-cert.pem',
|
||||
},
|
||||
},
|
||||
'required': ['ca'],
|
||||
},
|
||||
},
|
||||
'required': ['threadlocals', 'autorollback'],
|
||||
},
|
||||
'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': {
|
||||
'type': 'boolean',
|
||||
'description': 'If true, pulls in which the pull audit log entry cannot be written will ' +
|
||||
'still succeed. Useful if the database can fallback into a read-only state ' +
|
||||
'and it is desired for pulls to continue during that time. Defaults to False.',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Storage.
|
||||
'FEATURE_STORAGE_REPLICATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to automatically replicate between storage engines. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_PROXY_STORAGE': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'MAXIMUM_LAYER_SIZE': {
|
||||
'type': 'string',
|
||||
'description': 'Maximum allowed size of an image layer. Defaults to 20G',
|
||||
'x-example': '100G',
|
||||
'pattern': '^[0-9]+(G|M)$',
|
||||
},
|
||||
'DISTRIBUTED_STORAGE_CONFIG': {
|
||||
'type': 'object',
|
||||
'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' +
|
||||
' for a storage engine, with the value being a tuple of the type and ' +
|
||||
' configuration for that engine.',
|
||||
'x-example': {
|
||||
'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}],
|
||||
},
|
||||
'items': {
|
||||
'type': 'array',
|
||||
},
|
||||
},
|
||||
'DISTRIBUTED_STORAGE_PREFERENCE': {
|
||||
'type': 'array',
|
||||
'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' +
|
||||
'use. A preferred engine means it is first checked for pullig and images are ' +
|
||||
'pushed to it.',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
'uniqueItems': True,
|
||||
},
|
||||
'x-example': ['s3_us_east', 's3_us_west'],
|
||||
},
|
||||
'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': {
|
||||
'type': 'array',
|
||||
'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' +
|
||||
'images should be fully replicated, by default, to all other storage engines.',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
'uniqueItems': True,
|
||||
},
|
||||
'x-example': ['s3_us_east', 's3_us_west'],
|
||||
},
|
||||
'USERFILES_LOCATION': {
|
||||
'type': 'string',
|
||||
'description': 'ID of the storage engine in which to place user-uploaded files',
|
||||
'x-example': 's3_us_east',
|
||||
},
|
||||
'USERFILES_PATH': {
|
||||
'type': 'string',
|
||||
'description': 'Path under storage in which to place user-uploaded files',
|
||||
'x-example': 'userfiles',
|
||||
},
|
||||
'ACTION_LOG_ARCHIVE_LOCATION': {
|
||||
'type': 'string',
|
||||
'description': 'If action log archiving is enabled, the storage engine in which to place the ' +
|
||||
'archived data.',
|
||||
'x-example': 's3_us_east',
|
||||
},
|
||||
'ACTION_LOG_ARCHIVE_PATH': {
|
||||
'type': 'string',
|
||||
'description': 'If action log archiving is enabled, the path in storage in which to place the ' +
|
||||
'archived data.',
|
||||
'x-example': 'archives/actionlogs',
|
||||
},
|
||||
'LOG_ARCHIVE_LOCATION': {
|
||||
'type': 'string',
|
||||
'description': 'If builds are enabled, the storage engine in which to place the ' +
|
||||
'archived build logs.',
|
||||
'x-example': 's3_us_east',
|
||||
},
|
||||
'LOG_ARCHIVE_PATH': {
|
||||
'type': 'string',
|
||||
'description': 'If builds are enabled, the path in storage in which to place the ' +
|
||||
'archived build logs.',
|
||||
'x-example': 'archives/buildlogs',
|
||||
},
|
||||
|
||||
# Authentication.
|
||||
'AUTHENTICATION_TYPE': {
|
||||
'type': 'string',
|
||||
'description': 'The authentication engine to use for credential authentication.',
|
||||
'x-example': 'Database',
|
||||
'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC'],
|
||||
},
|
||||
'SUPER_USERS': {
|
||||
'type': 'array',
|
||||
'description': 'Quay usernames of those users to be granted superuser privileges',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
'DIRECT_OAUTH_CLIENTID_WHITELIST': {
|
||||
'type': 'array',
|
||||
'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' +
|
||||
'to perform direct OAuth approval without user approval.',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
|
||||
# Redis.
|
||||
'BUILDLOGS_REDIS': {
|
||||
'type': 'object',
|
||||
'description': 'Connection information for Redis for build logs caching',
|
||||
'required': ['host'],
|
||||
'properties': {
|
||||
'host': {
|
||||
'type': 'string',
|
||||
'description': 'The hostname at which Redis is accessible',
|
||||
'x-example': 'my.redis.cluster',
|
||||
},
|
||||
'port': {
|
||||
'type': 'number',
|
||||
'description': 'The port at which Redis is accessible',
|
||||
'x-example': 1234,
|
||||
},
|
||||
'password': {
|
||||
'type': 'string',
|
||||
'description': 'The password to connect to the Redis instance',
|
||||
'x-example': 'mypassword',
|
||||
},
|
||||
},
|
||||
},
|
||||
'USER_EVENTS_REDIS': {
|
||||
'type': 'object',
|
||||
'description': 'Connection information for Redis for user event handling',
|
||||
'required': ['host'],
|
||||
'properties': {
|
||||
'host': {
|
||||
'type': 'string',
|
||||
'description': 'The hostname at which Redis is accessible',
|
||||
'x-example': 'my.redis.cluster',
|
||||
},
|
||||
'port': {
|
||||
'type': 'number',
|
||||
'description': 'The port at which Redis is accessible',
|
||||
'x-example': 1234,
|
||||
},
|
||||
'password': {
|
||||
'type': 'string',
|
||||
'description': 'The password to connect to the Redis instance',
|
||||
'x-example': 'mypassword',
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
# OAuth configuration.
|
||||
'GITHUB_LOGIN_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using GitHub (Enterprise) as an external login provider',
|
||||
'required': ['CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html',
|
||||
'properties': {
|
||||
'GITHUB_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) being hit',
|
||||
'x-example': 'https://github.com/',
|
||||
},
|
||||
'API_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
|
||||
'x-example': 'https://api.github.com/',
|
||||
},
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
'ORG_RESTRICT': {
|
||||
'type': 'boolean',
|
||||
'description': 'If true, only users within the organization whitelist can login using this provider',
|
||||
'x-example': True,
|
||||
},
|
||||
'ALLOWED_ORGANIZATIONS': {
|
||||
'type': 'array',
|
||||
'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'BITBUCKET_TRIGGER_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using BitBucket for build triggers',
|
||||
'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'],
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html',
|
||||
'properties': {
|
||||
'CONSUMER_KEY': {
|
||||
'type': 'string',
|
||||
'description': 'The registered consumer key (client ID) for this Quay instance',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
},
|
||||
'CONSUMER_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered consumer secret (client secret) for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
},
|
||||
},
|
||||
},
|
||||
'GITHUB_TRIGGER_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using GitHub (Enterprise) for build triggers',
|
||||
'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html',
|
||||
'properties': {
|
||||
'GITHUB_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) being hit',
|
||||
'x-example': 'https://github.com/',
|
||||
},
|
||||
'API_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
|
||||
'x-example': 'https://api.github.com/',
|
||||
},
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
},
|
||||
},
|
||||
'GOOGLE_LOGIN_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using Google for external authentication',
|
||||
'required': ['CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'properties': {
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
},
|
||||
},
|
||||
},
|
||||
'GITLAB_TRIGGER_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using Gitlab (Enterprise) for external authentication',
|
||||
'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'properties': {
|
||||
'GITLAB_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint at which Gitlab(Enterprise) is running',
|
||||
'x-example': 'https://gitlab.com',
|
||||
},
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
# Health.
|
||||
'HEALTH_CHECKER': {
|
||||
'description': 'The configured health check.',
|
||||
'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'}),
|
||||
},
|
||||
|
||||
# Metrics.
|
||||
'PROMETHEUS_NAMESPACE': {
|
||||
'type': 'string',
|
||||
'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`',
|
||||
'x-example': 'myregistry',
|
||||
},
|
||||
|
||||
# Misc configuration.
|
||||
'BLACKLIST_V2_SPEC': {
|
||||
'type': 'string',
|
||||
'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`',
|
||||
'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec',
|
||||
'x-example': '<1.8.0',
|
||||
},
|
||||
'USER_RECOVERY_TOKEN_LIFETIME': {
|
||||
'type': 'string',
|
||||
'description': 'The length of time a token for recovering a user accounts is valid. Defaults to 30m.',
|
||||
'x-example': '10m',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
'SESSION_COOKIE_SECURE': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether the `secure` property should be set on session cookies. ' +
|
||||
'Defaults to False. Recommended to be True for all installations using SSL.',
|
||||
'x-example': True,
|
||||
'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies',
|
||||
},
|
||||
'PUBLIC_NAMESPACES': {
|
||||
'type': 'array',
|
||||
'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' +
|
||||
' user\'s repository list pages, regardless of whether that user is a member of the namespace.' +
|
||||
' Typically, this is used by an enterprise customer in configuring a set of "well-known"' +
|
||||
' namespaces.',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
'AVATAR_KIND': {
|
||||
'type': 'string',
|
||||
'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)',
|
||||
'enum': ['local', 'gravatar'],
|
||||
},
|
||||
'V2_PAGINATION_SIZE': {
|
||||
'type': 'number',
|
||||
'description': 'The number of results returned per page in V2 registry APIs',
|
||||
'x-example': 100,
|
||||
},
|
||||
'ENABLE_HEALTH_DEBUG_SECRET': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If specified, a secret that can be given to health endpoints to see full debug info when' +
|
||||
'not authenticated as a superuser',
|
||||
'x-example': 'somesecrethere',
|
||||
},
|
||||
'BROWSER_API_CALLS_XHR_ONLY': {
|
||||
'type': 'boolean',
|
||||
'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Time machine and tag expiration settings.
|
||||
'FEATURE_CHANGE_TAG_EXPIRATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.',
|
||||
'x-example': False,
|
||||
},
|
||||
'DEFAULT_TAG_EXPIRATION': {
|
||||
'type': 'string',
|
||||
'description': 'The default, configurable tag expiration time for time machine. Defaults to `2w`.',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
'TAG_EXPIRATION_OPTIONS': {
|
||||
'type': 'array',
|
||||
'description': 'The options that users can select for expiration of tags in their namespace (if enabled)',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
},
|
||||
|
||||
# Team syncing.
|
||||
'FEATURE_TEAM_SYNCING': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)',
|
||||
'x-example': True,
|
||||
},
|
||||
'TEAM_RESYNC_STALE_TIME': {
|
||||
'type': 'string',
|
||||
'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)',
|
||||
'x-example': '2h',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {
|
||||
'type': 'boolean',
|
||||
'description': 'If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Security scanning.
|
||||
'FEATURE_SECURITY_SCANNER': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to turn of/off the security scanner. Defaults to False',
|
||||
'x-example': False,
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html',
|
||||
},
|
||||
'FEATURE_SECURITY_NOTIFICATIONS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'SECURITY_SCANNER_ENDPOINT' : {
|
||||
'type': 'string',
|
||||
'pattern': '^http(s)?://(.)+$',
|
||||
'description': 'The endpoint for the security scanner',
|
||||
'x-example': 'http://192.168.99.101:6060' ,
|
||||
},
|
||||
'SECURITY_SCANNER_INDEXING_INTERVAL': {
|
||||
'type': 'number',
|
||||
'description': 'The number of seconds between indexing intervals in the security scanner. Defaults to 30.',
|
||||
'x-example': 30,
|
||||
},
|
||||
|
||||
# Bittorrent support.
|
||||
'FEATURE_BITTORRENT': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to allow using Bittorrent-based pulls. Defaults to False',
|
||||
'x-example': False,
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bittorrent.html',
|
||||
},
|
||||
'BITTORRENT_PIECE_SIZE': {
|
||||
'type': 'number',
|
||||
'description': 'The bittorent piece size to use. If not specified, defaults to 512 * 1024.',
|
||||
'x-example': 512 * 1024,
|
||||
},
|
||||
'BITTORRENT_ANNOUNCE_URL': {
|
||||
'type': 'string',
|
||||
'pattern': '^http(s)?://(.)+$',
|
||||
'description': 'The URL of the announce endpoint on the bittorrent tracker',
|
||||
'x-example': 'https://localhost:6881/announce',
|
||||
},
|
||||
|
||||
# Build
|
||||
'FEATURE_GITHUB_BUILD': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support GitHub build triggers. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_BITBUCKET_BUILD': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support Bitbucket build triggers. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_GITLAB_BUILD': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support GitLab build triggers. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_BUILD_SUPPORT': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support Dockerfile build. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': {
|
||||
'type': ['number', 'null'],
|
||||
'description': 'If not None, the default maximum number of builds that can be queued in a namespace.',
|
||||
'x-example': 20,
|
||||
},
|
||||
'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': {
|
||||
'type': ['number', 'null'],
|
||||
'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.',
|
||||
'x-example': 10,
|
||||
},
|
||||
'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': {
|
||||
'type': ['number', 'null'],
|
||||
'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.',
|
||||
'x-example': 50,
|
||||
},
|
||||
|
||||
# Login
|
||||
'FEATURE_GITHUB_LOGIN': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether GitHub login is supported. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_GOOGLE_LOGIN': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether Google login is supported. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Recaptcha
|
||||
'FEATURE_RECAPTCHA': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False',
|
||||
'x-example': False,
|
||||
'x-reference': 'https://www.google.com/recaptcha/intro/',
|
||||
},
|
||||
'RECAPTCHA_SITE_KEY': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If recaptcha is enabled, the site key for the Recaptcha service',
|
||||
},
|
||||
'RECAPTCHA_SECRET_KEY': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If recaptcha is enabled, the secret key for the Recaptcha service',
|
||||
},
|
||||
|
||||
# External application tokens.
|
||||
'FEATURE_APP_SPECIFIC_TOKENS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If enabled, users can create tokens for use by the Docker CLI. Defaults to True',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
'APP_SPECIFIC_TOKEN_EXPIRATION': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'The expiration for external app tokens. Defaults to None.',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
|
||||
'EXPIRED_APP_SPECIFIC_TOKEN_GC': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
|
||||
# Feature Flag: Permanent Sessions.
|
||||
'FEATURE_PERMANENT_SESSIONS': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether sessions are permanent. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Super User Support.
|
||||
'FEATURE_SUPER_USERS': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether super users are supported. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Anonymous Users.
|
||||
'FEATURE_ANONYMOUS_ACCESS': {
|
||||
'type': 'boolean',
|
||||
'description': ' Whether to allow anonymous users to browse and pull public repositories. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: User Creation.
|
||||
'FEATURE_USER_CREATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether users can be created (by non-super users). Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Invite Only User Creation.
|
||||
'FEATURE_INVITE_ONLY_USER_CREATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether users being created must be invited by another user. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Encrypted Basic Auth.
|
||||
'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Direct Login.
|
||||
'FEATURE_DIRECT_LOGIN': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether users can directly login to the UI. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Advertising V2.
|
||||
'FEATURE_ADVERTISE_V2': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether the v2/ endpoint is visible. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Log Rotation.
|
||||
'FEATURE_ACTION_LOG_ROTATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether or not to rotate old action logs to storage. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: ACI Conversion.
|
||||
'FEATURE_ACI_CONVERSION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to enable conversion to ACIs. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Library Support.
|
||||
'FEATURE_LIBRARY_SUPPORT': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Require Team Invite.
|
||||
'FEATURE_REQUIRE_TEAM_INVITE': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to require invitations when adding a user to a team. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: Collecting and Supporting Metadata.
|
||||
'FEATURE_USER_METADATA': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to collect and support user metadata. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Support App Registry.
|
||||
'FEATURE_APP_REGISTRY': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to enable support for App repositories. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Public Reposiotires in _catalog Endpoint.
|
||||
'FEATURE_PUBLIC_CATALOG': {
|
||||
'type': 'boolean',
|
||||
'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Reader Build Logs.
|
||||
'FEATURE_READER_BUILD_LOGS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Feature Flag: Usernames Autocomplete.
|
||||
'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {
|
||||
'type': 'boolean',
|
||||
'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: User log access.
|
||||
'FEATURE_USER_LOG_ACCESS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Feature Flag: User renaming.
|
||||
'FEATURE_USER_RENAME': {
|
||||
'type': 'boolean',
|
||||
'description': 'If set to true, users can rename their own namespace. Defaults to False',
|
||||
'x-example': True,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
81
config_app/config_util/config/testprovider.py
Normal file
81
config_app/config_util/config/testprovider.py
Normal file
|
@ -0,0 +1,81 @@
|
|||
import json
|
||||
import io
|
||||
import os
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from config_util.config.baseprovider import BaseProvider
|
||||
|
||||
# Fixture files that exist on the real filesystem; TestConfigProvider serves these
# from disk (via open()) instead of from its in-memory store.
REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem']
|
||||
|
||||
|
||||
class TestConfigProvider(BaseProvider):
  """ Implementation of the config provider for testing. Everything is kept in-memory instead on
    the real file system. """
  def __init__(self):
    self.clear()

  def clear(self):
    """ Resets the provider to an empty state: no stored files, no config. """
    # Maps filename -> raw file contents; 'config.yaml' holds the JSON-serialized config.
    self.files = {}
    self._config = {}

  @property
  def provider_id(self):
    """ Short identifier distinguishing this provider from the file/k8s providers. """
    return 'test'

  def update_app_config(self, app_config):
    # Keeps a reference to the live app config so reset_for_test can mutate it in place.
    self._config = app_config

  def get_config(self):
    """ Returns the parsed config dict, or None if no config has been saved yet. """
    if 'config.yaml' not in self.files:
      return None

    # Stored serialized (as JSON) so saving/loading round-trips through a parser,
    # mirroring the YAML round-trip of the real file-backed provider.
    return json.loads(self.files['config.yaml'])

  def save_config(self, config_obj):
    """ Serializes the given config dict and stores it in memory. """
    self.files['config.yaml'] = json.dumps(config_obj)

  def config_exists(self):
    """ Returns whether a config has been saved via save_config. """
    return 'config.yaml' in self.files

  def volume_exists(self):
    # The in-memory "volume" always exists.
    return True

  def volume_file_exists(self, filename):
    """ Returns whether the named file exists, either as a real fixture file or in memory. """
    if filename in REAL_FILES:
      return True

    return filename in self.files

  def save_volume_file(self, filename, flask_file):
    """ Stores the contents of the given uploaded (Flask) file in memory. """
    self.files[filename] = flask_file.read()

  def write_volume_file(self, filename, contents):
    """ Stores the given contents in memory under the given filename. """
    self.files[filename] = contents

  def get_volume_file(self, filename, mode='r'):
    """ Returns a readable file(-like) object for the named file.

        Fixture files listed in REAL_FILES are opened from disk; everything else
        is served from the in-memory store.
    """
    if filename in REAL_FILES:
      return open(filename, mode=mode)

    # Raises KeyError if the file was never written; callers are expected to
    # check volume_file_exists first.
    return io.BytesIO(self.files[filename])

  def remove_volume_file(self, filename):
    """ Removes the named in-memory file; a no-op if it does not exist. """
    self.files.pop(filename, None)

  def list_volume_directory(self, path):
    """ Lists the names of in-memory files stored under the given directory path.

        NOTE(review): assumes exactly one separator character follows `path` in each
        matching filename (hence the +1 when stripping the prefix) -- confirm
        against callers.
    """
    return [filename[len(path) + 1:]
            for filename in self.files
            if filename.startswith(path)]

  def requires_restart(self, app_config):
    # Tests never need an app restart after config changes.
    return False

  def reset_for_test(self):
    """ Restores the well-known test superuser and drops all stored files. """
    self._config['SUPER_USERS'] = ['devtable']
    self.files = {}

  def get_volume_path(self, directory, filename):
    """ Returns the path of the given filename under the given directory. """
    return os.path.join(directory, filename)
|
||||
|
Reference in a new issue