Add some basic endpoints to the config app backend
rename files to avoid overlap with quay app

parent d080ca2cc6
commit c378e408ef

39 changed files with 3095 additions and 384 deletions
@@ -1,3 +1,3 @@
app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py application:application
app: PYTHONPATH="./" gunicorn -c conf/gunicorn_local.py config_application:application
# webpack: npm run watch-config-app
config_app/_init_config.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import os
import re
import subprocess


ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')


# TODO(config): Remove this external folder dependency
EXTERNAL_REPO_REQUIRE_PATH = os.path.dirname(ROOT_DIR)


def _get_version_number_changelog():
  try:
    with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f:
      return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0)
  except IOError:
    return ''


def _get_git_sha():
  if os.path.exists("GIT_HEAD"):
    with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f:
      return f.read()
  else:
    try:
      return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
    except (OSError, subprocess.CalledProcessError):
      pass
  return "unknown"


__version__ = _get_version_number_changelog()
__gitrev__ = _get_git_sha()
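A small sketch of how these module-level values might be consumed elsewhere; the caller below is hypothetical, not part of this commit:

import _init_config

def version_banner():
  # e.g. 'config tool v2.9.0 (c378e408)'; the format here is illustrative only
  return 'config tool %s (%s)' % (_init_config.__version__, _init_config.__gitrev__)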
@@ -1,4 +0,0 @@
from flask import Flask

app = Flask(__name__)
@@ -5,12 +5,12 @@ sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging

from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
from config_util.log import logfile_path
from config_util.workers import get_worker_count


logconfig = logfile_path(debug=True)
bind = '0.0.0.0:5000'
bind = '127.0.0.1:5000'
workers = get_worker_count('local', 2, minimum=2, maximum=8)
worker_class = 'gevent'
daemon = False
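This hunk swaps util.workers for config_util.workers, but the helper's body is not part of this diff. A sketch consistent with the call site get_worker_count('local', 2, minimum=2, maximum=8) could look like this; it is an assumption, not the committed code:

import multiprocessing

def get_worker_count(worker_kind_name, multiplier, minimum=2, maximum=8):
  # Assumed behavior: scale worker count with CPU count, clamped to [minimum, maximum].
  return max(minimum, min(maximum, multiprocessing.cpu_count() * multiplier))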
config_app/conf/logging.conf (new file, 36 lines)
@@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler_config.JsonFormatter

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
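These are standard-library fileConfig-format files; gunicorn picks one up through the logconfig value set in gunicorn_local.py, and the same file can be loaded directly. Note that because the json formatter is declared in the formatters list, fileConfig resolves loghandler_config.JsonFormatter at load time, so that module must be importable; the path below is illustrative:

import logging.config

logging.config.fileConfig('config_app/conf/logging.conf', disable_existing_loggers=False)
logging.getLogger('gunicorn.error').error('handlers and formatters wired up')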
config_app/conf/logging_debug.conf (new file, 41 lines)
@@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=DEBUG
handlers=console

[logger_boto]
level=INFO
handlers=console
qualname=boto

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG

[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler_config.JsonFormatter
config_app/conf/logging_debug_json.conf (new file, 41 lines)
@@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=DEBUG
handlers=console

[logger_boto]
level=INFO
handlers=console
qualname=boto

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG

[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler_config.JsonFormatter
config_app/conf/logging_json.conf (new file, 36 lines)
@@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=json,generic

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler_config.JsonFormatter

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
config_app/config_app.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import os
import logging
from flask import Flask
from _init_config import CONF_DIR
from config_util.config import get_config_provider

app = Flask(__name__)

logger = logging.getLogger(__name__)

OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')


is_testing = 'TEST' in os.environ
is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ

config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config_app_config.py',
                                      testing=is_testing, kubernetes=is_kubernetes)

if is_testing:
  from config_test.testconfig import TestConfig
  logger.debug('Loading test config.')
  app.config.from_object(TestConfig())
else:
  from config_app_config import DefaultConfig
  logger.debug('Loading default config.')
  app.config.from_object(DefaultConfig())
  # app.teardown_request(database.close_db_filter)

# Load the override config via the provider.
config_provider.update_app_config(app.config)
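The provider returned by get_config_provider is only exercised through a handful of methods in this commit (get_config, volume_exists, config_exists, update_app_config, plus the volume-file helpers used in superuser.py, and provider_id/requires_restart in suconfig.py). A minimal hypothetical file-backed provider illustrating that surface, not the committed implementation in config_util.config:

import os
import yaml  # assumed available; Quay config is YAML

class SimpleConfigProvider(object):
  # Illustrative stand-in for the object returned by get_config_provider().
  def __init__(self, config_volume, yaml_filename):
    self._config_volume = config_volume
    self._path = os.path.join(config_volume, yaml_filename)

  def volume_exists(self):
    return os.path.isdir(self._config_volume)

  def config_exists(self):
    return os.path.isfile(self._path)

  def get_config(self):
    if not self.config_exists():
      return None
    with open(self._path) as f:
      return yaml.safe_load(f)

  def update_app_config(self, app_config):
    # Overlay the on-disk YAML onto the Flask config, mirroring the call above.
    app_config.update(self.get_config() or {})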
config_app/config_app_config.py (new file, 544 lines)
@@ -0,0 +1,544 @@
from uuid import uuid4

import os.path
import requests

from _init_config import ROOT_DIR, CONF_DIR, EXTERNAL_REPO_REQUIRE_PATH


def build_requests_session():
  sess = requests.Session()
  adapter = requests.adapters.HTTPAdapter(pool_connections=100,
                                          pool_maxsize=100)
  sess.mount('http://', adapter)
  sess.mount('https://', adapter)
  return sess


# The set of configuration key names that will be accessible in the client. Since these
# values are sent to the frontend, DO NOT PLACE ANY SECRETS OR KEYS in this list.
CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'MIXPANEL_KEY',
                    'STRIPE_PUBLISHABLE_KEY', 'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN',
                    'AUTHENTICATION_TYPE', 'REGISTRY_TITLE', 'REGISTRY_TITLE_SHORT',
                    'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', 'DOCUMENTATION_LOCATION',
                    'DOCUMENTATION_METADATA', 'SETUP_COMPLETE', 'DEBUG', 'MARKETO_MUNCHKIN_ID',
                    'STATIC_SITE_BUCKET', 'RECAPTCHA_SITE_KEY', 'CHANNEL_COLORS',
                    'TAG_EXPIRATION_OPTIONS', 'INTERNAL_OIDC_SERVICE_ID',
                    'SEARCH_RESULTS_PER_PAGE', 'SEARCH_MAX_RESULT_PAGE_COUNT']


def frontend_visible_config(config_dict):
  visible_dict = {}
  for name in CLIENT_WHITELIST:
    if name.lower().find('secret') >= 0:
      raise Exception('Cannot whitelist secrets: %s' % name)

    if name in config_dict:
      visible_dict[name] = config_dict.get(name, None)

  return visible_dict

# Configuration that should not be changed by end users
class ImmutableConfig(object):

  # Requests based HTTP client with a large request pool
  HTTPCLIENT = build_requests_session()

  # Status tag config
  STATUS_TAGS = {}
  for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']:
    tag_path = os.path.join(EXTERNAL_REPO_REQUIRE_PATH, 'buildstatus', tag_name + '.svg')
    with open(tag_path) as tag_svg:
      STATUS_TAGS[tag_name] = tag_svg.read()

  # Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable
  # to be set via the API.
  DEFAULT_LABEL_KEY_RESERVED_PREFIXES = ['com.docker.', 'io.docker.', 'org.dockerproject.',
                                         'org.opencontainers.', 'io.cncf.',
                                         'io.kubernetes.', 'io.k8s.',
                                         'io.quay', 'com.coreos', 'com.tectonic',
                                         'internal', 'quay']

  # Colors for local avatars.
  AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728',
                   '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2',
                   '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79',
                   '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b',
                   '#8c6d31', '#ad494a', '#e7ba52', '#a55194']

  # Colors for channels.
  CHANNEL_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728',
                    '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2',
                    '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79',
                    '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b',
                    '#8c6d31', '#ad494a', '#e7ba52', '#a55194']

  PROPAGATE_EXCEPTIONS = True


class DefaultConfig(ImmutableConfig):
  # Flask config
  JSONIFY_PRETTYPRINT_REGULAR = False
  SESSION_COOKIE_SECURE = False

  LOGGING_LEVEL = 'DEBUG'
  SEND_FILE_MAX_AGE_DEFAULT = 0
  PREFERRED_URL_SCHEME = 'http'
  SERVER_HOSTNAME = 'localhost:5000'

  REGISTRY_TITLE = 'Quay Enterprise'
  REGISTRY_TITLE_SHORT = 'Quay Enterprise'

  CONTACT_INFO = [
    'mailto:support@quay.io',
    'irc://chat.freenode.net:6665/quay',
    'tel:+1-888-930-3475',
    'https://twitter.com/quayio',
  ]

  # Mail config
  MAIL_SERVER = ''
  MAIL_USE_TLS = True
  MAIL_PORT = 587
  MAIL_USERNAME = None
  MAIL_PASSWORD = None
  MAIL_DEFAULT_SENDER = 'support@quay.io'
  MAIL_FAIL_SILENTLY = False
  TESTING = True

  # DB config
  DB_URI = 'sqlite:///test/data/test.db'
  DB_CONNECTION_ARGS = {
    'threadlocals': True,
    'autorollback': True,
  }

  @staticmethod
  def create_transaction(db):
    return db.transaction()

  DB_TRANSACTION_FACTORY = create_transaction

  # If set to true, TLS is used, but is terminated by an external service (such as a load balancer).
  # Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined
  # behavior.
  EXTERNAL_TLS_TERMINATION = False

  # If true, CDN URLs will be used for our external dependencies, rather than the local
  # copies.
  USE_CDN = False

  # Authentication
  AUTHENTICATION_TYPE = 'Database'

  # Build logs
  BUILDLOGS_REDIS = {'host': 'localhost'}
  BUILDLOGS_OPTIONS = []

  # Real-time user events
  USER_EVENTS_REDIS = {'host': 'localhost'}

  # Stripe config
  BILLING_TYPE = 'FakeStripe'

  # Analytics
  ANALYTICS_TYPE = 'FakeAnalytics'

  # Build Queue Metrics
  QUEUE_METRICS_TYPE = 'Null'
  QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300

  # Exception logging
  EXCEPTION_LOG_TYPE = 'FakeSentry'
  SENTRY_DSN = None
  SENTRY_PUBLIC_DSN = None

  # Github Config
  GITHUB_LOGIN_CONFIG = None
  GITHUB_TRIGGER_CONFIG = None

  # Google Config.
  GOOGLE_LOGIN_CONFIG = None

  # Bitbucket Config.
  BITBUCKET_TRIGGER_CONFIG = None

  # Gitlab Config.
  GITLAB_TRIGGER_CONFIG = None

  NOTIFICATION_QUEUE_NAME = 'notification'
  DOCKERFILE_BUILD_QUEUE_NAME = 'dockerfilebuild'
  REPLICATION_QUEUE_NAME = 'imagestoragereplication'
  SECSCAN_NOTIFICATION_QUEUE_NAME = 'security_notification'
  CHUNK_CLEANUP_QUEUE_NAME = 'chunk_cleanup'
  NAMESPACE_GC_QUEUE_NAME = 'namespacegc'

  # Super user config. Note: This MUST BE an empty list for the default config.
  SUPER_USERS = []

  # Feature Flag: Whether sessions are permanent.
  FEATURE_PERMANENT_SESSIONS = True

  # Feature Flag: Whether super users are supported.
  FEATURE_SUPER_USERS = True

  # Feature Flag: Whether to allow anonymous users to browse and pull public repositories.
  FEATURE_ANONYMOUS_ACCESS = True

  # Feature Flag: Whether billing is required.
  FEATURE_BILLING = False

  # Feature Flag: Whether user accounts automatically have usage log access.
  FEATURE_USER_LOG_ACCESS = False

  # Feature Flag: Whether GitHub login is supported.
  FEATURE_GITHUB_LOGIN = False

  # Feature Flag: Whether Google login is supported.
  FEATURE_GOOGLE_LOGIN = False

  # Feature Flag: whether to enable support chat
  FEATURE_SUPPORT_CHAT = False

  # Feature Flag: Whether to support GitHub build triggers.
  FEATURE_GITHUB_BUILD = False

  # Feature Flag: Whether to support Bitbucket build triggers.
  FEATURE_BITBUCKET_BUILD = False

  # Feature Flag: Whether to support GitLab build triggers.
  FEATURE_GITLAB_BUILD = False

  # Feature Flag: Dockerfile build support.
  FEATURE_BUILD_SUPPORT = True

  # Feature Flag: Whether emails are enabled.
  FEATURE_MAILING = True

  # Feature Flag: Whether users can be created (by non-super users).
  FEATURE_USER_CREATION = True

  # Feature Flag: Whether users being created must be invited by another user. If FEATURE_USER_CREATION is off,
  # this flag has no effect.
  FEATURE_INVITE_ONLY_USER_CREATION = False

  # Feature Flag: Whether users can be renamed
  FEATURE_USER_RENAME = False

  # Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for
  # basic auth.
  FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False

  # Feature Flag: Whether to automatically replicate between storage engines.
  FEATURE_STORAGE_REPLICATION = False

  # Feature Flag: Whether users can directly login to the UI.
  FEATURE_DIRECT_LOGIN = True

  # Feature Flag: Whether the v2/ endpoint is visible
  FEATURE_ADVERTISE_V2 = True

  # Semver spec for which Docker versions we will blacklist
  # Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec
  BLACKLIST_V2_SPEC = '<1.6.0'

  # Feature Flag: Whether or not to rotate old action logs to storage.
  FEATURE_ACTION_LOG_ROTATION = False

  # Feature Flag: Whether to enable conversion to ACIs.
  FEATURE_ACI_CONVERSION = False

  # Feature Flag: Whether to allow for "namespace-less" repositories when pulling and pushing from
  # Docker.
  FEATURE_LIBRARY_SUPPORT = True

  # Feature Flag: Whether to require invitations when adding a user to a team.
  FEATURE_REQUIRE_TEAM_INVITE = True

  # Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx.
  FEATURE_PROXY_STORAGE = False

  # Feature Flag: Whether to collect and support user metadata.
  FEATURE_USER_METADATA = False

  # Feature Flag: Whether to support signing
  FEATURE_SIGNING = False

  # Feature Flag: Whether to enable support for App repositories.
  FEATURE_APP_REGISTRY = False

  # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise,
  # only private repositories can be returned.
  FEATURE_PUBLIC_CATALOG = False

  # Feature Flag: If set to true, build logs may be read by those with read access to the repo,
  # rather than only write access or admin access.
  FEATURE_READER_BUILD_LOGS = False

  # Feature Flag: If set to true, autocompletion will apply to partial usernames.
  FEATURE_PARTIAL_USER_AUTOCOMPLETE = True

  # If a namespace is defined in the public namespace list, then it will appear on *all*
  # user's repository list pages, regardless of whether that user is a member of the namespace.
  # Typically, this is used by an enterprise customer in configuring a set of "well-known"
  # namespaces.
  PUBLIC_NAMESPACES = []

  # The namespace to use for library repositories.
  # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries.
  # See: https://github.com/docker/docker/blob/master/registry/session.go#L320
  LIBRARY_NAMESPACE = 'library'

  BUILD_MANAGER = ('enterprise', {})

  DISTRIBUTED_STORAGE_CONFIG = {
    'local_eu': ['LocalStorage', {'storage_path': 'test/data/registry/eu'}],
    'local_us': ['LocalStorage', {'storage_path': 'test/data/registry/us'}],
  }

  DISTRIBUTED_STORAGE_PREFERENCE = ['local_us']
  DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ['local_us']

  # Health checker.
  HEALTH_CHECKER = ('LocalHealthCheck', {})

  # Userfiles
  USERFILES_LOCATION = 'local_us'
  USERFILES_PATH = 'userfiles/'

  # Build logs archive
  LOG_ARCHIVE_LOCATION = 'local_us'
  LOG_ARCHIVE_PATH = 'logarchive/'

  # Action logs archive
  ACTION_LOG_ARCHIVE_LOCATION = 'local_us'
  ACTION_LOG_ARCHIVE_PATH = 'actionlogarchive/'

  # System logs.
  SYSTEM_LOGS_PATH = "/var/log/"
  SYSTEM_LOGS_FILE = "/var/log/syslog"
  SYSTEM_SERVICES_PATH = os.path.join(CONF_DIR, "init/service/")

  # Allow registry pulls when unable to write to the audit log
  ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False

  # Services that should not be shown in the logs view.
  SYSTEM_SERVICE_BLACKLIST = []

  # Temporary tag expiration in seconds, this may actually be longer based on GC policy
  PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60  # One hour per layer

  # Signed registry grant token expiration in seconds
  SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24  # One day to complete a push/pull

  # Registry v2 JWT Auth config
  REGISTRY_JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60  # At most signed one hour, accounting for clock skew

  # The URL endpoint to which we redirect OAuth when generating a token locally.
  LOCAL_OAUTH_HANDLER = '/oauth/localapp'

  # The various avatar background colors.
  AVATAR_KIND = 'local'

  # The location of the Quay documentation.
  DOCUMENTATION_LOCATION = 'http://docs.quay.io'
  DOCUMENTATION_METADATA = 'https://coreos.github.io/quay-docs/search.json'

  # How often the Garbage Collection worker runs.
  GARBAGE_COLLECTION_FREQUENCY = 30  # seconds

  # How long notifications will try to send before timing out.
  NOTIFICATION_SEND_TIMEOUT = 10

  # Security scanner
  FEATURE_SECURITY_SCANNER = False
  FEATURE_SECURITY_NOTIFICATIONS = False

  # The endpoint for the security scanner.
  SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060'

  # The number of seconds between indexing intervals in the security scanner
  SECURITY_SCANNER_INDEXING_INTERVAL = 30

  # If specified, the security scanner will only index images newer than the provided ID.
  SECURITY_SCANNER_INDEXING_MIN_ID = None

  # If specified, the endpoint to be used for all POST calls to the security scanner.
  SECURITY_SCANNER_ENDPOINT_BATCH = None

  # If specified, GET requests that return non-200 will be retried at the following instances.
  SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = []

  # The indexing engine version running inside the security scanner.
  SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3

  # The version of the API to use for the security scanner.
  SECURITY_SCANNER_API_VERSION = 'v1'

  # API call timeout for the security scanner.
  SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10

  # POST call timeout for the security scanner.
  SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS = 480

  # The issuer name for the security scanner.
  SECURITY_SCANNER_ISSUER_NAME = 'security_scanner'

  # JWTProxy Settings
  # The address (sans schema) to proxy outgoing requests through the jwtproxy
  # to be signed
  JWTPROXY_SIGNER = 'localhost:8080'

  # The audience that jwtproxy should verify on incoming requests
  # If None, will be calculated off of the SERVER_HOSTNAME (default)
  JWTPROXY_AUDIENCE = None

  # Torrent management flags
  FEATURE_BITTORRENT = False
  BITTORRENT_PIECE_SIZE = 512 * 1024
  BITTORRENT_ANNOUNCE_URL = 'https://localhost:6881/announce'
  BITTORRENT_FILENAME_PEPPER = str(uuid4())
  BITTORRENT_WEBSEED_LIFETIME = 3600

  # "Secret" key for generating encrypted paging tokens. Only needed to be secret to
  # hide the ID range for production (in which this value is overridden). Should *not*
  # be relied upon for secure encryption otherwise.
  # This value is a Fernet key and should be 32bytes URL-safe base64 encoded.
  PAGE_TOKEN_KEY = '0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58='

  # The timeout for service key approval.
  UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24  # One day

  # How long to wait before GCing an expired service key.
  EXPIRED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 * 7  # One week

  # The ID of the user account in the database to be used for service audit logs. If none, the
  # lowest user in the database will be used.
  SERVICE_LOG_ACCOUNT_ID = None

  # The service key ID for the instance service.
  # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
  INSTANCE_SERVICE_KEY_SERVICE = 'quay'

  # The location of the key ID file generated for this instance.
  INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid')

  # The location of the private key generated for this instance.
  # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
  INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem')

  # This instance's service key expiration in minutes.
  INSTANCE_SERVICE_KEY_EXPIRATION = 120

  # Number of minutes between expiration refresh in minutes. Should be the expiration / 2 minus
  # some additional window time.
  INSTANCE_SERVICE_KEY_REFRESH = 55

  # The whitelist of client IDs for OAuth applications that allow for direct login.
  DIRECT_OAUTH_CLIENTID_WHITELIST = []

  # URL that specifies the location of the prometheus stats aggregator.
  PROMETHEUS_AGGREGATOR_URL = 'http://localhost:9092'

  # Namespace prefix for all prometheus metrics.
  PROMETHEUS_NAMESPACE = 'quay'

  # Overridable list of reverse DNS prefixes that are reserved for internal use on labels.
  LABEL_KEY_RESERVED_PREFIXES = []

  # Delays workers from starting until a random point in time between 0 and their regular interval.
  STAGGER_WORKERS = True

  # Location of the static marketing site.
  STATIC_SITE_BUCKET = None

  # Site key and secret key for using recaptcha.
  FEATURE_RECAPTCHA = False
  RECAPTCHA_SITE_KEY = None
  RECAPTCHA_SECRET_KEY = None

  # Server where TUF metadata can be found
  TUF_SERVER = None

  # Prefix to add to metadata e.g. <prefix>/<namespace>/<reponame>
  TUF_GUN_PREFIX = None

  # Maximum size allowed for layers in the registry.
  MAXIMUM_LAYER_SIZE = '20G'

  # Feature Flag: Whether team syncing from the backing auth is enabled.
  FEATURE_TEAM_SYNCING = False
  TEAM_RESYNC_STALE_TIME = '30m'
  TEAM_SYNC_WORKER_FREQUENCY = 60  # seconds

  # Feature Flag: If enabled, non-superusers can setup team syncing.
  FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False

  # The default configurable tag expiration time for time machine.
  DEFAULT_TAG_EXPIRATION = '2w'

  # The options to present in namespace settings for the tag expiration. If empty, no option
  # will be given and the default will be displayed read-only.
  TAG_EXPIRATION_OPTIONS = ['0s', '1d', '1w', '2w', '4w']

  # Feature Flag: Whether users can view and change their tag expiration.
  FEATURE_CHANGE_TAG_EXPIRATION = True

  # Defines a secret for enabling the health-check endpoint's debug information.
  ENABLE_HEALTH_DEBUG_SECRET = None

  # The lifetime for a user recovery token before it becomes invalid.
  USER_RECOVERY_TOKEN_LIFETIME = '30m'

  # If specified, when app specific passwords expire by default.
  APP_SPECIFIC_TOKEN_EXPIRATION = None

  # Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI.
  FEATURE_APP_SPECIFIC_TOKENS = True

  # How long expired app specific tokens should remain visible to users before being automatically
  # deleted. Set to None to turn off garbage collection.
  EXPIRED_APP_SPECIFIC_TOKEN_GC = '1d'

  # The size of pages returned by the Docker V2 API.
  V2_PAGINATION_SIZE = 50

  # If enabled, ensures that API calls are made with the X-Requested-With header
  # when called from a browser.
  BROWSER_API_CALLS_XHR_ONLY = True

  # If set to a non-None integer value, the default number of maximum builds for a namespace.
  DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None

  # If set to a non-None integer value, the default number of maximum builds for a namespace whose
  # creator IP is deemed a threat.
  THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None

  # For Billing Support Only: The number of allowed builds on a namespace that has been billed
  # successfully.
  BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None

  # Configuration for the data model cache.
  DATA_MODEL_CACHE_CONFIG = {
    'engine': 'memcached',
    'endpoint': ('127.0.0.1', 18080),
  }

  # Defines the number of successive failures of a build trigger's build before the trigger is
  # automatically disabled.
  SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100

  # Defines the number of successive internal errors of a build trigger's build before the
  # trigger is automatically disabled.
  SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5

  # Defines the delay required (in seconds) before the last_accessed field of a user/robot or access
  # token will be updated after the previous update.
  LAST_ACCESSED_UPDATE_THRESHOLD_S = 60

  # Defines the number of results per page used to show search results
  SEARCH_RESULTS_PER_PAGE = 10

  # Defines the maximum number of pages the user can paginate before they are limited
  SEARCH_MAX_RESULT_PAGE_COUNT = 10
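Two quick usage sketches for helpers defined in this file; both call sites are hypothetical, not part of the commit. frontend_visible_config filters a config dict down to the whitelisted keys, and DB_TRANSACTION_FACTORY wraps database writes in a transaction:

from config_app_config import frontend_visible_config

# Only whitelisted keys survive; anything else (e.g. a secret) is simply dropped.
sample = {'SERVER_HOSTNAME': 'quay.example.com', 'SECRET_KEY': 'do-not-send'}
assert frontend_visible_config(sample) == {'SERVER_HOSTNAME': 'quay.example.com'}

# Hypothetical write path using the transaction factory with a peewee-style db object.
def save_all(db, rows):
  with app.config['DB_TRANSACTION_FACTORY'](db):
    for row in rows:
      row.save()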
@@ -1,7 +1,7 @@
from app import app as application
from config_app import app as application

# Bind all of the blueprints
import web
import config_web


if __name__ == '__main__':
@@ -1,14 +1,12 @@
import logging

from config_app import app
from config_app.util.config import config_provider

from flask import Blueprint, request, session
from flask_restful import Resource, abort, Api, reqparse
from flask import Blueprint
from flask_restful import Resource, Api
from flask_restful.utils.cors import crossdomain

from config_app import app
from functools import partial, wraps

from jsonschema import validate, ValidationError
from config_endpoints.exception import InvalidResponse

logger = logging.getLogger(__name__)
api_bp = Blueprint('api', __name__)
@@ -17,6 +15,8 @@ CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With']


class ApiExceptionHandlingApi(Api):
  pass

  @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS)
  def handle_error(self, error):
    print('HANDLING ERROR IN API')
@@ -25,19 +25,12 @@ class ApiExceptionHandlingApi(Api):

api = ApiExceptionHandlingApi()


class HelloWorld(Resource):
  def get(self):
    print("hit the dummy endpoint")
    return {'hello': 'world'}


api.add_resource(HelloWorld, '/')

api.init_app(api_bp)


def verify_not_prod(func):
  @add_method_metadata('enterprise_only', True)
  @wraps(func)
  def wrapped(*args, **kwargs):
    # Verify that we are not running on a production (i.e. hosted) stack. If so, we fail.
    # This should never happen (because of the feature-flag on SUPER_USERS), but we want to be

@@ -58,6 +51,7 @@ def resource(*urls, **kwargs):
  if not api_resource:
    return None

  print('registering resource: ', urls)
  api_resource.registered = True
  api.add_resource(api_resource, *urls, **kwargs)
  return api_resource
@@ -105,47 +99,31 @@ def no_cache(f):
  return add_no_cache


def define_json_response(schema_name):
  def wrapper(func):
    @add_method_metadata('response_schema', schema_name)
    @wraps(func)
    def wrapped(self, *args, **kwargs):
      schema = self.schemas[schema_name]
      resp = func(self, *args, **kwargs)

      if app.config['TESTING']:
        try:
          validate(resp, schema)
        except ValidationError as ex:
          raise InvalidResponse(ex.message)

      return resp
    return wrapped
  return wrapper

nickname = partial(add_method_metadata, 'nickname')


api.init_app(api_bp)
# api.decorators = [csrf_protect(),
#                   crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS),
#                   process_oauth, time_decorator(api_bp.name, metric_queue),
#                   require_xhr_from_browser]

import config_endpoints.api
import config_endpoints.api.discovery
import config_endpoints.api.suconfig
import config_endpoints.api.superuser
import config_endpoints.api.user


@resource('/v1/superuser/config')
class SuperUserConfig(ApiResource):
  """ Resource for fetching and updating the current configuration, if any. """
  schemas = {
    'UpdateConfig': {
      'type': 'object',
      'description': 'Updates the YAML config file',
      'required': [
        'config',
        'hostname'
      ],
      'properties': {
        'config': {
          'type': 'object'
        },
        'hostname': {
          'type': 'string'
        },
        'password': {
          'type': 'string'
        },
      },
    },
  }

  @verify_not_prod
  @nickname('scGetConfig')
  def get(self):
    """ Returns the currently defined configuration, if any. """
    config_object = config_provider.get_config()
    return {
      'config': config_object
    }
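Two sketches of how the pieces above are exercised; the mount prefix for api_bp is not shown in this diff, so the URL is an assumption. First, a smoke test of the dummy HelloWorld resource via Flask's test client, then a hypothetical resource showing how define_json_response composes with a handler ('Status' is an invented schema name):

from config_app import app

client = app.test_client()
resp = client.get('/api/')  # assumed mount point for api_bp; HelloWorld is registered at '/'
print(resp.get_json())      # expected: {'hello': 'world'}

class Status(ApiResource):
  schemas = {'Status': {'type': 'object', 'properties': {'ok': {'type': 'boolean'}}}}

  @define_json_response('Status')
  def get(self):
    return {'ok': True}  # validated against schemas['Status'] only when app.config['TESTING'] is true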
config_app/config_endpoints/api/discovery.py (new file, 252 lines)
@@ -0,0 +1,252 @@
import logging
import sys
from collections import OrderedDict

from config_app import app
from config_endpoints.api import method_metadata
from config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER


logger = logging.getLogger(__name__)


def generate_route_data():
  include_internal = True
  compact = True

  def swagger_parameter(name, description, kind='path', param_type='string', required=True,
                        enum=None, schema=None):
    # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject
    parameter_info = {
      'name': name,
      'in': kind,
      'required': required
    }

    if schema:
      parameter_info['schema'] = {
        '$ref': '#/definitions/%s' % schema
      }
    else:
      parameter_info['type'] = param_type

    if enum is not None and len(list(enum)) > 0:
      parameter_info['enum'] = list(enum)

    return parameter_info

  paths = {}
  models = {}
  tags = []
  tags_added = set()
  operation_ids = set()

  for rule in app.url_map.iter_rules():
    endpoint_method = app.view_functions[rule.endpoint]

    # Verify that we have a view class for this API method.
    if not 'view_class' in dir(endpoint_method):
      continue

    view_class = endpoint_method.view_class

    # Hide the class if it is internal.
    internal = method_metadata(view_class, 'internal')
    if not include_internal and internal:
      continue

    # Build the tag.
    parts = fully_qualified_name(view_class).split('.')
    tag_name = parts[-2]
    if not tag_name in tags_added:
      tags_added.add(tag_name)
      tags.append({
        'name': tag_name,
        'description': (sys.modules[view_class.__module__].__doc__ or '').strip()
      })

    # Build the Swagger data for the path.
    swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule)
    full_name = fully_qualified_name(view_class)
    path_swagger = {
      'x-name': full_name,
      'x-path': swagger_path,
      'x-tag': tag_name
    }

    related_user_res = method_metadata(view_class, 'related_user_resource')
    if related_user_res is not None:
      path_swagger['x-user-related'] = fully_qualified_name(related_user_res)

    paths[swagger_path] = path_swagger

    # Add any global path parameters.
    param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {}
    if param_data_map:
      path_parameters_swagger = []
      for path_parameter in param_data_map:
        description = param_data_map[path_parameter].get('description')
        path_parameters_swagger.append(swagger_parameter(path_parameter, description))

      path_swagger['parameters'] = path_parameters_swagger

    # Add the individual HTTP operations.
    method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
    for method_name in method_names:
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object
      method = getattr(view_class, method_name.lower(), None)
      if method is None:
        logger.debug('Unable to find method for %s in class %s', method_name, view_class)
        continue

      operationId = method_metadata(method, 'nickname')
      operation_swagger = {
        'operationId': operationId,
        'parameters': [],
      }

      if operationId is None:
        continue

      if operationId in operation_ids:
        raise Exception('Duplicate operation Id: %s' % operationId)

      operation_ids.add(operationId)

      # Mark the method as internal.
      internal = method_metadata(method, 'internal')
      if internal is not None:
        operation_swagger['x-internal'] = True

      if include_internal:
        requires_fresh_login = method_metadata(method, 'requires_fresh_login')
        if requires_fresh_login is not None:
          operation_swagger['x-requires-fresh-login'] = True

      # Add the path parameters.
      if rule.arguments:
        for path_parameter in rule.arguments:
          description = param_data_map.get(path_parameter, {}).get('description')
          operation_swagger['parameters'].append(swagger_parameter(path_parameter, description))

      # Add the query parameters.
      if '__api_query_params' in dir(method):
        for query_parameter_info in method.__api_query_params:
          name = query_parameter_info['name']
          description = query_parameter_info['help']
          param_type = TYPE_CONVERTER[query_parameter_info['type']]
          required = query_parameter_info['required']

          operation_swagger['parameters'].append(
            swagger_parameter(name, description, kind='query',
                              param_type=param_type,
                              required=required,
                              enum=query_parameter_info['choices']))

      # Add the OAuth security block.
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject
      scope = method_metadata(method, 'oauth2_scope')
      if scope and not compact:
        operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}]

      # Add the responses block.
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject
      response_schema_name = method_metadata(method, 'response_schema')
      if not compact:
        if response_schema_name:
          models[response_schema_name] = view_class.schemas[response_schema_name]

        models['ApiError'] = {
          'type': 'object',
          'properties': {
            'status': {
              'type': 'integer',
              'description': 'Status code of the response.'
            },
            'type': {
              'type': 'string',
              'description': 'Reference to the type of the error.'
            },
            'detail': {
              'type': 'string',
              'description': 'Details about the specific instance of the error.'
            },
            'title': {
              'type': 'string',
              'description': 'Unique error code to identify the type of error.'
            },
            'error_message': {
              'type': 'string',
              'description': 'Deprecated; alias for detail'
            },
            'error_type': {
              'type': 'string',
              'description': 'Deprecated; alias for detail'
            }
          },
          'required': [
            'status',
            'type',
            'title',
          ]
        }

      responses = {
        '400': {
          'description': 'Bad Request',
        },

        '401': {
          'description': 'Session required',
        },

        '403': {
          'description': 'Unauthorized access',
        },

        '404': {
          'description': 'Not found',
        },
      }

      for _, body in responses.items():
        body['schema'] = {'$ref': '#/definitions/ApiError'}

      if method_name == 'DELETE':
        responses['204'] = {
          'description': 'Deleted'
        }
      elif method_name == 'POST':
        responses['201'] = {
          'description': 'Successful creation'
        }
      else:
        responses['200'] = {
          'description': 'Successful invocation'
        }

        if response_schema_name:
          responses['200']['schema'] = {
            '$ref': '#/definitions/%s' % response_schema_name
          }

      operation_swagger['responses'] = responses

      # Add the request block.
      request_schema_name = method_metadata(method, 'request_schema')
      if request_schema_name and not compact:
        models[request_schema_name] = view_class.schemas[request_schema_name]

        operation_swagger['parameters'].append(
          swagger_parameter('body', 'Request body contents.', kind='body',
                            schema=request_schema_name))

      # Add the operation to the parent path.
      if not internal or (internal and include_internal):
        path_swagger[method_name.lower()] = operation_swagger

  tags.sort(key=lambda t: t['name'])
  paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag']))

  if compact:
    return {'paths': paths}
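In compact mode only the path index survives. For the endpoints added in this commit, the result would be shaped roughly like the comment below; the exact contents depend on the live url_map, so this is illustrative only:

# route_data = generate_route_data()
# => {'paths': OrderedDict([
#      ('/v1/superuser/config',
#       {'x-name': 'config_endpoints.api.suconfig.SuperUserConfig',
#        'x-path': '/v1/superuser/config',
#        'x-tag': 'suconfig',
#        'get': {'operationId': 'scGetConfig', 'parameters': [], 'responses': {...}}}),
#      ...])}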
config_app/config_endpoints/api/suconfig.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import logging

from config_endpoints.api import resource, ApiResource, verify_not_prod, nickname
from config_app import app, config_provider

logger = logging.getLogger(__name__)


@resource('/v1/superuser/config')
class SuperUserConfig(ApiResource):
  """ Resource for fetching and updating the current configuration, if any. """
  schemas = {
    'UpdateConfig': {
      'type': 'object',
      'description': 'Updates the YAML config file',
      'required': [
        'config',
        'hostname'
      ],
      'properties': {
        'config': {
          'type': 'object'
        },
        'hostname': {
          'type': 'string'
        },
        'password': {
          'type': 'string'
        },
      },
    },
  }

  @verify_not_prod
  @nickname('scGetConfig')
  def get(self):
    """ Returns the currently defined configuration, if any. """
    config_object = config_provider.get_config()
    logger.debug(config_object)
    logger.debug(config_provider)
    # Todo: do we even need this endpoint? Since we'll be loading the config in browser
    return {
      'config': config_object
    }


@resource('/v1/superuser/registrystatus')
class SuperUserRegistryStatus(ApiResource):
  """ Resource for determining the status of the registry, such as if config exists,
      if a database is configured, and if it has any defined users.
  """
  @nickname('scRegistryStatus')
  @verify_not_prod
  def get(self):
    """ Returns the status of the registry. """

    # If we have SETUP_COMPLETE, then we're ready to go!
    if app.config.get('SETUP_COMPLETE', False):
      return {
        'provider_id': config_provider.provider_id,
        'requires_restart': config_provider.requires_restart(app.config),
        'status': 'ready'
      }

    # If there is no conf/stack volume, then report that status.
    if not config_provider.volume_exists():
      return {
        'status': 'missing-config-dir'
      }

    # If there is no config file, we need to setup the database.
    if not config_provider.config_exists():
      return {
        'status': 'config-db'
      }

    # If the database isn't yet valid, then we need to set it up.
    # if not database_is_valid():
    #   return {
    #     'status': 'setup-db'
    #   }
    #
    # return {
    #   'status': 'create-superuser' if not database_has_users() else 'config'
    # }

    return {}
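The registry-status endpoint effectively walks a small state machine from "no volume" to "ready". A client poll might branch on it like this; the test client and the /api prefix are assumptions, not part of the commit:

status = client.get('/api/v1/superuser/registrystatus').get_json().get('status')
if status == 'ready':
  pass  # SETUP_COMPLETE is set: the registry can be started
elif status == 'missing-config-dir':
  pass  # no conf/stack volume mounted yet
elif status == 'config-db':
  pass  # volume exists but no config.yaml: begin database setup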
config_app/config_endpoints/api/superuser.py (new file, 151 lines)
@@ -0,0 +1,151 @@
import os
import logging
import pathvalidate
from flask import request

from config_endpoints.exception import InvalidRequest
from config_endpoints.api import resource, ApiResource, verify_not_prod, nickname
from config_util.ssl import load_certificate, CertInvalidException
from config_app import app, config_provider

logger = logging.getLogger(__name__)
EXTRA_CA_DIRECTORY = 'extra_ca_certs'


@resource('/v1/superuser/customcerts/<certpath>')
class SuperUserCustomCertificate(ApiResource):
  """ Resource for managing a custom certificate. """

  @nickname('uploadCustomCertificate')
  @verify_not_prod
  def post(self, certpath):
    uploaded_file = request.files['file']
    if not uploaded_file:
      raise InvalidRequest('Missing certificate file')

    # Save the certificate.
    certpath = pathvalidate.sanitize_filename(certpath)
    if not certpath.endswith('.crt'):
      raise InvalidRequest('Invalid certificate file: must have suffix `.crt`')

    logger.debug('Saving custom certificate %s', certpath)
    cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath)
    config_provider.save_volume_file(cert_full_path, uploaded_file)
    logger.debug('Saved custom certificate %s', certpath)

    # Validate the certificate.
    try:
      logger.debug('Loading custom certificate %s', certpath)
      with config_provider.get_volume_file(cert_full_path) as f:
        load_certificate(f.read())
    except CertInvalidException:
      logger.exception('Got certificate invalid error for cert %s', certpath)
      return '', 204
    except IOError:
      logger.exception('Got IO error for cert %s', certpath)
      return '', 204

    # Call the update script to install the certificate immediately.
    if not app.config['TESTING']:
      logger.debug('Calling certs_install.sh')
      if os.system('/conf/init/certs_install.sh') != 0:
        raise Exception('Could not install certificates')

      logger.debug('certs_install.sh completed')

    return '', 204

  @nickname('deleteCustomCertificate')
  @verify_not_prod
  def delete(self, certpath):
    cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath)
    config_provider.remove_volume_file(cert_full_path)
    return '', 204


@resource('/v1/superuser/customcerts')
class SuperUserCustomCertificates(ApiResource):
  """ Resource for managing custom certificates. """

  @nickname('getCustomCertificates')
  @verify_not_prod
  def get(self):
    has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY)
    extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY)
    if extra_certs_found is None:
      return {
        'status': 'file' if has_extra_certs_path else 'none',
      }

    cert_views = []
    for extra_cert_path in extra_certs_found:
      try:
        cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path)
        with config_provider.get_volume_file(cert_full_path) as f:
          certificate = load_certificate(f.read())
          cert_views.append({
            'path': extra_cert_path,
            'names': list(certificate.names),
            'expired': certificate.expired,
          })
      except CertInvalidException as cie:
        cert_views.append({
          'path': extra_cert_path,
          'error': cie.message,
        })
      except IOError as ioe:
        cert_views.append({
          'path': extra_cert_path,
          'error': ioe.message,
        })

    return {
      'status': 'directory',
      'certs': cert_views,
    }

# TODO(config) port this endpoint when (https://github.com/quay/quay/pull/3055) merged to ensure no conflicts
# @resource('/v1/superuser/keys')
# class SuperUserServiceKeyManagement(ApiResource):
#   """ Resource for managing service keys."""
#   schemas = {
#     'CreateServiceKey': {
#       'id': 'CreateServiceKey',
#       'type': 'object',
#       'description': 'Description of creation of a service key',
#       'required': ['service', 'expiration'],
#       'properties': {
#         'service': {
#           'type': 'string',
#           'description': 'The service authenticating with this key',
#         },
#         'name': {
#           'type': 'string',
#           'description': 'The friendly name of a service key',
#         },
#         'metadata': {
#           'type': 'object',
#           'description': 'The key/value pairs of this key\'s metadata',
#         },
#         'notes': {
#           'type': 'string',
#           'description': 'If specified, the extra notes for the key',
#         },
#         'expiration': {
#           'description': 'The expiration date as a unix timestamp',
#           'anyOf': [{'type': 'number'}, {'type': 'null'}],
#         },
#       },
#     },
#   }
#
#   @verify_not_prod
#   @nickname('listServiceKeys')
#   def get(self):
#     keys = pre_oci_model.list_all_service_keys()
#
#     return jsonify({
#       'keys': [key.to_dict() for key in keys],
#     })
#
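Exercising the certificate endpoints with Flask's test client; the /api prefix and the PEM bytes below are placeholders, not values from the commit:

from io import BytesIO

resp = client.post('/api/v1/superuser/customcerts/myca.crt',
                   data={'file': (BytesIO(b'-----BEGIN CERTIFICATE-----\n...'), 'myca.crt')},
                   content_type='multipart/form-data')
# 204 is returned even when validation fails; bad certs surface later as
# 'error' entries in the GET /v1/superuser/customcerts listing.
assert resp.status_code == 204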
config_app/config_endpoints/api/user.py (new file, 18 lines)
@@ -0,0 +1,18 @@
from config_endpoints.api import resource, ApiResource, nickname


@resource('/v1/user/')
class User(ApiResource):
  """ Operations related to users. """

  @nickname('getLoggedInUser')
  def get(self):
    """ Get user information for the authenticated user. """
    # user = get_authenticated_user()

    # return user_view(user)
    return {
      'anonymous': False,
      # 'username': user.username,
    }
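As stubbed, the endpoint ignores authentication entirely and always reports a non-anonymous user:

# GET /v1/user/
# 200 OK
# {"anonymous": false}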
@@ -1,16 +1,10 @@
import logging
import os
import re
import sys
from collections import OrderedDict
from cachetools import lru_cache

from flask import make_response, render_template
from flask_restful import reqparse

from config_app.config_endpoints.api import method_metadata
from config_app.app import app


def truthy_bool(param):
  return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'}
|
||||
|
@ -60,246 +54,3 @@ def fully_qualified_name(method_view_class):
|
|||
return '%s.%s' % (method_view_class.__module__, method_view_class.__name__)
|
||||
|
||||
|
||||
# @lru_cache(maxsize=1)
|
||||
def generate_route_data():
|
||||
include_internal = True
|
||||
compact = True
|
||||
|
||||
def swagger_parameter(name, description, kind='path', param_type='string', required=True,
|
||||
enum=None, schema=None):
|
||||
# https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject
|
||||
parameter_info = {
|
||||
'name': name,
|
||||
'in': kind,
|
||||
'required': required
|
||||
}
|
||||
|
||||
if schema:
|
||||
parameter_info['schema'] = {
|
||||
'$ref': '#/definitions/%s' % schema
|
||||
}
|
||||
else:
|
||||
parameter_info['type'] = param_type
|
||||
|
||||
if enum is not None and len(list(enum)) > 0:
|
||||
parameter_info['enum'] = list(enum)
|
||||
|
||||
return parameter_info
|
||||
|
||||
paths = {}
|
||||
models = {}
|
||||
tags = []
|
||||
tags_added = set()
|
||||
operation_ids = set()
|
||||
|
||||
print('APP URL MAp:')
|
||||
print(app.url_map)
|
||||
for rule in app.url_map.iter_rules():
|
||||
endpoint_method = app.view_functions[rule.endpoint]
|
||||
|
||||
# Verify that we have a view class for this API method.
|
||||
if not 'view_class' in dir(endpoint_method):
|
||||
continue
|
||||
|
||||
view_class = endpoint_method.view_class
|
||||
|
||||
# Hide the class if it is internal.
|
||||
    internal = method_metadata(view_class, 'internal')
    if not include_internal and internal:
      continue

    # Build the tag.
    parts = fully_qualified_name(view_class).split('.')
    tag_name = parts[-2]
    if tag_name not in tags_added:
      tags_added.add(tag_name)
      tags.append({
        'name': tag_name,
        'description': (sys.modules[view_class.__module__].__doc__ or '').strip()
      })

    # Build the Swagger data for the path.
    swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule)
    full_name = fully_qualified_name(view_class)
    path_swagger = {
      'x-name': full_name,
      'x-path': swagger_path,
      'x-tag': tag_name
    }

    related_user_res = method_metadata(view_class, 'related_user_resource')
    if related_user_res is not None:
      path_swagger['x-user-related'] = fully_qualified_name(related_user_res)

    paths[swagger_path] = path_swagger

    # Add any global path parameters.
    param_data_map = view_class.__api_path_params if '__api_path_params' in dir(view_class) else {}
    if param_data_map:
      path_parameters_swagger = []
      for path_parameter in param_data_map:
        description = param_data_map[path_parameter].get('description')
        path_parameters_swagger.append(swagger_parameter(path_parameter, description))

      path_swagger['parameters'] = path_parameters_swagger

    # Add the individual HTTP operations.
    method_names = list(rule.methods.difference(['HEAD', 'OPTIONS']))
    for method_name in method_names:
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object
      method = getattr(view_class, method_name.lower(), None)
      if method is None:
        logger.debug('Unable to find method for %s in class %s', method_name, view_class)
        continue

      operationId = method_metadata(method, 'nickname')
      operation_swagger = {
        'operationId': operationId,
        'parameters': [],
      }

      if operationId is None:
        continue

      if operationId in operation_ids:
        raise Exception('Duplicate operation Id: %s' % operationId)

      operation_ids.add(operationId)

      # Mark the method as internal.
      internal = method_metadata(method, 'internal')
      if internal is not None:
        operation_swagger['x-internal'] = True

      if include_internal:
        requires_fresh_login = method_metadata(method, 'requires_fresh_login')
        if requires_fresh_login is not None:
          operation_swagger['x-requires-fresh-login'] = True

      # Add the path parameters.
      if rule.arguments:
        for path_parameter in rule.arguments:
          description = param_data_map.get(path_parameter, {}).get('description')
          operation_swagger['parameters'].append(swagger_parameter(path_parameter, description))

      # Add the query parameters.
      if '__api_query_params' in dir(method):
        for query_parameter_info in method.__api_query_params:
          name = query_parameter_info['name']
          description = query_parameter_info['help']
          param_type = TYPE_CONVERTER[query_parameter_info['type']]
          required = query_parameter_info['required']

          operation_swagger['parameters'].append(
            swagger_parameter(name, description, kind='query',
                              param_type=param_type,
                              required=required,
                              enum=query_parameter_info['choices']))

      # Add the OAuth security block.
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject
      scope = method_metadata(method, 'oauth2_scope')
      if scope and not compact:
        operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}]

      # Add the responses block.
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject
      response_schema_name = method_metadata(method, 'response_schema')
      if not compact:
        if response_schema_name:
          models[response_schema_name] = view_class.schemas[response_schema_name]

        models['ApiError'] = {
          'type': 'object',
          'properties': {
            'status': {
              'type': 'integer',
              'description': 'Status code of the response.'
            },
            'type': {
              'type': 'string',
              'description': 'Reference to the type of the error.'
            },
            'detail': {
              'type': 'string',
              'description': 'Details about the specific instance of the error.'
            },
            'title': {
              'type': 'string',
              'description': 'Unique error code to identify the type of error.'
            },
            'error_message': {
              'type': 'string',
              'description': 'Deprecated; alias for detail'
            },
            'error_type': {
              'type': 'string',
              'description': 'Deprecated; alias for detail'
            }
          },
          'required': [
            'status',
            'type',
            'title',
          ]
        }

        responses = {
          '400': {
            'description': 'Bad Request',
          },

          '401': {
            'description': 'Session required',
          },

          '403': {
            'description': 'Unauthorized access',
          },

          '404': {
            'description': 'Not found',
          },
        }

        for _, body in responses.items():
          body['schema'] = {'$ref': '#/definitions/ApiError'}

        if method_name == 'DELETE':
          responses['204'] = {
            'description': 'Deleted'
          }
        elif method_name == 'POST':
          responses['201'] = {
            'description': 'Successful creation'
          }
        else:
          responses['200'] = {
            'description': 'Successful invocation'
          }

          if response_schema_name:
            responses['200']['schema'] = {
              '$ref': '#/definitions/%s' % response_schema_name
            }

        operation_swagger['responses'] = responses

      # Add the request block.
      request_schema_name = method_metadata(method, 'request_schema')
      if request_schema_name and not compact:
        models[request_schema_name] = view_class.schemas[request_schema_name]

        operation_swagger['parameters'].append(
          swagger_parameter('body', 'Request body contents.', kind='body',
                            schema=request_schema_name))

      # Add the operation to the parent path.
      if not internal or (internal and include_internal):
        path_swagger[method_name.lower()] = operation_swagger

  tags.sort(key=lambda t: t['name'])
  paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag']))

  if compact:
    return {'paths': paths}
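For orientation, this is the shape of a single entry the loop above accumulates into `paths`. All values below are illustrative, not taken from this commit:

# Hypothetical entry in `paths` after one iteration (values are made up):
path_swagger = {
  'x-name': 'config_endpoints.api.suconfig.SuperUserConfig',   # fully-qualified view class
  'x-path': '/api/v1/superuser/config',                        # rule with {params} substituted
  'x-tag': 'suconfig',
  'get': {
    'operationId': 'scGetConfig',
    'parameters': [],
    'responses': {
      '200': {'description': 'Successful invocation'},
      '400': {'description': 'Bad Request', 'schema': {'$ref': '#/definitions/ApiError'}},
    },
  },
}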
123 config_app/config_endpoints/exception.py Normal file
@@ -0,0 +1,123 @@
from enum import Enum

from flask import url_for
from werkzeug.exceptions import HTTPException


class ApiErrorType(Enum):
  external_service_timeout = 'external_service_timeout'
  invalid_request = 'invalid_request'
  invalid_response = 'invalid_response'
  invalid_token = 'invalid_token'
  expired_token = 'expired_token'
  insufficient_scope = 'insufficient_scope'
  fresh_login_required = 'fresh_login_required'
  exceeds_license = 'exceeds_license'
  not_found = 'not_found'
  downstream_issue = 'downstream_issue'


ERROR_DESCRIPTION = {
  ApiErrorType.external_service_timeout.value: "An external service timed out. Retrying the request may resolve the issue.",
  ApiErrorType.invalid_request.value: "The request was invalid. It may have contained invalid values or was improperly formatted.",
  ApiErrorType.invalid_response.value: "The response was invalid.",
  ApiErrorType.invalid_token.value: "The access token provided was invalid.",
  ApiErrorType.expired_token.value: "The access token provided has expired.",
  ApiErrorType.insufficient_scope.value: "The access token did not have sufficient scope to access the requested resource.",
  ApiErrorType.fresh_login_required.value: "The action requires a fresh login to succeed.",
  ApiErrorType.exceeds_license.value: "The action was refused because the current license does not allow it.",
  ApiErrorType.not_found.value: "The resource was not found.",
  ApiErrorType.downstream_issue.value: "An error occurred in a downstream service.",
}


class ApiException(HTTPException):
  """
  Represents an error in the application/problem+json format.

  See: https://tools.ietf.org/html/rfc7807

  - "type" (string) - A URI reference that identifies the problem type.

  - "title" (string) - A short, human-readable summary of the problem
    type. It SHOULD NOT change from occurrence to occurrence of the
    problem, except for purposes of localization.

  - "status" (number) - The HTTP status code.

  - "detail" (string) - A human-readable explanation specific to this
    occurrence of the problem.

  - "instance" (string) - A URI reference that identifies the specific
    occurrence of the problem. It may or may not yield further
    information if dereferenced.
  """

  def __init__(self, error_type, status_code, error_description, payload=None):
    Exception.__init__(self)
    self.error_description = error_description
    self.code = status_code
    self.payload = payload
    self.error_type = error_type
    self.data = self.to_dict()

    super(ApiException, self).__init__(error_description, None)

  def to_dict(self):
    rv = dict(self.payload or ())

    if self.error_description is not None:
      rv['detail'] = self.error_description
      rv['error_message'] = self.error_description  # TODO: deprecate

    rv['error_type'] = self.error_type.value  # TODO: deprecate
    rv['title'] = self.error_type.value
    rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True)
    rv['status'] = self.code

    return rv


class ExternalServiceError(ApiException):
  def __init__(self, error_description, payload=None):
    ApiException.__init__(self, ApiErrorType.external_service_timeout, 520, error_description, payload)


class InvalidRequest(ApiException):
  def __init__(self, error_description, payload=None):
    ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload)


class InvalidResponse(ApiException):
  def __init__(self, error_description, payload=None):
    ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload)


class InvalidToken(ApiException):
  def __init__(self, error_description, payload=None):
    ApiException.__init__(self, ApiErrorType.invalid_token, 401, error_description, payload)


class ExpiredToken(ApiException):
  def __init__(self, error_description, payload=None):
    ApiException.__init__(self, ApiErrorType.expired_token, 401, error_description, payload)


class FreshLoginRequired(ApiException):
  def __init__(self, payload=None):
    ApiException.__init__(self, ApiErrorType.fresh_login_required, 401, "Requires fresh login", payload)


class ExceedsLicenseException(ApiException):
  def __init__(self, payload=None):
    ApiException.__init__(self, ApiErrorType.exceeds_license, 402, 'Payment Required', payload)


class NotFound(ApiException):
  def __init__(self, payload=None):
    ApiException.__init__(self, ApiErrorType.not_found, 404, 'Not Found', payload)


class DownstreamIssue(ApiException):
  def __init__(self, error_description, payload=None):
    ApiException.__init__(self, ApiErrorType.downstream_issue, 520, error_description, payload)
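A minimal sketch of how this hierarchy is meant to surface over HTTP. The handler below is illustrative wiring, not part of this commit, and note that `to_dict()` assumes an endpoint named `api.error` is registered so `url_for` can resolve it:

from flask import Flask, jsonify

app = Flask(__name__)

@app.errorhandler(ApiException)
def handle_api_error(error):
  # error.data is the RFC 7807 dict built by to_dict() above.
  response = jsonify(error.data)
  response.status_code = error.code
  response.content_type = 'application/problem+json'
  return response

@app.route('/example')
def example():
  raise InvalidRequest('Missing required field: name')  # rendered as a 400 problem document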
@@ -1,17 +1,23 @@
from flask import Blueprint
from common import render_page_template
from config_app.config_endpoints.common import generate_route_data
from util.cache import no_cache
from config_endpoints.common import render_page_template
from config_endpoints.api.discovery import generate_route_data
# from config_util.cache import no_cache


setup_web = Blueprint('setup_web', __name__, template_folder='templates')


# @lru_cache(maxsize=1)
def _get_route_data():
  return generate_route_data()


def render_page_template_with_routedata(name, *args, **kwargs):
  return render_page_template(name, generate_route_data(), *args, **kwargs)
  return render_page_template(name, _get_route_data(), *args, **kwargs)


# @no_cache
@setup_web.route('/', methods=['GET'], defaults={'path': ''})
@no_cache
def index(path, **kwargs):
  return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs)
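The commented-out `@lru_cache(maxsize=1)` hints that the route data is intended to be computed once per process. A sketch of that memoization, assuming the `functools32` backport on Python 2 (stdlib `functools.lru_cache` only exists on Python 3.2+):

try:
  from functools import lru_cache      # Python 3.2+
except ImportError:
  from functools32 import lru_cache    # assumed backport on Python 2

@lru_cache(maxsize=1)
def _get_route_data():
  return generate_route_data()  # computed on first call, cached for later renders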
108 config_app/config_test/testconfig.py Normal file
@@ -0,0 +1,108 @@
import os

from datetime import datetime, timedelta
from tempfile import NamedTemporaryFile

from config import DefaultConfig


class FakeTransaction(object):
  def __enter__(self):
    return self

  def __exit__(self, exc_type, value, traceback):
    pass


TEST_DB_FILE = NamedTemporaryFile(delete=True)


class TestConfig(DefaultConfig):
  TESTING = True
  SECRET_KEY = 'a36c9d7d-25a9-4d3f-a586-3d2f8dc40a83'
  BILLING_TYPE = 'FakeStripe'

  TEST_DB_FILE = TEST_DB_FILE
  DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///{0}'.format(TEST_DB_FILE.name))
  DB_CONNECTION_ARGS = {
    'threadlocals': True,
    'autorollback': True,
  }

  @staticmethod
  def create_transaction(db):
    return FakeTransaction()

  DB_TRANSACTION_FACTORY = create_transaction

  DISTRIBUTED_STORAGE_CONFIG = {'local_us': ['FakeStorage', {}], 'local_eu': ['FakeStorage', {}]}
  DISTRIBUTED_STORAGE_PREFERENCE = ['local_us']

  BUILDLOGS_MODULE_AND_CLASS = ('test.testlogs', 'testlogs.TestBuildLogs')
  BUILDLOGS_OPTIONS = ['devtable', 'building', 'deadbeef-dead-beef-dead-beefdeadbeef', False]

  USERFILES_LOCATION = 'local_us'

  FEATURE_SUPER_USERS = True
  FEATURE_BILLING = True
  FEATURE_MAILING = True
  SUPER_USERS = ['devtable']

  LICENSE_USER_LIMIT = 500
  LICENSE_EXPIRATION = datetime.now() + timedelta(weeks=520)
  LICENSE_EXPIRATION_WARNING = datetime.now() + timedelta(weeks=520)

  FEATURE_GITHUB_BUILD = True
  FEATURE_BITTORRENT = True
  FEATURE_ACI_CONVERSION = True

  CLOUDWATCH_NAMESPACE = None

  FEATURE_SECURITY_SCANNER = True
  FEATURE_SECURITY_NOTIFICATIONS = True
  SECURITY_SCANNER_ENDPOINT = 'http://fakesecurityscanner/'
  SECURITY_SCANNER_API_VERSION = 'v1'
  SECURITY_SCANNER_ENGINE_VERSION_TARGET = 1
  SECURITY_SCANNER_API_TIMEOUT_SECONDS = 1

  FEATURE_SIGNING = True

  SIGNING_ENGINE = 'gpg2'

  GPG2_PRIVATE_KEY_NAME = 'EEB32221'
  GPG2_PRIVATE_KEY_FILENAME = 'test/data/signing-private.gpg'
  GPG2_PUBLIC_KEY_FILENAME = 'test/data/signing-public.gpg'

  INSTANCE_SERVICE_KEY_KID_LOCATION = 'test/data/test.kid'
  INSTANCE_SERVICE_KEY_LOCATION = 'test/data/test.pem'

  PROMETHEUS_AGGREGATOR_URL = None

  GITHUB_LOGIN_CONFIG = {}
  GOOGLE_LOGIN_CONFIG = {}

  FEATURE_GITHUB_LOGIN = True
  FEATURE_GOOGLE_LOGIN = True

  TESTOIDC_LOGIN_CONFIG = {
    'CLIENT_ID': 'foo',
    'CLIENT_SECRET': 'bar',
    'OIDC_SERVER': 'http://fakeoidc',
    'DEBUGGING': True,
    'LOGIN_BINDING_FIELD': 'sub',
  }

  RECAPTCHA_SITE_KEY = 'somekey'
  RECAPTCHA_SECRET_KEY = 'somesecretkey'

  FEATURE_APP_REGISTRY = True
  FEATURE_TEAM_SYNCING = True
  FEATURE_CHANGE_TAG_EXPIRATION = True

  TAG_EXPIRATION_OPTIONS = ['0s', '1s', '1d', '1w', '2w', '4w']

  DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None

  DATA_MODEL_CACHE_CONFIG = {
    'engine': 'inmemory',
  }
16 config_app/config_util/config/__init__.py Normal file
@@ -0,0 +1,16 @@
from config_util.config.fileprovider import FileConfigProvider
from config_util.config.testprovider import TestConfigProvider
from config_util.config.k8sprovider import KubernetesConfigProvider


def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False):
  """ Loads and returns the config provider for the current environment. """
  if testing:
    return TestConfigProvider()

  if kubernetes:
    return KubernetesConfigProvider(config_volume, yaml_filename, py_filename)

  return FileConfigProvider(config_volume, yaml_filename, py_filename)
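A short usage sketch of the factory; the paths and filenames below are illustrative, not taken from this commit:

provider = get_config_provider('/conf/stack', 'config.yaml', 'config.py',
                               testing=False, kubernetes=False)
if provider.config_exists():
  config = provider.get_config()  # parsed contents of config.yaml as a dict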
71 config_app/config_util/config/basefileprovider.py Normal file
@@ -0,0 +1,71 @@
import os
import logging

from config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml,
                                             CannotWriteConfigException)

logger = logging.getLogger(__name__)


class BaseFileProvider(BaseProvider):
  """ Base implementation of the config provider that reads the data from the file system. """
  def __init__(self, config_volume, yaml_filename, py_filename):
    self.config_volume = config_volume
    self.yaml_filename = yaml_filename
    self.py_filename = py_filename

    self.yaml_path = os.path.join(config_volume, yaml_filename)
    self.py_path = os.path.join(config_volume, py_filename)

  def update_app_config(self, app_config):
    if os.path.exists(self.py_path):
      logger.debug('Applying config file: %s', self.py_path)
      app_config.from_pyfile(self.py_path)

    if os.path.exists(self.yaml_path):
      logger.debug('Applying config file: %s', self.yaml_path)
      import_yaml(app_config, self.yaml_path)

  def get_config(self):
    if not self.config_exists():
      return None

    config_obj = {}
    import_yaml(config_obj, self.yaml_path)
    return config_obj

  def config_exists(self):
    return self.volume_file_exists(self.yaml_filename)

  def volume_exists(self):
    return os.path.exists(self.config_volume)

  def volume_file_exists(self, filename):
    return os.path.exists(os.path.join(self.config_volume, filename))

  def get_volume_file(self, filename, mode='r'):
    return open(os.path.join(self.config_volume, filename), mode=mode)

  def get_volume_path(self, directory, filename):
    return os.path.join(directory, filename)

  def list_volume_directory(self, path):
    dirpath = os.path.join(self.config_volume, path)
    if not os.path.exists(dirpath):
      return None

    if not os.path.isdir(dirpath):
      return None

    return os.listdir(dirpath)

  def requires_restart(self, app_config):
    file_config = self.get_config()
    if not file_config:
      return False

    for key in file_config:
      if app_config.get(key) != file_config[key]:
        return True

    return False
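Note that `requires_restart` only flags keys that are present in the on-disk YAML and differ from the live app config; keys that exist only in memory are ignored. A self-contained sketch of that comparison (all data made up):

file_config = {'SERVER_HOSTNAME': 'quay.example.com'}               # what config.yaml now says
app_config = {'SERVER_HOSTNAME': 'old.example.com', 'DEBUG': True}  # what the running app loaded

needs_restart = any(app_config.get(key) != file_config[key] for key in file_config)
assert needs_restart  # the hostname changed on disk, so a restart is required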
@@ -6,7 +6,7 @@ from six import add_metaclass

from jsonschema import validate, ValidationError

from util.config.schema import CONFIG_SCHEMA
from config_util.config.schema import CONFIG_SCHEMA

logger = logging.getLogger(__name__)
@@ -1,8 +1,8 @@
import os
import logging

from util.config.provider.baseprovider import export_yaml, CannotWriteConfigException
from util.config.provider.basefileprovider import BaseFileProvider
from config_util.config.baseprovider import export_yaml, CannotWriteConfigException
from config_util.config.basefileprovider import BaseFileProvider


logger = logging.getLogger(__name__)
170 config_app/config_util/config/k8sprovider.py Normal file
@@ -0,0 +1,170 @@
import os
import logging
import json
import base64
import time

from requests import Request, Session

from config_util.config.baseprovider import CannotWriteConfigException, get_yaml
from config_util.config.basefileprovider import BaseFileProvider


logger = logging.getLogger(__name__)

KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
port = os.environ.get('KUBERNETES_SERVICE_PORT')
if port:
  KUBERNETES_API_HOST += ':' + port

SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'

QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')


class KubernetesConfigProvider(BaseFileProvider):
  """ Implementation of the config provider that reads and writes configuration
      data from a Kubernetes Secret. """
  def __init__(self, config_volume, yaml_filename, py_filename):
    super(KubernetesConfigProvider, self).__init__(config_volume, yaml_filename, py_filename)

    # Load the service account token from the local store.
    if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
      raise Exception('Cannot load Kubernetes service account token')

    with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
      self._service_token = f.read()

  @property
  def provider_id(self):
    return 'k8s'

  def get_volume_path(self, directory, filename):
    # NOTE: Overridden to ensure we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    return "_".join([directory.rstrip('/'), filename])

  def volume_file_exists(self, filename):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()
    if not secret or not secret.get('data'):
      return False
    return filename in secret['data']

  def list_volume_directory(self, path):
    # NOTE: Overridden because we don't have subdirectories, which aren't supported
    # in Kubernetes secrets.
    secret = self._lookup_secret()

    if not secret:
      return []

    paths = []
    for filename in secret.get('data', {}):
      if filename.startswith(path):
        paths.append(filename[len(path) + 1:])
    return paths

  def save_config(self, config_obj):
    self._update_secret_file(self.yaml_filename, get_yaml(config_obj))

  def write_volume_file(self, filename, contents):
    try:
      self._update_secret_file(filename, contents)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def remove_volume_file(self, filename):
    try:
      self._update_secret_file(filename, None)
    except IOError as ioe:
      raise CannotWriteConfigException(str(ioe))

  def save_volume_file(self, filename, flask_file):
    filepath = super(KubernetesConfigProvider, self).save_volume_file(filename, flask_file)
    with open(filepath, 'r') as f:
      self.write_volume_file(filename, f.read())

  def _assert_success(self, response):
    if response.status_code != 200:
      logger.error('Kubernetes API call failed with response: %s => %s', response.status_code,
                   response.text)
      raise CannotWriteConfigException('Kubernetes API call failed: %s' % response.text)

  def _update_secret_file(self, filename, value=None):
    # Check first that the namespace for Quay Enterprise exists. If it does not, report that
    # as an error, as it seems to be a common issue.
    namespace_url = 'namespaces/%s' % (QE_NAMESPACE)
    response = self._execute_k8s_api('GET', namespace_url)
    if response.status_code // 100 != 2:
      msg = 'A Kubernetes namespace with name `%s` must be created to save config' % QE_NAMESPACE
      raise CannotWriteConfigException(msg)

    # Check if the secret exists. If not, then we create an empty secret and then update the file
    # inside.
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    secret = self._lookup_secret()
    if secret is None:
      self._assert_success(self._execute_k8s_api('POST', secret_url, {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {
          "name": QE_CONFIG_SECRET
        },
        "data": {}
      }))

      # Re-read the secret we just created so the update below has a real object to
      # mutate; otherwise `secret` would still be None here.
      secret = self._lookup_secret()

    # Update the secret to reflect the file change.
    secret['data'] = secret.get('data', {})

    if value is not None:
      secret['data'][filename] = base64.b64encode(value)
    else:
      secret['data'].pop(filename)

    self._assert_success(self._execute_k8s_api('PUT', secret_url, secret))

    # Wait until the local mounted copy of the secret has been updated, as
    # this is an eventual consistency operation, but the caller expects immediate
    # consistency.
    while True:
      matching_files = set()
      for secret_filename, encoded_value in secret['data'].iteritems():
        expected_value = base64.b64decode(encoded_value)
        try:
          with self.get_volume_file(secret_filename) as f:
            contents = f.read()

          if contents == expected_value:
            matching_files.add(secret_filename)
        except IOError:
          continue

      if matching_files == set(secret['data'].keys()):
        break

      # Sleep for a second and then try again.
      time.sleep(1)

  def _lookup_secret(self):
    secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
    response = self._execute_k8s_api('GET', secret_url)
    if response.status_code != 200:
      return None
    return json.loads(response.text)

  def _execute_k8s_api(self, method, relative_url, data=None):
    headers = {
      'Authorization': 'Bearer ' + self._service_token
    }

    if data:
      headers['Content-Type'] = 'application/json'

    data = json.dumps(data) if data else None
    session = Session()
    url = 'https://%s/api/v1/%s' % (KUBERNETES_API_HOST, relative_url)

    request = Request(method, url, data=data, headers=headers)
    return session.send(request.prepare(), verify=False, timeout=2)
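For orientation, here is the raw Kubernetes REST exchange `_update_secret_file` performs when writing `config.yaml`, reduced to plain `requests` calls. The namespace, secret name, and payload are illustrative; the real code reads the token and API host the same way:

import base64, json, requests

token = open('/var/run/secrets/kubernetes.io/serviceaccount/token').read()
url = ('https://%s/api/v1/namespaces/quay-enterprise/secrets/quay-enterprise-config-secret'
       % KUBERNETES_API_HOST)
headers = {'Authorization': 'Bearer ' + token}

secret = requests.get(url, headers=headers, verify=False).json()            # GET the secret
secret.setdefault('data', {})['config.yaml'] = base64.b64encode('FEATURE_BILLING: false\n')
headers['Content-Type'] = 'application/json'
requests.put(url, data=json.dumps(secret), headers=headers, verify=False)   # PUT it back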
914 config_app/config_util/config/schema.py Normal file
@@ -0,0 +1,914 @@
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
|
||||
# not be documented for external users. These will generally be used for internal test or only
|
||||
# given to customers when they have been briefed on the side effects of using them.
|
||||
INTERNAL_ONLY_PROPERTIES = {
|
||||
'__module__',
|
||||
'__doc__',
|
||||
'create_transaction',
|
||||
|
||||
'TESTING',
|
||||
'SEND_FILE_MAX_AGE_DEFAULT',
|
||||
|
||||
'REPLICATION_QUEUE_NAME',
|
||||
'DOCKERFILE_BUILD_QUEUE_NAME',
|
||||
'CHUNK_CLEANUP_QUEUE_NAME',
|
||||
'SECSCAN_NOTIFICATION_QUEUE_NAME',
|
||||
'SECURITY_SCANNER_ISSUER_NAME',
|
||||
'NOTIFICATION_QUEUE_NAME',
|
||||
'NAMESPACE_GC_QUEUE_NAME',
|
||||
|
||||
'FEATURE_BILLING',
|
||||
'FEATURE_SUPPORT_CHAT',
|
||||
'BILLING_TYPE',
|
||||
|
||||
'INSTANCE_SERVICE_KEY_LOCATION',
|
||||
'INSTANCE_SERVICE_KEY_REFRESH',
|
||||
'INSTANCE_SERVICE_KEY_SERVICE',
|
||||
'INSTANCE_SERVICE_KEY_KID_LOCATION',
|
||||
'INSTANCE_SERVICE_KEY_EXPIRATION',
|
||||
'UNAPPROVED_SERVICE_KEY_TTL_SEC',
|
||||
'EXPIRED_SERVICE_KEY_TTL_SEC',
|
||||
'REGISTRY_JWT_AUTH_MAX_FRESH_S',
|
||||
|
||||
'BITTORRENT_FILENAME_PEPPER',
|
||||
'BITTORRENT_WEBSEED_LIFETIME',
|
||||
|
||||
'SERVICE_LOG_ACCOUNT_ID',
|
||||
'BUILDLOGS_OPTIONS',
|
||||
'LIBRARY_NAMESPACE',
|
||||
'STAGGER_WORKERS',
|
||||
'QUEUE_WORKER_METRICS_REFRESH_SECONDS',
|
||||
'PUSH_TEMP_TAG_EXPIRATION_SEC',
|
||||
'GARBAGE_COLLECTION_FREQUENCY',
|
||||
'PAGE_TOKEN_KEY',
|
||||
'BUILD_MANAGER',
|
||||
'JWTPROXY_AUDIENCE',
|
||||
'SYSTEM_SERVICE_BLACKLIST',
|
||||
'JWTPROXY_SIGNER',
|
||||
'SECURITY_SCANNER_INDEXING_MIN_ID',
|
||||
'STATIC_SITE_BUCKET',
|
||||
'LABEL_KEY_RESERVED_PREFIXES',
|
||||
'TEAM_SYNC_WORKER_FREQUENCY',
|
||||
'DOCUMENTATION_METADATA',
|
||||
'DOCUMENTATION_LOCATION',
|
||||
'JSONIFY_PRETTYPRINT_REGULAR',
|
||||
'SYSTEM_LOGS_FILE',
|
||||
'SYSTEM_LOGS_PATH',
|
||||
'SYSTEM_SERVICES_PATH',
|
||||
'TUF_GUN_PREFIX',
|
||||
'LOGGING_LEVEL',
|
||||
'SIGNED_GRANT_EXPIRATION_SEC',
|
||||
'PROMETHEUS_AGGREGATOR_URL',
|
||||
'DB_TRANSACTION_FACTORY',
|
||||
'NOTIFICATION_SEND_TIMEOUT',
|
||||
'QUEUE_METRICS_TYPE',
|
||||
'MAIL_FAIL_SILENTLY',
|
||||
'LOCAL_OAUTH_HANDLER',
|
||||
'USE_CDN',
|
||||
'ANALYTICS_TYPE',
|
||||
'LAST_ACCESSED_UPDATE_THRESHOLD_S',
|
||||
|
||||
'EXCEPTION_LOG_TYPE',
|
||||
'SENTRY_DSN',
|
||||
'SENTRY_PUBLIC_DSN',
|
||||
|
||||
'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT',
|
||||
'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT',
|
||||
|
||||
'SECURITY_SCANNER_ENDPOINT_BATCH',
|
||||
'SECURITY_SCANNER_API_TIMEOUT_SECONDS',
|
||||
'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS',
|
||||
'SECURITY_SCANNER_ENGINE_VERSION_TARGET',
|
||||
'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS',
|
||||
'SECURITY_SCANNER_API_VERSION',
|
||||
|
||||
'DATA_MODEL_CACHE_CONFIG',
|
||||
|
||||
# TODO: move this into the schema once we support signing in QE.
|
||||
'FEATURE_SIGNING',
|
||||
'TUF_SERVER',
|
||||
}
|
||||
|
||||
CONFIG_SCHEMA = {
|
||||
'type': 'object',
|
||||
'description': 'Schema for Quay configuration',
|
||||
'required': [
|
||||
'PREFERRED_URL_SCHEME',
|
||||
'SERVER_HOSTNAME',
|
||||
'DB_URI',
|
||||
'AUTHENTICATION_TYPE',
|
||||
'DISTRIBUTED_STORAGE_CONFIG',
|
||||
'BUILDLOGS_REDIS',
|
||||
'USER_EVENTS_REDIS',
|
||||
'DISTRIBUTED_STORAGE_PREFERENCE',
|
||||
'DEFAULT_TAG_EXPIRATION',
|
||||
'TAG_EXPIRATION_OPTIONS',
|
||||
],
|
||||
'properties': {
|
||||
# Hosting.
|
||||
'PREFERRED_URL_SCHEME': {
|
||||
'type': 'string',
|
||||
'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`',
|
||||
'enum': ['http', 'https'],
|
||||
'x-example': 'https',
|
||||
},
|
||||
'SERVER_HOSTNAME': {
|
||||
'type': 'string',
|
||||
'description': 'The URL at which Quay is accessible, without the scheme.',
|
||||
'x-example': 'quay.io',
|
||||
},
|
||||
'EXTERNAL_TLS_TERMINATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# User-visible configuration.
|
||||
'REGISTRY_TITLE': {
|
||||
'type': 'string',
|
||||
'description': 'If specified, the long-form title for the registry. Defaults to `Quay Enterprise`.',
|
||||
'x-example': 'Corp Container Service',
|
||||
},
|
||||
'REGISTRY_TITLE_SHORT': {
|
||||
'type': 'string',
|
||||
'description': 'If specified, the short-form title for the registry. Defaults to `Quay Enterprise`.',
|
||||
'x-example': 'CCS',
|
||||
},
|
||||
'CONTACT_INFO': {
|
||||
'type': 'array',
|
||||
'minItems': 1,
|
||||
'uniqueItems': True,
|
||||
'description': 'If specified, contact information to display on the contact page. ' +
|
||||
'If only a single piece of contact information is specified, the contact footer will link directly.',
|
||||
'items': [
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^mailto:(.)+$',
|
||||
'x-example': 'mailto:support@quay.io',
|
||||
'description': 'Adds a link to send an e-mail',
|
||||
},
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^irc://(.)+$',
|
||||
'x-example': 'irc://chat.freenode.net:6665/quay',
|
||||
'description': 'Adds a link to visit an IRC chat room',
|
||||
},
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^tel:(.)+$',
|
||||
'x-example': 'tel:+1-888-930-3475',
|
||||
'description': 'Adds a link to call a phone number',
|
||||
},
|
||||
{
|
||||
'type': 'string',
|
||||
'pattern': '^http(s)?://(.)+$',
|
||||
'x-example': 'https://twitter.com/quayio',
|
||||
'description': 'Adds a link to a defined URL',
|
||||
},
|
||||
],
|
||||
},
|
||||
'SEARCH_RESULTS_PER_PAGE' : {
|
||||
'type': 'number',
|
||||
'description': 'Number of results returned per page by search page. Defaults to 10',
|
||||
'x-example': 10,
|
||||
},
|
||||
'SEARCH_MAX_RESULT_PAGE_COUNT' : {
|
||||
'type': 'number',
|
||||
'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10',
|
||||
'x-example': 10,
|
||||
},
|
||||
|
||||
# E-mail.
|
||||
'FEATURE_MAILING': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether emails are enabled. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
'MAIL_SERVER': {
|
||||
'type': 'string',
|
||||
'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.',
|
||||
'x-example': 'smtp.somedomain.com',
|
||||
},
|
||||
'MAIL_USE_TLS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If specified, whether to use TLS for sending e-mails.',
|
||||
'x-example': True,
|
||||
},
|
||||
'MAIL_PORT': {
|
||||
'type': 'number',
|
||||
'description': 'The SMTP port to use. If not specified, defaults to 587.',
|
||||
'x-example': 588,
|
||||
},
|
||||
'MAIL_USERNAME': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'The SMTP username to use when sending e-mails.',
|
||||
'x-example': 'myuser',
|
||||
},
|
||||
'MAIL_PASSWORD': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'The SMTP password to use when sending e-mails.',
|
||||
'x-example': 'mypassword',
|
||||
},
|
||||
'MAIL_DEFAULT_SENDER': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `support@quay.io`.',
|
||||
'x-example': 'support@myco.com',
|
||||
},
|
||||
|
||||
# Database.
|
||||
'DB_URI': {
|
||||
'type': 'string',
|
||||
'description': 'The URI at which to access the database, including any credentials.',
|
||||
'x-example': 'mysql+pymysql://username:password@dns.of.database/quay',
|
||||
'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495',
|
||||
},
|
||||
'DB_CONNECTION_ARGS': {
|
||||
'type': 'object',
|
||||
'description': 'If specified, connection arguments for the database such as timeouts and SSL.',
|
||||
'properties': {
|
||||
'threadlocals': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`'
|
||||
},
|
||||
'autorollback': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`'
|
||||
},
|
||||
'ssl': {
|
||||
'type': 'object',
|
||||
'description': 'SSL connection configuration',
|
||||
'properties': {
|
||||
'ca': {
|
||||
'type': 'string',
|
||||
'description': '*Absolute container path* to the CA certificate to use for SSL connections',
|
||||
'x-example': 'conf/stack/ssl-ca-cert.pem',
|
||||
},
|
||||
},
|
||||
'required': ['ca'],
|
||||
},
|
||||
},
|
||||
'required': ['threadlocals', 'autorollback'],
|
||||
},
|
||||
'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': {
|
||||
'type': 'boolean',
|
||||
'description': 'If true, pulls in which the pull audit log entry cannot be written will ' +
|
||||
'still succeed. Useful if the database can fallback into a read-only state ' +
|
||||
'and it is desired for pulls to continue during that time. Defaults to False.',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Storage.
|
||||
'FEATURE_STORAGE_REPLICATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to automatically replicate between storage engines. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_PROXY_STORAGE': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'MAXIMUM_LAYER_SIZE': {
|
||||
'type': 'string',
|
||||
'description': 'Maximum allowed size of an image layer. Defaults to 20G',
|
||||
'x-example': '100G',
|
||||
'pattern': '^[0-9]+(G|M)$',
|
||||
},
|
||||
'DISTRIBUTED_STORAGE_CONFIG': {
|
||||
'type': 'object',
|
||||
'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' +
|
||||
' for a storage engine, with the value being a tuple of the type and ' +
|
||||
' configuration for that engine.',
|
||||
'x-example': {
|
||||
'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}],
|
||||
},
|
||||
'items': {
|
||||
'type': 'array',
|
||||
},
|
||||
},
|
||||
'DISTRIBUTED_STORAGE_PREFERENCE': {
|
||||
'type': 'array',
|
||||
'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' +
|
||||
'use. A preferred engine means it is first checked for pullig and images are ' +
|
||||
'pushed to it.',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
'uniqueItems': True,
|
||||
},
|
||||
'x-example': ['s3_us_east', 's3_us_west'],
|
||||
},
|
||||
'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': {
|
||||
'type': 'array',
|
||||
'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' +
|
||||
'images should be fully replicated, by default, to all other storage engines.',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
'uniqueItems': True,
|
||||
},
|
||||
'x-example': ['s3_us_east', 's3_us_west'],
|
||||
},
|
||||
'USERFILES_LOCATION': {
|
||||
'type': 'string',
|
||||
'description': 'ID of the storage engine in which to place user-uploaded files',
|
||||
'x-example': 's3_us_east',
|
||||
},
|
||||
'USERFILES_PATH': {
|
||||
'type': 'string',
|
||||
'description': 'Path under storage in which to place user-uploaded files',
|
||||
'x-example': 'userfiles',
|
||||
},
|
||||
'ACTION_LOG_ARCHIVE_LOCATION': {
|
||||
'type': 'string',
|
||||
'description': 'If action log archiving is enabled, the storage engine in which to place the ' +
|
||||
'archived data.',
|
||||
'x-example': 's3_us_east',
|
||||
},
|
||||
'ACTION_LOG_ARCHIVE_PATH': {
|
||||
'type': 'string',
|
||||
'description': 'If action log archiving is enabled, the path in storage in which to place the ' +
|
||||
'archived data.',
|
||||
'x-example': 'archives/actionlogs',
|
||||
},
|
||||
'LOG_ARCHIVE_LOCATION': {
|
||||
'type': 'string',
|
||||
'description': 'If builds are enabled, the storage engine in which to place the ' +
|
||||
'archived build logs.',
|
||||
'x-example': 's3_us_east',
|
||||
},
|
||||
'LOG_ARCHIVE_PATH': {
|
||||
'type': 'string',
|
||||
'description': 'If builds are enabled, the path in storage in which to place the ' +
|
||||
'archived build logs.',
|
||||
'x-example': 'archives/buildlogs',
|
||||
},
|
||||
|
||||
# Authentication.
|
||||
'AUTHENTICATION_TYPE': {
|
||||
'type': 'string',
|
||||
'description': 'The authentication engine to use for credential authentication.',
|
||||
'x-example': 'Database',
|
||||
'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC'],
|
||||
},
|
||||
'SUPER_USERS': {
|
||||
'type': 'array',
|
||||
'description': 'Quay usernames of those users to be granted superuser privileges',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
'DIRECT_OAUTH_CLIENTID_WHITELIST': {
|
||||
'type': 'array',
|
||||
'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' +
|
||||
'to perform direct OAuth approval without user approval.',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
|
||||
# Redis.
|
||||
'BUILDLOGS_REDIS': {
|
||||
'type': 'object',
|
||||
'description': 'Connection information for Redis for build logs caching',
|
||||
'required': ['host'],
|
||||
'properties': {
|
||||
'host': {
|
||||
'type': 'string',
|
||||
'description': 'The hostname at which Redis is accessible',
|
||||
'x-example': 'my.redis.cluster',
|
||||
},
|
||||
'port': {
|
||||
'type': 'number',
|
||||
'description': 'The port at which Redis is accessible',
|
||||
'x-example': 1234,
|
||||
},
|
||||
'password': {
|
||||
'type': 'string',
|
||||
'description': 'The password to connect to the Redis instance',
|
||||
'x-example': 'mypassword',
|
||||
},
|
||||
},
|
||||
},
|
||||
'USER_EVENTS_REDIS': {
|
||||
'type': 'object',
|
||||
'description': 'Connection information for Redis for user event handling',
|
||||
'required': ['host'],
|
||||
'properties': {
|
||||
'host': {
|
||||
'type': 'string',
|
||||
'description': 'The hostname at which Redis is accessible',
|
||||
'x-example': 'my.redis.cluster',
|
||||
},
|
||||
'port': {
|
||||
'type': 'number',
|
||||
'description': 'The port at which Redis is accessible',
|
||||
'x-example': 1234,
|
||||
},
|
||||
'password': {
|
||||
'type': 'string',
|
||||
'description': 'The password to connect to the Redis instance',
|
||||
'x-example': 'mypassword',
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
# OAuth configuration.
|
||||
'GITHUB_LOGIN_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using GitHub (Enterprise) as an external login provider',
|
||||
'required': ['CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html',
|
||||
'properties': {
|
||||
'GITHUB_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) being hit',
|
||||
'x-example': 'https://github.com/',
|
||||
},
|
||||
'API_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
|
||||
'x-example': 'https://api.github.com/',
|
||||
},
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
'ORG_RESTRICT': {
|
||||
'type': 'boolean',
|
||||
'description': 'If true, only users within the organization whitelist can login using this provider',
|
||||
'x-example': True,
|
||||
},
|
||||
'ALLOWED_ORGANIZATIONS': {
|
||||
'type': 'array',
|
||||
'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'BITBUCKET_TRIGGER_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using BitBucket for build triggers',
|
||||
'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'],
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html',
|
||||
'properties': {
|
||||
'CONSUMER_KEY': {
|
||||
'type': 'string',
|
||||
'description': 'The registered consumer key (client ID) for this Quay instance',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
},
|
||||
'CONSUMER_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered consumer secret (client secret) for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
},
|
||||
},
|
||||
},
|
||||
'GITHUB_TRIGGER_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using GitHub (Enterprise) for build triggers',
|
||||
'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html',
|
||||
'properties': {
|
||||
'GITHUB_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) being hit',
|
||||
'x-example': 'https://github.com/',
|
||||
},
|
||||
'API_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
|
||||
'x-example': 'https://api.github.com/',
|
||||
},
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
|
||||
},
|
||||
},
|
||||
},
|
||||
'GOOGLE_LOGIN_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using Google for external authentication',
|
||||
'required': ['CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'properties': {
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
},
|
||||
},
|
||||
},
|
||||
'GITLAB_TRIGGER_CONFIG': {
|
||||
'type': ['object', 'null'],
|
||||
'description': 'Configuration for using Gitlab (Enterprise) for external authentication',
|
||||
'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
|
||||
'properties': {
|
||||
'GITLAB_ENDPOINT': {
|
||||
'type': 'string',
|
||||
'description': 'The endpoint at which Gitlab(Enterprise) is running',
|
||||
'x-example': 'https://gitlab.com',
|
||||
},
|
||||
'CLIENT_ID': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client ID for this Quay instance',
|
||||
'x-example': '0e8dbe15c4c7630b6780',
|
||||
},
|
||||
'CLIENT_SECRET': {
|
||||
'type': 'string',
|
||||
'description': 'The registered client secret for this Quay instance',
|
||||
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
# Health.
|
||||
'HEALTH_CHECKER': {
|
||||
'description': 'The configured health check.',
|
||||
'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'}),
|
||||
},
|
||||
|
||||
# Metrics.
|
||||
'PROMETHEUS_NAMESPACE': {
|
||||
'type': 'string',
|
||||
'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`',
|
||||
'x-example': 'myregistry',
|
||||
},
|
||||
|
||||
# Misc configuration.
|
||||
'BLACKLIST_V2_SPEC': {
|
||||
'type': 'string',
|
||||
'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`',
|
||||
'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec',
|
||||
'x-example': '<1.8.0',
|
||||
},
|
||||
'USER_RECOVERY_TOKEN_LIFETIME': {
|
||||
'type': 'string',
|
||||
'description': 'The length of time a token for recovering a user accounts is valid. Defaults to 30m.',
|
||||
'x-example': '10m',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
'SESSION_COOKIE_SECURE': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether the `secure` property should be set on session cookies. ' +
|
||||
'Defaults to False. Recommended to be True for all installations using SSL.',
|
||||
'x-example': True,
|
||||
'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies',
|
||||
},
|
||||
'PUBLIC_NAMESPACES': {
|
||||
'type': 'array',
|
||||
'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' +
|
||||
' user\'s repository list pages, regardless of whether that user is a member of the namespace.' +
|
||||
' Typically, this is used by an enterprise customer in configuring a set of "well-known"' +
|
||||
' namespaces.',
|
||||
'uniqueItems': True,
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
'AVATAR_KIND': {
|
||||
'type': 'string',
|
||||
'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)',
|
||||
'enum': ['local', 'gravatar'],
|
||||
},
|
||||
'V2_PAGINATION_SIZE': {
|
||||
'type': 'number',
|
||||
'description': 'The number of results returned per page in V2 registry APIs',
|
||||
'x-example': 100,
|
||||
},
|
||||
'ENABLE_HEALTH_DEBUG_SECRET': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If specified, a secret that can be given to health endpoints to see full debug info when' +
|
||||
'not authenticated as a superuser',
|
||||
'x-example': 'somesecrethere',
|
||||
},
|
||||
'BROWSER_API_CALLS_XHR_ONLY': {
|
||||
'type': 'boolean',
|
||||
'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Time machine and tag expiration settings.
|
||||
'FEATURE_CHANGE_TAG_EXPIRATION': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.',
|
||||
'x-example': False,
|
||||
},
|
||||
'DEFAULT_TAG_EXPIRATION': {
|
||||
'type': 'string',
|
||||
'description': 'The default, configurable tag expiration time for time machine. Defaults to `2w`.',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
'TAG_EXPIRATION_OPTIONS': {
|
||||
'type': 'array',
|
||||
'description': 'The options that users can select for expiration of tags in their namespace (if enabled)',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
},
|
||||
|
||||
# Team syncing.
|
||||
'FEATURE_TEAM_SYNCING': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)',
|
||||
'x-example': True,
|
||||
},
|
||||
'TEAM_RESYNC_STALE_TIME': {
|
||||
'type': 'string',
|
||||
'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)',
|
||||
'x-example': '2h',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {
|
||||
'type': 'boolean',
|
||||
'description': 'If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.',
|
||||
'x-example': True,
|
||||
},
|
||||
|
||||
# Security scanning.
|
||||
'FEATURE_SECURITY_SCANNER': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to turn of/off the security scanner. Defaults to False',
|
||||
'x-example': False,
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html',
|
||||
},
|
||||
'FEATURE_SECURITY_NOTIFICATIONS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'SECURITY_SCANNER_ENDPOINT' : {
|
||||
'type': 'string',
|
||||
'pattern': '^http(s)?://(.)+$',
|
||||
'description': 'The endpoint for the security scanner',
|
||||
'x-example': 'http://192.168.99.101:6060' ,
|
||||
},
|
||||
'SECURITY_SCANNER_INDEXING_INTERVAL': {
|
||||
'type': 'number',
|
||||
'description': 'The number of seconds between indexing intervals in the security scanner. Defaults to 30.',
|
||||
'x-example': 30,
|
||||
},
|
||||
|
||||
# Bittorrent support.
|
||||
'FEATURE_BITTORRENT': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to allow using Bittorrent-based pulls. Defaults to False',
|
||||
'x-example': False,
|
||||
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bittorrent.html',
|
||||
},
|
||||
'BITTORRENT_PIECE_SIZE': {
|
||||
'type': 'number',
|
||||
'description': 'The bittorent piece size to use. If not specified, defaults to 512 * 1024.',
|
||||
'x-example': 512 * 1024,
|
||||
},
|
||||
'BITTORRENT_ANNOUNCE_URL': {
|
||||
'type': 'string',
|
||||
'pattern': '^http(s)?://(.)+$',
|
||||
'description': 'The URL of the announce endpoint on the bittorrent tracker',
|
||||
'x-example': 'https://localhost:6881/announce',
|
||||
},
|
||||
|
||||
# Build
|
||||
'FEATURE_GITHUB_BUILD': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support GitHub build triggers. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_BITBUCKET_BUILD': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support Bitbucket build triggers. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_GITLAB_BUILD': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support GitLab build triggers. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_BUILD_SUPPORT': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether to support Dockerfile build. Defaults to True',
|
||||
'x-example': True,
|
||||
},
|
||||
'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': {
|
||||
'type': ['number', 'null'],
|
||||
'description': 'If not None, the default maximum number of builds that can be queued in a namespace.',
|
||||
'x-example': 20,
|
||||
},
|
||||
'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': {
|
||||
'type': ['number', 'null'],
|
||||
'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.',
|
||||
'x-example': 10,
|
||||
},
|
||||
'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': {
|
||||
'type': ['number', 'null'],
|
||||
'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.',
|
||||
'x-example': 50,
|
||||
},
|
||||
|
||||
# Login
|
||||
'FEATURE_GITHUB_LOGIN': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether GitHub login is supported. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
'FEATURE_GOOGLE_LOGIN': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether Google login is supported. Defaults to False',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
# Recaptcha
|
||||
'FEATURE_RECAPTCHA': {
|
||||
'type': 'boolean',
|
||||
'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False',
|
||||
'x-example': False,
|
||||
'x-reference': 'https://www.google.com/recaptcha/intro/',
|
||||
},
|
||||
'RECAPTCHA_SITE_KEY': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If recaptcha is enabled, the site key for the Recaptcha service',
|
||||
},
|
||||
'RECAPTCHA_SECRET_KEY': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'If recaptcha is enabled, the secret key for the Recaptcha service',
|
||||
},
|
||||
|
||||
# External application tokens.
|
||||
'FEATURE_APP_SPECIFIC_TOKENS': {
|
||||
'type': 'boolean',
|
||||
'description': 'If enabled, users can create tokens for use by the Docker CLI. Defaults to True',
|
||||
'x-example': False,
|
||||
},
|
||||
|
||||
'APP_SPECIFIC_TOKEN_EXPIRATION': {
|
||||
'type': ['string', 'null'],
|
||||
'description': 'The expiration for external app tokens. Defaults to None.',
|
||||
'pattern': '^[0-9]+(w|m|d|h|s)$',
|
||||
},
|
||||
|
||||
'EXPIRED_APP_SPECIFIC_TOKEN_GC': {
|
||||
'type': ['string', 'null'],
|
||||
      'description': 'Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.',
      'pattern': '^[0-9]+(w|m|d|h|s)$',
    },

    # Feature Flag: Permanent Sessions.
    'FEATURE_PERMANENT_SESSIONS': {
      'type': 'boolean',
      'description': 'Whether sessions are permanent. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Super User Support.
    'FEATURE_SUPER_USERS': {
      'type': 'boolean',
      'description': 'Whether super users are supported. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Anonymous Users.
    'FEATURE_ANONYMOUS_ACCESS': {
      'type': 'boolean',
      'description': 'Whether to allow anonymous users to browse and pull public repositories. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: User Creation.
    'FEATURE_USER_CREATION': {
      'type': 'boolean',
      'description': 'Whether users can be created (by non-super users). Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Invite Only User Creation.
    'FEATURE_INVITE_ONLY_USER_CREATION': {
      'type': 'boolean',
      'description': 'Whether users being created must be invited by another user. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Encrypted Basic Auth.
    'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': {
      'type': 'boolean',
      'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Direct Login.
    'FEATURE_DIRECT_LOGIN': {
      'type': 'boolean',
      'description': 'Whether users can directly login to the UI. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Advertising V2.
    'FEATURE_ADVERTISE_V2': {
      'type': 'boolean',
      'description': 'Whether the v2/ endpoint is visible. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Log Rotation.
    'FEATURE_ACTION_LOG_ROTATION': {
      'type': 'boolean',
      'description': 'Whether or not to rotate old action logs to storage. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: ACI Conversion.
    'FEATURE_ACI_CONVERSION': {
      'type': 'boolean',
      'description': 'Whether to enable conversion to ACIs. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Library Support.
    'FEATURE_LIBRARY_SUPPORT': {
      'type': 'boolean',
      'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Require Team Invite.
    'FEATURE_REQUIRE_TEAM_INVITE': {
      'type': 'boolean',
      'description': 'Whether to require invitations when adding a user to a team. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: Collecting and Supporting Metadata.
    'FEATURE_USER_METADATA': {
      'type': 'boolean',
      'description': 'Whether to collect and support user metadata. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Support App Registry.
    'FEATURE_APP_REGISTRY': {
      'type': 'boolean',
      'description': 'Whether to enable support for App repositories. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Public Repositories in _catalog Endpoint.
    'FEATURE_PUBLIC_CATALOG': {
      'type': 'boolean',
      'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Reader Build Logs.
    'FEATURE_READER_BUILD_LOGS': {
      'type': 'boolean',
      'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False',
      'x-example': False,
    },

    # Feature Flag: Usernames Autocomplete.
    'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {
      'type': 'boolean',
      'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True',
      'x-example': True,
    },

    # Feature Flag: User log access.
    'FEATURE_USER_LOG_ACCESS': {
      'type': 'boolean',
      'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False',
      'x-example': True,
    },

    # Feature Flag: User renaming.
    'FEATURE_USER_RENAME': {
      'type': 'boolean',
      'description': 'If set to true, users can rename their own namespace. Defaults to False',
      'x-example': True,
    },
  },
}
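The block above is plain JSON Schema, so a candidate config dict can be checked before it is written to disk. A minimal sketch, assuming the jsonschema package is available and that the full schema dict above is importable as CONFIG_SCHEMA (both the package dependency and that name are assumptions, not part of this commit):

import jsonschema

def validate_config(config_obj):
  # Returns None when config_obj conforms to CONFIG_SCHEMA;
  # otherwise returns the validation failure message.
  try:
    jsonschema.validate(config_obj, CONFIG_SCHEMA)
    return None
  except jsonschema.ValidationError as ve:
    return str(ve)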
81
config_app/config_util/config/testprovider.py
Normal file
@@ -0,0 +1,81 @@
import json
import io
import os
from datetime import datetime, timedelta

from config_util.config.baseprovider import BaseProvider

REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem']


class TestConfigProvider(BaseProvider):
  """ Implementation of the config provider for testing. Everything is kept in-memory instead of
      on the real file system. """
  def __init__(self):
    self.clear()

  def clear(self):
    self.files = {}
    self._config = {}

  @property
  def provider_id(self):
    return 'test'

  def update_app_config(self, app_config):
    self._config = app_config

  def get_config(self):
    if 'config.yaml' not in self.files:
      return None

    return json.loads(self.files.get('config.yaml', '{}'))

  def save_config(self, config_obj):
    self.files['config.yaml'] = json.dumps(config_obj)

  def config_exists(self):
    return 'config.yaml' in self.files

  def volume_exists(self):
    return True

  def volume_file_exists(self, filename):
    if filename in REAL_FILES:
      return True

    return filename in self.files

  def save_volume_file(self, filename, flask_file):
    self.files[filename] = flask_file.read()

  def write_volume_file(self, filename, contents):
    self.files[filename] = contents

  def get_volume_file(self, filename, mode='r'):
    if filename in REAL_FILES:
      return open(filename, mode=mode)

    return io.BytesIO(self.files[filename])

  def remove_volume_file(self, filename):
    self.files.pop(filename, None)

  def list_volume_directory(self, path):
    paths = []
    for filename in self.files:
      if filename.startswith(path):
        paths.append(filename[len(path)+1:])

    return paths

  def requires_restart(self, app_config):
    return False

  def reset_for_test(self):
    self._config['SUPER_USERS'] = ['devtable']
    self.files = {}

  def get_volume_path(self, directory, filename):
    return os.path.join(directory, filename)
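For illustration, a test could exercise the in-memory provider like this; it is only a sketch, but every method name comes straight from the class above:

provider = TestConfigProvider()
assert not provider.config_exists()

provider.save_config({'SUPER_USERS': ['devtable']})
assert provider.config_exists()
assert provider.get_config()['SUPER_USERS'] == ['devtable']

# Volume files live in the same in-memory dict, keyed by path.
provider.write_volume_file('extra_ca_certs/mycert.crt', 'PEM DATA')
assert provider.volume_file_exists('extra_ca_certs/mycert.crt')
assert provider.list_volume_directory('extra_ca_certs') == ['mycert.crt']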
47
config_app/config_util/log.py
Normal file
@@ -0,0 +1,47 @@
import os
from _init_config import CONF_DIR


def logfile_path(jsonfmt=False, debug=False):
  """
  Returns a logging config file path according to these rules:
  - conf/logging_debug_json.conf # jsonfmt=true,  debug=true
  - conf/logging_json.conf       # jsonfmt=true,  debug=false
  - conf/logging_debug.conf      # jsonfmt=false, debug=true
  - conf/logging.conf            # jsonfmt=false, debug=false
  Can also be parametrized via envvars: JSONLOG=true, DEBUGLOG=true
  """
  _json = ""
  _debug = ""

  if jsonfmt or os.getenv('JSONLOG', 'false').lower() == 'true':
    _json = "_json"

  if debug or os.getenv('DEBUGLOG', 'false').lower() == 'true':
    _debug = "_debug"

  return os.path.join(CONF_DIR, "logging%s%s.conf" % (_debug, _json))


def filter_logs(values, filtered_fields):
  """
  Takes a dict and a list of keys to filter.
  eg:
  with filtered_fields:
  [{'key': ['k1', 'k2'], 'fn': lambda x: 'filtered'}]
  and values:
  {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'}
  the dict is filtered in place to:
  {'k1': {'k2': 'filtered'}, 'k3': 'some-value'}
  """
  for field in filtered_fields:
    cdict = values

    for key in field['key'][:-1]:
      if key in cdict:
        cdict = cdict[key]

    last_key = field['key'][-1]

    if last_key in cdict and cdict[last_key]:
      cdict[last_key] = field['fn'](cdict[last_key])
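The filter_logs docstring written out as a runnable check; note that the function mutates the dict in place rather than returning a copy:

values = {'k1': {'k2': 'some-secret'}, 'k3': 'some-value'}
filter_logs(values, [{'key': ['k1', 'k2'], 'fn': lambda x: 'filtered'}])
assert values == {'k1': {'k2': 'filtered'}, 'k3': 'some-value'}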
81
config_app/config_util/ssl.py
Normal file
@@ -0,0 +1,81 @@
from fnmatch import fnmatch

import OpenSSL


class CertInvalidException(Exception):
  """ Exception raised when a certificate could not be parsed/loaded. """
  pass


class KeyInvalidException(Exception):
  """ Exception raised when a key could not be parsed/loaded or successfully applied to a cert. """
  pass


def load_certificate(cert_contents):
  """ Loads the certificate from the given contents and returns it or raises a CertInvalidException
      on failure.
  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_contents)
    return SSLCertificate(cert)
  except OpenSSL.crypto.Error as ex:
    raise CertInvalidException(ex.message[0][2])


_SUBJECT_ALT_NAME = 'subjectAltName'


class SSLCertificate(object):
  """ Helper class for easier working with SSL certificates. """
  def __init__(self, openssl_cert):
    self.openssl_cert = openssl_cert

  def validate_private_key(self, private_key_path):
    """ Validates that the private key found at the given file path applies to this certificate.
        Raises a KeyInvalidException on failure.
    """
    context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
    context.use_certificate(self.openssl_cert)

    try:
      context.use_privatekey_file(private_key_path)
      context.check_privatekey()
    except OpenSSL.SSL.Error as ex:
      raise KeyInvalidException(ex.message[0][2])

  def matches_name(self, check_name):
    """ Returns true if this SSL certificate matches the given DNS hostname. """
    for dns_name in self.names:
      if fnmatch(check_name, dns_name):
        return True

    return False

  @property
  def expired(self):
    """ Returns whether the SSL certificate has expired. """
    return self.openssl_cert.has_expired()

  @property
  def common_name(self):
    """ Returns the defined common name for the certificate, if any. """
    return self.openssl_cert.get_subject().commonName

  @property
  def names(self):
    """ Returns all the DNS names to which the certificate applies. May be empty. """
    dns_names = set()
    common_name = self.common_name
    if common_name is not None:
      dns_names.add(common_name)

    # Find the DNS extension, if any.
    for i in range(0, self.openssl_cert.get_extension_count()):
      ext = self.openssl_cert.get_extension(i)
      if ext.get_short_name() == _SUBJECT_ALT_NAME:
        value = str(ext)
        for san_name in value.split(','):
          san_name_trimmed = san_name.strip()
          if san_name_trimmed.startswith('DNS:'):
            dns_names.add(san_name_trimmed[4:])

    return dns_names
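A short usage sketch for the helper; the cert.pem/key.pem paths and the hostname are hypothetical stand-ins for any PEM certificate/key pair:

with open('cert.pem') as f:
  cert = load_certificate(f.read())  # raises CertInvalidException on a bad PEM

if cert.expired:
  print 'certificate has expired'

if not cert.matches_name('quay.example.com'):
  print 'certificate does not cover this hostname'

try:
  cert.validate_private_key('key.pem')  # raises KeyInvalidException on mismatch
except KeyInvalidException as kie:
  print 'key does not match certificate: %s' % kie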
32
config_app/config_util/workers.py
Normal file
@@ -0,0 +1,32 @@
import os
import psutil


def get_worker_count(worker_kind_name, multiplier, minimum=None, maximum=None):
  """ Returns the number of gunicorn workers to run for the given worker kind,
      based on a combination of environment variable, multiplier, minimum (if any),
      and number of accessible CPU cores.
  """
  minimum = minimum or multiplier
  maximum = maximum or (multiplier * multiplier)

  # Check for an override via an environment variable. Environment values are
  # strings, so coerce to int before comparing against the minimum.
  override_value = os.environ.get('WORKER_COUNT_' + worker_kind_name.upper())
  if override_value is not None:
    return max(int(override_value), minimum)

  override_value = os.environ.get('WORKER_COUNT')
  if override_value is not None:
    return max(int(override_value), minimum)

  # Load the number of CPU cores via affinity, and use that to calculate the
  # number of workers to run.
  p = psutil.Process(os.getpid())

  try:
    cpu_count = len(p.cpu_affinity())
  except AttributeError:
    # cpu_affinity isn't supported on this platform. Assume 2.
    cpu_count = 2

  return min(max(cpu_count * multiplier, minimum), maximum)
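Concretely, get_worker_count('local', 2, minimum=2, maximum=8) yields min(max(cpu_count * 2, 2), 8) workers, so a 4-core machine gets 8; setting WORKER_COUNT_LOCAL (or the generic WORKER_COUNT) overrides the calculation, floored at the minimum. A small check, assuming the int() coercion noted above:

import os

os.environ['WORKER_COUNT_LOCAL'] = '4'
assert get_worker_count('local', 2, minimum=2, maximum=8) == 4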
@@ -1,8 +1,8 @@
from app import app as application
from config_endpoints.setup_web import setup_web
from config_app import app as application
from config_endpoints.api import api_bp
from config_endpoints.setup_web import setup_web


# application.register_blueprint(setup_web)
application.register_blueprint(setup_web)
application.register_blueprint(api_bp, url_prefix='/api')
@@ -32,8 +32,6 @@ function provideConfig($provide: ng.auto.IProvideService,

  // Configure the API provider.
  RestangularProvider.setBaseUrl('/api/v1/');

  console.log('i');
}
@@ -29,8 +29,6 @@ angular.module("quay-config")
      'configurationSaved': '&configurationSaved'
    },
    controller: function($rootScope, $scope, $element, $timeout, ApiService) {
      console.log('in the controller of the configSetupTool')

      var authPassword = null;

      $scope.HOSTNAME_REGEX = '^[a-zA-Z-0-9\.]+(:[0-9]+)?$';
@@ -1415,11 +1413,11 @@ angular.module("quay-config")
        });
      };

      UserService.updateUserIn($scope, function(user) {
        if (!user.anonymous) {
          loadCertificates();
        }
      });
      // UserService.updateUserIn($scope, function(user) {
      //   console.log(user)
      // no need to check for user, since it's all local
      loadCertificates();
      // });

      $scope.handleCertsSelected = function(files, callback) {
        $scope.certsUploading = true;
@@ -14,11 +14,8 @@ require('../../static/js/tar');
const ng1QuayModule: string = bundle(ConfigAppModule, []).name;
angular.module('quay-config', [ng1QuayModule])
  .run(() => {
    console.log(' init run was called')
  });

console.log('Hello world! I\'m the config app');

declare var require: any;
function requireAll(r) {
  r.keys().forEach(r);
@@ -325,7 +325,6 @@ angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilS
    };

    // todo: remove hacks
    apiService.scGetConfig = () => new Promise(() => { hello: true });
    apiService.scRegistryStatus = () => new Promise(() => { hello: true });

    return apiService;
@@ -182,30 +182,6 @@ function(ApiService, CookieService, $rootScope, Config, $location, $timeout) {
    return externalUsername || userResponse.username;
  };

  userService.deleteNamespace = function(info, callback) {
    var namespace = info.user ? info.user.username : info.organization.name;
    if (!namespace) {
      return;
    }

    var errorDisplay = ApiService.errorDisplay('Could not delete namespace', callback);
    var cb = function(resp) {
      userService.load(function(currentUser) {
        callback(true);
        $location.path('/');
      });
    }

    if (info.user) {
      ApiService.deleteCurrentUser().then(cb, errorDisplay)
    } else {
      var delParams = {
        'orgname': info.organization.name
      };
      ApiService.deleteAdminedOrganization(null, delParams).then(cb, errorDisplay);
    }
  };

  userService.currentUser = function() {
    return userResponse;
  };
114
config_app/loghandler_config.py
Executable file
@@ -0,0 +1,114 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import datetime
import json
import logging
import re
import traceback


LOG_FORMAT_REGEXP = re.compile(r'\((.+?)\)', re.IGNORECASE)


def _json_default(obj):
  """
  Coerce everything to strings.
  All objects representing time get output as ISO8601.
  """
  if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)):
    return obj.isoformat()

  elif isinstance(obj, Exception):
    return "Exception: %s" % str(obj)

  return str(obj)


# Skip natural LogRecord attributes.
# http://docs.python.org/library/logging.html#logrecord-attributes
RESERVED_ATTRS = set([
  'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'levelname',
  'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
  'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName'
])


class JsonFormatter(logging.Formatter):
  """
  A custom formatter to format logging records as json strings.
  Extra values will be formatted with str() if not supported by the
  json default encoder.
  """

  def __init__(self, *args, **kwargs):
    """
    :param json_default: a function for encoding non-standard objects
        as outlined in http://docs.python.org/2/library/json.html
    :param json_encoder: optional custom encoder
    :param json_serializer: a :meth:`json.dumps`-compatible callable
        that will be used to serialize the log record.
    :param prefix: an optional key prefix to nest logs
    """
    self.json_default = kwargs.pop("json_default", _json_default)
    self.json_encoder = kwargs.pop("json_encoder", None)
    self.json_serializer = kwargs.pop("json_serializer", json.dumps)
    self.default_values = kwargs.pop("default_extra", {})
    self.prefix_key = kwargs.pop("prefix_key", "data")

    logging.Formatter.__init__(self, *args, **kwargs)

    self._fmt_parameters = self._parse_format_string()
    self._skip_fields = set(self._fmt_parameters)
    self._skip_fields.update(RESERVED_ATTRS)

  def _parse_format_string(self):
    """Parses the format string looking for substitutions."""
    standard_formatters = LOG_FORMAT_REGEXP
    return standard_formatters.findall(self._fmt)

  def add_fields(self, log_record, record, message_dict):
    """
    Override this method to implement custom logic for adding fields.
    """
    target = log_record
    if self.prefix_key:
      log_record[self.prefix_key] = {}
      target = log_record[self.prefix_key]

    for field, value in record.__dict__.iteritems():
      if field in self._fmt_parameters and field in RESERVED_ATTRS:
        log_record[field] = value
      elif field not in RESERVED_ATTRS:
        target[field] = value

    target.update(message_dict)
    target.update(self.default_values)

  def format(self, record):
    """Formats a log record and serializes it to json."""
    message_dict = {}
    if isinstance(record.msg, dict):
      message_dict = record.msg
      record.message = None
      if "message" in message_dict:
        record.message = message_dict.pop("message", "")
    else:
      record.message = record.getMessage()

    # Only format time if needed.
    if "asctime" in self._fmt_parameters:
      record.asctime = self.formatTime(record, self.datefmt)

    # Display the formatted exception, but allow overriding it in the
    # user-supplied dict.
    if record.exc_info and not message_dict.get('exc_info'):
      message_dict['exc_info'] = traceback.format_list(traceback.extract_tb(record.exc_info[2]))
    log_record = {}

    self.add_fields(log_record, record, message_dict)

    return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder)
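Wiring the formatter up by hand looks roughly like this (a sketch; the logger name and format string are illustrative, and non-reserved extras land under the default "data" prefix key):

import logging
import sys

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(JsonFormatter('%(asctime)s %(name)s %(message)s'))

logger = logging.getLogger('config_app')
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# 'config_file' is not a reserved LogRecord attribute, so it is
# nested under the "data" key in the serialized JSON output.
logger.info('config saved', extra={'config_file': 'config.yaml'})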
@@ -1,21 +0,0 @@
import os
from util.config.provider import TestConfigProvider, KubernetesConfigProvider, FileConfigProvider

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')


def get_config_provider(config_volume, yaml_filename, py_filename, testing=False, kubernetes=False):
  """ Loads and returns the config provider for the current environment. """
  if testing:
    return TestConfigProvider()

  if kubernetes:
    return KubernetesConfigProvider(config_volume, yaml_filename, py_filename)

  return FileConfigProvider(config_volume, yaml_filename, py_filename)


config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                      testing=False, kubernetes=False)