Make script paths work in docker and locally for config_app

This commit is contained in:
Sam Chow 2018-06-05 13:43:01 -04:00
parent d5db3462b9
commit e9d24dc5ff
26 changed files with 79 additions and 2248 deletions

View file

@@ -106,6 +106,10 @@ RUN yarn build \
&& jpegoptim static/img/**/*.jpg \
&& optipng -clobber -quiet static/img/**/*.png
# Config app js compile
COPY config_app/ config_app/
RUN yarn build-config-app
COPY . .
RUN PYTHONPATH=$QUAYPATH venv/bin/python -m external_libraries
@@ -139,4 +143,3 @@ RUN ./scripts/detect-config.sh
EXPOSE 443 8443 80
ENTRYPOINT [ "/bin/bash", "./quay-entrypoint.sh"]

View file

@@ -3,8 +3,10 @@ import re
import subprocess
# Note: this currently points to the directory above, since we're in the quay config_app dir. When extracting, revert
# Note: this currently points to the directory above, since we're in the quay config_app dir
# TODO(config_extract): revert to root directory rather than the one above
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
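
(Editor's sketch, not part of the diff: why the double dirname makes the same file resolve correctly in docker and locally; the example paths are assumptions.)
import os
# _init_config.py lives at <root>/config_app/_init_config.py, so two dirname()
# hops land on <root> no matter where the process was started from:
print(os.path.dirname(os.path.dirname('/quay/config_app/_init_config.py')))          # docker image -> /quay
print(os.path.dirname(os.path.dirname('/home/dev/quay/config_app/_init_config.py'))) # local checkout -> /home/dev/quay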

View file

@@ -1,38 +1,40 @@
import os
import logging
from flask import Flask
from _init_config import ROOT_DIR
from config_app.config_util.config import get_config_provider
from data import database
from util.config.superusermanager import SuperUserManager
from util.ipresolver import NoopIPResolver
from util.config.superusermanager import SuperUserManager
from config_app._init_config import ROOT_DIR
from config_app.config_util.config import get_config_provider
app = Flask(__name__)
logger = logging.getLogger(__name__)
# OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'stack/')
OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack')
is_testing = 'TEST' in os.environ
is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config_app_config.py',
testing=is_testing, kubernetes=is_kubernetes)
# TODO(config kubernetes): reinstate when enabling kubernetes in config app
# is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
testing=is_testing)
if is_testing:
from config_app.config_test.testconfig import TestConfig
from test.testconfig import TestConfig
logger.debug('Loading test config.')
app.config.from_object(TestConfig())
else:
from config_app.config_app_config import DefaultConfig
from config import DefaultConfig
logger.debug('Loading default config.')
app.config.from_object(DefaultConfig())
# app.teardown_request(database.close_db_filter)
app.teardown_request(database.close_db_filter)
# Load the override config via the provider.
config_provider.update_app_config(app.config)
superusers = SuperUserManager(app)
ip_resolver = NoopIPResolver()
ip_resolver = NoopIPResolver()

View file

@@ -6,12 +6,11 @@ import logging
from Crypto import Random
from config_app.config_util.log import logfile_path
from config_app.config_util.workers import get_worker_count
logconfig = logfile_path(debug=True)
bind = '0.0.0.0:5000'
workers = get_worker_count('local', 2, minimum=2, maximum=8)
workers = 1
worker_class = 'gevent'
daemon = False
pythonpath = '.'

View file

@@ -0,0 +1,26 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from config_app.config_util.log import logfile_path
logconfig = logfile_path(debug=True)
bind = '0.0.0.0:80'
workers = 1
worker_class = 'gevent'
pythonpath = '.'
preload_app = True
def post_fork(server, worker):
# Reset the Random library to ensure it won't raise the "PID check failed." error after
# gunicorn forks.
Random.atfork()
def when_ready(server):
logger = logging.getLogger(__name__)
logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class)
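
(Editor's sketch, not part of the diff: what post_fork guards against. PyCrypto's RNG state is tied to the process that seeded it; a forked worker that skips Random.atfork() fails its next RNG use with "PID check failed.")
import os
from Crypto import Random

Random.new().read(16)      # parent process seeds the RNG pool
pid = os.fork()
if pid == 0:               # child, i.e. what a gunicorn worker is
    Random.atfork()        # re-seed; without this the PID check raises
    Random.new().read(16)  # safe to use again
    os._exit(0)
os.waitpid(pid, 0)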

View file

@@ -1,544 +0,0 @@
from uuid import uuid4
import os.path
import requests
from _init_config import ROOT_DIR, CONF_DIR
def build_requests_session():
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=100,
pool_maxsize=100)
sess.mount('http://', adapter)
sess.mount('https://', adapter)
return sess
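
(Editor's sketch, not part of the diff: the adapter is mounted for both schemes, so repeated requests to the same host reuse pooled connections; the URL below is a placeholder.)
sess = build_requests_session()
resp = sess.get('https://registry.example.com/health')  # hypothetical endpoint; the connection is pooled for reuse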
# The set of configuration key names that will be accessible in the client. Since these
# values are sent to the frontend, DO NOT PLACE ANY SECRETS OR KEYS in this list.
CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'MIXPANEL_KEY',
'STRIPE_PUBLISHABLE_KEY', 'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN',
'AUTHENTICATION_TYPE', 'REGISTRY_TITLE', 'REGISTRY_TITLE_SHORT',
'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', 'DOCUMENTATION_LOCATION',
'DOCUMENTATION_METADATA', 'SETUP_COMPLETE', 'DEBUG', 'MARKETO_MUNCHKIN_ID',
'STATIC_SITE_BUCKET', 'RECAPTCHA_SITE_KEY', 'CHANNEL_COLORS',
'TAG_EXPIRATION_OPTIONS', 'INTERNAL_OIDC_SERVICE_ID',
'SEARCH_RESULTS_PER_PAGE', 'SEARCH_MAX_RESULT_PAGE_COUNT']
def frontend_visible_config(config_dict):
visible_dict = {}
for name in CLIENT_WHITELIST:
if name.lower().find('secret') >= 0:
raise Exception('Cannot whitelist secrets: %s' % name)
if name in config_dict:
visible_dict[name] = config_dict.get(name, None)
return visible_dict
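
(Editor's sketch, not part of the diff: only whitelisted keys survive the filter; everything else, secrets included, is simply dropped.)
cfg = {'SERVER_HOSTNAME': 'quay.example.com', 'SECRET_KEY': 'do-not-leak'}
print(frontend_visible_config(cfg))  # {'SERVER_HOSTNAME': 'quay.example.com'}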
# Configuration that should not be changed by end users
class ImmutableConfig(object):
# Requests based HTTP client with a large request pool
HTTPCLIENT = build_requests_session()
# Status tag config
STATUS_TAGS = {}
for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']:
tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg')
with open(tag_path) as tag_svg:
STATUS_TAGS[tag_name] = tag_svg.read()
# Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable
# to be set via the API.
DEFAULT_LABEL_KEY_RESERVED_PREFIXES = ['com.docker.', 'io.docker.', 'org.dockerproject.',
'org.opencontainers.', 'io.cncf.',
'io.kubernetes.', 'io.k8s.',
'io.quay', 'com.coreos', 'com.tectonic',
'internal', 'quay']
# Colors for local avatars.
AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728',
'#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2',
'#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79',
'#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b',
'#8c6d31', '#ad494a', '#e7ba52', '#a55194']
# Colors for channels.
CHANNEL_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728',
'#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2',
'#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79',
'#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b',
'#8c6d31', '#ad494a', '#e7ba52', '#a55194']
PROPAGATE_EXCEPTIONS = True
class DefaultConfig(ImmutableConfig):
# Flask config
JSONIFY_PRETTYPRINT_REGULAR = False
SESSION_COOKIE_SECURE = False
LOGGING_LEVEL = 'DEBUG'
SEND_FILE_MAX_AGE_DEFAULT = 0
PREFERRED_URL_SCHEME = 'http'
SERVER_HOSTNAME = 'localhost:5000'
REGISTRY_TITLE = 'Quay Enterprise'
REGISTRY_TITLE_SHORT = 'Quay Enterprise'
CONTACT_INFO = [
'mailto:support@quay.io',
'irc://chat.freenode.net:6665/quay',
'tel:+1-888-930-3475',
'https://twitter.com/quayio',
]
# Mail config
MAIL_SERVER = ''
MAIL_USE_TLS = True
MAIL_PORT = 587
MAIL_USERNAME = None
MAIL_PASSWORD = None
MAIL_DEFAULT_SENDER = 'support@quay.io'
MAIL_FAIL_SILENTLY = False
TESTING = True
# DB config
DB_URI = 'sqlite:///test/data/test.db'
DB_CONNECTION_ARGS = {
'threadlocals': True,
'autorollback': True,
}
@staticmethod
def create_transaction(db):
return db.transaction()
DB_TRANSACTION_FACTORY = create_transaction
# If set to true, TLS is used, but is terminated by an external service (such as a load balancer).
# Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined
# behavior.
EXTERNAL_TLS_TERMINATION = False
# If true, CDN URLs will be used for our external dependencies, rather than the local
# copies.
USE_CDN = False
# Authentication
AUTHENTICATION_TYPE = 'Database'
# Build logs
BUILDLOGS_REDIS = {'host': 'localhost'}
BUILDLOGS_OPTIONS = []
# Real-time user events
USER_EVENTS_REDIS = {'host': 'localhost'}
# Stripe config
BILLING_TYPE = 'FakeStripe'
# Analytics
ANALYTICS_TYPE = 'FakeAnalytics'
# Build Queue Metrics
QUEUE_METRICS_TYPE = 'Null'
QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300
# Exception logging
EXCEPTION_LOG_TYPE = 'FakeSentry'
SENTRY_DSN = None
SENTRY_PUBLIC_DSN = None
# Github Config
GITHUB_LOGIN_CONFIG = None
GITHUB_TRIGGER_CONFIG = None
# Google Config.
GOOGLE_LOGIN_CONFIG = None
# Bitbucket Config.
BITBUCKET_TRIGGER_CONFIG = None
# Gitlab Config.
GITLAB_TRIGGER_CONFIG = None
NOTIFICATION_QUEUE_NAME = 'notification'
DOCKERFILE_BUILD_QUEUE_NAME = 'dockerfilebuild'
REPLICATION_QUEUE_NAME = 'imagestoragereplication'
SECSCAN_NOTIFICATION_QUEUE_NAME = 'security_notification'
CHUNK_CLEANUP_QUEUE_NAME = 'chunk_cleanup'
NAMESPACE_GC_QUEUE_NAME = 'namespacegc'
# Super user config. Note: This MUST BE an empty list for the default config.
SUPER_USERS = []
# Feature Flag: Whether sessions are permanent.
FEATURE_PERMANENT_SESSIONS = True
# Feature Flag: Whether super users are supported.
FEATURE_SUPER_USERS = True
# Feature Flag: Whether to allow anonymous users to browse and pull public repositories.
FEATURE_ANONYMOUS_ACCESS = True
# Feature Flag: Whether billing is required.
FEATURE_BILLING = False
# Feature Flag: Whether user accounts automatically have usage log access.
FEATURE_USER_LOG_ACCESS = False
# Feature Flag: Whether GitHub login is supported.
FEATURE_GITHUB_LOGIN = False
# Feature Flag: Whether Google login is supported.
FEATURE_GOOGLE_LOGIN = False
# Feature Flag: whether to enable support chat
FEATURE_SUPPORT_CHAT = False
# Feature Flag: Whether to support GitHub build triggers.
FEATURE_GITHUB_BUILD = False
# Feature Flag: Whether to support Bitbucket build triggers.
FEATURE_BITBUCKET_BUILD = False
# Feature Flag: Whether to support GitLab build triggers.
FEATURE_GITLAB_BUILD = False
# Feature Flag: Dockerfile build support.
FEATURE_BUILD_SUPPORT = True
# Feature Flag: Whether emails are enabled.
FEATURE_MAILING = True
# Feature Flag: Whether users can be created (by non-super users).
FEATURE_USER_CREATION = True
# Feature Flag: Whether users being created must be invited by another user. If FEATURE_USER_CREATION is off,
# this flag has no effect.
FEATURE_INVITE_ONLY_USER_CREATION = False
# Feature Flag: Whether users can be renamed
FEATURE_USER_RENAME = False
# Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for
# basic auth.
FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False
# Feature Flag: Whether to automatically replicate between storage engines.
FEATURE_STORAGE_REPLICATION = False
# Feature Flag: Whether users can directly login to the UI.
FEATURE_DIRECT_LOGIN = True
# Feature Flag: Whether the v2/ endpoint is visible
FEATURE_ADVERTISE_V2 = True
# Semver spec for which Docker versions we will blacklist
# Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec
BLACKLIST_V2_SPEC = '<1.6.0'
# Feature Flag: Whether or not to rotate old action logs to storage.
FEATURE_ACTION_LOG_ROTATION = False
# Feature Flag: Whether to enable conversion to ACIs.
FEATURE_ACI_CONVERSION = False
# Feature Flag: Whether to allow for "namespace-less" repositories when pulling and pushing from
# Docker.
FEATURE_LIBRARY_SUPPORT = True
# Feature Flag: Whether to require invitations when adding a user to a team.
FEATURE_REQUIRE_TEAM_INVITE = True
# Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx.
FEATURE_PROXY_STORAGE = False
# Feature Flag: Whether to collect and support user metadata.
FEATURE_USER_METADATA = False
# Feature Flag: Whether to support signing
FEATURE_SIGNING = False
# Feature Flag: Whether to enable support for App repositories.
FEATURE_APP_REGISTRY = False
# Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise,
# only private repositories can be returned.
FEATURE_PUBLIC_CATALOG = False
# Feature Flag: If set to true, build logs may be read by those with read access to the repo,
# rather than only write access or admin access.
FEATURE_READER_BUILD_LOGS = False
# Feature Flag: If set to true, autocompletion will apply to partial usernames.
FEATURE_PARTIAL_USER_AUTOCOMPLETE = True
# If a namespace is defined in the public namespace list, then it will appear on *all*
# users' repository list pages, regardless of whether that user is a member of the namespace.
# Typically, this is used by an enterprise customer in configuring a set of "well-known"
# namespaces.
PUBLIC_NAMESPACES = []
# The namespace to use for library repositories.
# Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries.
# See: https://github.com/docker/docker/blob/master/registry/session.go#L320
LIBRARY_NAMESPACE = 'library'
BUILD_MANAGER = ('enterprise', {})
DISTRIBUTED_STORAGE_CONFIG = {
'local_eu': ['LocalStorage', {'storage_path': 'test/data/registry/eu'}],
'local_us': ['LocalStorage', {'storage_path': 'test/data/registry/us'}],
}
DISTRIBUTED_STORAGE_PREFERENCE = ['local_us']
DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS = ['local_us']
# Health checker.
HEALTH_CHECKER = ('LocalHealthCheck', {})
# Userfiles
USERFILES_LOCATION = 'local_us'
USERFILES_PATH = 'userfiles/'
# Build logs archive
LOG_ARCHIVE_LOCATION = 'local_us'
LOG_ARCHIVE_PATH = 'logarchive/'
# Action logs archive
ACTION_LOG_ARCHIVE_LOCATION = 'local_us'
ACTION_LOG_ARCHIVE_PATH = 'actionlogarchive/'
# System logs.
SYSTEM_LOGS_PATH = "/var/log/"
SYSTEM_LOGS_FILE = "/var/log/syslog"
SYSTEM_SERVICES_PATH = os.path.join(CONF_DIR, "init/service/")
# Allow registry pulls when unable to write to the audit log
ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False
# Services that should not be shown in the logs view.
SYSTEM_SERVICE_BLACKLIST = []
# Temporary tag expiration in seconds; this may actually be longer based on GC policy
PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60 # One hour per layer
# Signed registry grant token expiration in seconds
SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24 # One day to complete a push/pull
# Registry v2 JWT Auth config
REGISTRY_JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60 # Tokens are signed for at most one hour, plus a minute of slack for clock skew
# The URL endpoint to which we redirect OAuth when generating a token locally.
LOCAL_OAUTH_HANDLER = '/oauth/localapp'
# The various avatar background colors.
AVATAR_KIND = 'local'
# The location of the Quay documentation.
DOCUMENTATION_LOCATION = 'http://docs.quay.io'
DOCUMENTATION_METADATA = 'https://coreos.github.io/quay-docs/search.json'
# How often the Garbage Collection worker runs.
GARBAGE_COLLECTION_FREQUENCY = 30 # seconds
# How long notifications will try to send before timing out.
NOTIFICATION_SEND_TIMEOUT = 10
# Security scanner
FEATURE_SECURITY_SCANNER = False
FEATURE_SECURITY_NOTIFICATIONS = False
# The endpoint for the security scanner.
SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060'
# The number of seconds between indexing intervals in the security scanner
SECURITY_SCANNER_INDEXING_INTERVAL = 30
# If specified, the security scanner will only index images newer than the provided ID.
SECURITY_SCANNER_INDEXING_MIN_ID = None
# If specified, the endpoint to be used for all POST calls to the security scanner.
SECURITY_SCANNER_ENDPOINT_BATCH = None
# If specified, GET requests that return non-200 will be retried at the following instances.
SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = []
# The indexing engine version running inside the security scanner.
SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3
# The version of the API to use for the security scanner.
SECURITY_SCANNER_API_VERSION = 'v1'
# API call timeout for the security scanner.
SECURITY_SCANNER_API_TIMEOUT_SECONDS = 10
# POST call timeout for the security scanner.
SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS = 480
# The issuer name for the security scanner.
SECURITY_SCANNER_ISSUER_NAME = 'security_scanner'
# JWTProxy Settings
# The address (sans schema) to proxy outgoing requests through the jwtproxy
# to be signed
JWTPROXY_SIGNER = 'localhost:8080'
# The audience that jwtproxy should verify on incoming requests
# If None, will be calculated off of the SERVER_HOSTNAME (default)
JWTPROXY_AUDIENCE = None
# Torrent management flags
FEATURE_BITTORRENT = False
BITTORRENT_PIECE_SIZE = 512 * 1024
BITTORRENT_ANNOUNCE_URL = 'https://localhost:6881/announce'
BITTORRENT_FILENAME_PEPPER = str(uuid4())
BITTORRENT_WEBSEED_LIFETIME = 3600
# "Secret" key for generating encrypted paging tokens. Only needed to be secret to
# hide the ID range for production (in which this value is overridden). Should *not*
# be relied upon for secure encryption otherwise.
# This value is a Fernet key and should be 32 bytes, URL-safe base64 encoded.
PAGE_TOKEN_KEY = '0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58='
# The timeout for service key approval.
UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 # One day
# How long to wait before GCing an expired service key.
EXPIRED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 * 7 # One week
# The ID of the user account in the database to be used for service audit logs. If none, the
# lowest user in the database will be used.
SERVICE_LOG_ACCOUNT_ID = None
# The service key ID for the instance service.
# NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
INSTANCE_SERVICE_KEY_SERVICE = 'quay'
# The location of the key ID file generated for this instance.
INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid')
# The location of the private key generated for this instance.
# NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem')
# This instance's service key expiration in minutes.
INSTANCE_SERVICE_KEY_EXPIRATION = 120
# Number of minutes between service key refreshes. Should be half the expiration, minus
# some additional window time.
INSTANCE_SERVICE_KEY_REFRESH = 55
# The whitelist of client IDs for OAuth applications that allow for direct login.
DIRECT_OAUTH_CLIENTID_WHITELIST = []
# URL that specifies the location of the prometheus stats aggregator.
PROMETHEUS_AGGREGATOR_URL = 'http://localhost:9092'
# Namespace prefix for all prometheus metrics.
PROMETHEUS_NAMESPACE = 'quay'
# Overridable list of reverse DNS prefixes that are reserved for internal use on labels.
LABEL_KEY_RESERVED_PREFIXES = []
# Delays workers from starting until a random point in time between 0 and their regular interval.
STAGGER_WORKERS = True
# Location of the static marketing site.
STATIC_SITE_BUCKET = None
# Site key and secret key for using recaptcha.
FEATURE_RECAPTCHA = False
RECAPTCHA_SITE_KEY = None
RECAPTCHA_SECRET_KEY = None
# Server where TUF metadata can be found
TUF_SERVER = None
# Prefix to add to metadata e.g. <prefix>/<namespace>/<reponame>
TUF_GUN_PREFIX = None
# Maximum size allowed for layers in the registry.
MAXIMUM_LAYER_SIZE = '20G'
# Feature Flag: Whether team syncing from the backing auth is enabled.
FEATURE_TEAM_SYNCING = False
TEAM_RESYNC_STALE_TIME = '30m'
TEAM_SYNC_WORKER_FREQUENCY = 60 # seconds
# Feature Flag: If enabled, non-superusers can setup team syncing.
FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False
# The default configurable tag expiration time for time machine.
DEFAULT_TAG_EXPIRATION = '2w'
# The options to present in namespace settings for the tag expiration. If empty, no option
# will be given and the default will be displayed read-only.
TAG_EXPIRATION_OPTIONS = ['0s', '1d', '1w', '2w', '4w']
# Feature Flag: Whether users can view and change their tag expiration.
FEATURE_CHANGE_TAG_EXPIRATION = True
# Defines a secret for enabling the health-check endpoint's debug information.
ENABLE_HEALTH_DEBUG_SECRET = None
# The lifetime for a user recovery token before it becomes invalid.
USER_RECOVERY_TOKEN_LIFETIME = '30m'
# If specified, when app specific passwords expire by default.
APP_SPECIFIC_TOKEN_EXPIRATION = None
# Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI.
FEATURE_APP_SPECIFIC_TOKENS = True
# How long expired app specific tokens should remain visible to users before being automatically
# deleted. Set to None to turn off garbage collection.
EXPIRED_APP_SPECIFIC_TOKEN_GC = '1d'
# The size of pages returned by the Docker V2 API.
V2_PAGINATION_SIZE = 50
# If enabled, ensures that API calls are made with the X-Requested-With header
# when called from a browser.
BROWSER_API_CALLS_XHR_ONLY = True
# If set to a non-None integer value, the default number of maximum builds for a namespace.
DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None
# If set to a non-None integer value, the default number of maximum builds for a namespace whose
# creator IP is deemed a threat.
THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None
# For Billing Support Only: The number of allowed builds on a namespace that has been billed
# successfully.
BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None
# Configuration for the data model cache.
DATA_MODEL_CACHE_CONFIG = {
'engine': 'memcached',
'endpoint': ('127.0.0.1', 18080),
}
# Defines the number of successive failures of a build trigger's build before the trigger is
# automatically disabled.
SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100
# Defines the number of successive internal errors of a build trigger's build before the
# trigger is automatically disabled.
SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5
# Defines the delay required (in seconds) before the last_accessed field of a user/robot or access
# token will be updated after the previous update.
LAST_ACCESSED_UPDATE_THRESHOLD_S = 60
# Defines the number of results per page used to show search results
SEARCH_RESULTS_PER_PAGE = 10
# Defines the maximum number of pages the user can paginate before they are limited
SEARCH_MAX_RESULT_PAGE_COUNT = 10

View file

@@ -22,7 +22,6 @@ class ApiExceptionHandlingApi(Api):
@crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS)
def handle_error(self, error):
print('HANDLING ERROR IN API')
return super(ApiExceptionHandlingApi, self).handle_error(error)
@@ -38,30 +37,12 @@ def format_date(date):
return formatdate(timegm(date.utctimetuple()))
def verify_not_prod(func):
@add_method_metadata('enterprise_only', True)
@wraps(func)
def wrapped(*args, **kwargs):
# Verify that we are not running on a production (i.e. hosted) stack. If so, we fail.
# This should never happen (because of the feature-flag on SUPER_USERS), but we want to be
# absolutely sure.
# if app.config['SERVER_HOSTNAME'].find('quay.io') >= 0:
# TODO(config_port) fixme
if False:
logger.error('!!! Super user method called IN PRODUCTION !!!')
raise StandardError()
return func(*args, **kwargs)
return wrapped
def resource(*urls, **kwargs):
def wrapper(api_resource):
if not api_resource:
return None
print('registering resource: ', urls)
api_resource.registered = True
api.add_resource(api_resource, *urls, **kwargs)
return api_resource

View file

@@ -1,3 +1,4 @@
# TODO: extract the discovery logic into a top-level util and use it both here and in the old discovery.py
import logging
import sys
from collections import OrderedDict

View file

@@ -6,7 +6,7 @@ import signal
from flask import abort, request
from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model
from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname, validate_json_request
from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request
from config_app.c_app import app, config_provider, superusers, OVERRIDE_CONFIG_DIRECTORY, ip_resolver
from auth.auth_context import get_authenticated_user
@@ -58,7 +58,6 @@ class SuperUserConfig(ApiResource):
},
}
@verify_not_prod
@nickname('scGetConfig')
def get(self):
""" Returns the currently defined configuration, if any. """
@@ -68,13 +67,11 @@ class SuperUserConfig(ApiResource):
}
@nickname('scUpdateConfig')
@verify_not_prod
@validate_json_request('UpdateConfig')
def put(self):
""" Updates the config override file. """
# Note: This method is called to set the database configuration before super users exist,
# so we also allow it to be called if there is no valid registry configuration set up.
# if not config_provider.config_exists() or SuperUserPermission().can():
if not config_provider.config_exists():
config_object = request.get_json()['config']
hostname = request.get_json()['hostname']
@@ -124,7 +121,6 @@ class SuperUserRegistryStatus(ApiResource):
if a database is configured, and if it has any defined users.
"""
@nickname('scRegistryStatus')
@verify_not_prod
def get(self):
""" Returns the status of the registry. """
@@ -174,7 +170,6 @@ class _AlembicLogHandler(logging.Handler):
@resource('/v1/superuser/setupdb')
class SuperUserSetupDatabase(ApiResource):
""" Resource for invoking alembic to setup the database. """
@verify_not_prod
@nickname('scSetupDatabase')
def get(self):
""" Invokes the alembic upgrade process. """
@@ -224,14 +219,12 @@ def get_process_id(name):
class SuperUserShutdown(ApiResource):
""" Resource for sending a shutdown signal to the container. """
@verify_not_prod
@nickname('scShutdownContainer')
def post(self):
""" Sends a signal to the phusion init system to shut down the container. """
# Note: This method is called to set the database configuration before super users exist,
# so we also allow it to be called if there is no valid registry configuration set up.
# if app.config['TESTING'] or not database_has_users() or SuperUserPermission().can():
if app.config['TESTING'] or not database_has_users():
# Note: We skip if debugging locally.
if app.config.get('DEBUGGING') == True:
@@ -303,7 +296,7 @@ class SuperUserCreateInitialSuperUser(ApiResource):
superusers.register_superuser(username)
# Conduct login with that user.
# TODO(config): assuming we don't need to log in the user
# TODO(config): figure out if we need validation when checking the logged-in user
# common_login(superuser_uuid)
return {
@@ -336,14 +329,12 @@ class SuperUserConfigValidate(ApiResource):
}
@nickname('scValidateConfig')
@verify_not_prod
@validate_json_request('ValidateConfig')
def post(self, service):
""" Validates the given config for the given service. """
# Note: This method is called to validate the database configuration before super users exist,
# so we also allow it to be called if there is no valid registry configuration set up. Note that
# this is also safe since this method does not access any information not given in the request.
# if not config_provider.config_exists() or SuperUserPermission().can():
if not config_provider.config_exists():
config = request.get_json()['config']
validator_context = ValidatorContext.from_app(app, config, request.get_json().get('password', ''),

View file

@@ -4,7 +4,7 @@ import pathvalidate
from flask import request, jsonify
from config_app.config_endpoints.exception import InvalidRequest
from config_app.config_endpoints.api import resource, ApiResource, verify_not_prod, nickname
from config_app.config_endpoints.api import resource, ApiResource, nickname
from config_app.config_util.ssl import load_certificate, CertInvalidException
from config_app.c_app import app, config_provider
@@ -19,7 +19,6 @@ class SuperUserCustomCertificate(ApiResource):
""" Resource for managing a custom certificate. """
@nickname('uploadCustomCertificate')
@verify_not_prod
def post(self, certpath):
uploaded_file = request.files['file']
if not uploaded_file:
@@ -58,7 +57,6 @@ class SuperUserCustomCertificate(ApiResource):
return '', 204
@nickname('deleteCustomCertificate')
@verify_not_prod
def delete(self, certpath):
cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath)
config_provider.remove_volume_file(cert_full_path)
@@ -70,7 +68,6 @@ class SuperUserCustomCertificates(ApiResource):
""" Resource for managing custom certificates. """
@nickname('getCustomCertificates')
@verify_not_prod
def get(self):
has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY)
extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY)
@@ -107,7 +104,6 @@ class SuperUserCustomCertificates(ApiResource):
}
# TODO(config) port this endpoint when (https://github.com/quay/quay/pull/3055) merged to ensure no conflicts
@resource('/v1/superuser/keys')
class SuperUserServiceKeyManagement(ApiResource):
""" Resource for managing service keys."""
@@ -142,7 +138,6 @@ class SuperUserServiceKeyManagement(ApiResource):
},
}
@verify_not_prod
@nickname('listServiceKeys')
def get(self):
keys = pre_oci_model.list_all_service_keys()

View file

@@ -1,17 +1,8 @@
import json
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from datetime import datetime
from dateutil.relativedelta import relativedelta
from six import add_metaclass
from tzlocal import get_localzone
# from app import avatar, superusers
# from buildtrigger.basehandler import BuildTriggerHandler
from data import model
from config_app.config_endpoints.api import format_date
from util.morecollections import AttrDict
def user_view(user):
@@ -22,46 +13,6 @@ def user_view(user):
}
# class BuildTrigger(
# namedtuple('BuildTrigger', ['uuid', 'service_name', 'pull_robot', 'can_read', 'can_admin', 'for_build'])):
# """
# BuildTrigger represent a trigger that is associated with a build
# :type uuid: string
# :type service_name: string
# :type pull_robot: User
# :type can_read: boolean
# :type can_admin: boolean
# :type for_build: boolean
# """
#
# def to_dict(self):
# if not self.uuid:
# return None
#
# build_trigger = BuildTriggerHandler.get_handler(self)
# build_source = build_trigger.config.get('build_source')
#
# repo_url = build_trigger.get_repository_url() if build_source else None
# can_read = self.can_read or self.can_admin
#
# trigger_data = {
# 'id': self.uuid,
# 'service': self.service_name,
# 'is_active': build_trigger.is_active(),
#
# 'build_source': build_source if can_read else None,
# 'repository_url': repo_url if can_read else None,
#
# 'config': build_trigger.config if self.can_admin else {},
# 'can_invoke': self.can_admin,
# }
#
# if not self.for_build and self.can_admin and self.pull_robot:
# trigger_data['pull_robot'] = user_view(self.pull_robot)
#
# return trigger_data
class RepositoryBuild(namedtuple('RepositoryBuild',
['uuid', 'logs_archived', 'repository_namespace_user_username', 'repository_name',
'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', 'display_name',
@@ -185,9 +136,6 @@ class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robo
'username': self.username,
'email': self.email,
'verified': self.verified,
# todo(config) remove or add these lines from app
# 'avatar': avatar.get_data_for_user(self),
# 'super_user': superusers.is_superuser(self.username),
'enabled': self.enabled,
}
@@ -205,108 +153,9 @@ class Organization(namedtuple('Organization', ['username', 'email'])):
return {
'name': self.username,
'email': self.email,
# todo(config) remove or add these lines from app
# 'avatar': avatar.get_data_for_org(self),
}
class LogEntry(
namedtuple('LogEntry', [
'metadata_json', 'ip', 'datetime', 'performer_email', 'performer_username', 'performer_robot',
'account_organization', 'account_username', 'account_email', 'account_robot', 'kind',
])):
"""
LogEntry a single log entry.
:type metadata_json: string
:type ip: string
:type datetime: string
:type performer_email: string
:type performer_username: string
:type performer_robot: boolean
:type account_organization: boolean
:type account_username: string
:type account_email: string
:type account_robot: boolean
:type kind: string
"""
def to_dict(self):
view = {
'kind': self.kind,
'metadata': json.loads(self.metadata_json),
'ip': self.ip,
'datetime': format_date(self.datetime),
}
if self.performer_username:
performer = AttrDict({'username': self.performer_username, 'email': self.performer_email})
performer.robot = None
if self.performer_robot:
performer.robot = self.performer_robot
view['performer'] = {
'kind': 'user',
'name': self.performer_username,
'is_robot': self.performer_robot,
# todo(config) remove or add these lines from app
# 'avatar': avatar.get_data_for_user(performer),
}
if self.account_username:
account = AttrDict({'username': self.account_username, 'email': self.account_email})
if self.account_organization:
view['namespace'] = {
'kind': 'org',
'name': self.account_username,
# todo(config) remove or add these lines from app
# 'avatar': avatar.get_data_for_org(account),
}
else:
account.robot = None
if self.account_robot:
account.robot = self.account_robot
view['namespace'] = {
'kind': 'user',
'name': self.account_username,
# todo(config) remove or add these lines from app
# 'avatar': avatar.get_data_for_user(account),
}
return view
class LogEntryPage(
namedtuple('LogEntryPage', ['logs', 'next_page_token'])):
"""
LogEntryPage represents a single page of logs.
:type logs: [LogEntry]
:type next_page_token: {any -> any}
"""
class AggregatedLogEntry(
namedtuple('AggregatedLogEntry', ['count', 'kind_id', 'day', 'start_time'])):
"""
AggregatedLogEntry represents an aggregated view of logs.
:type count: int
:type kind_id: int
:type day: string
:type start_time: Date
"""
def to_dict(self):
synthetic_date = datetime(self.start_time.year, self.start_time.month, int(self.day), tzinfo=get_localzone())
if synthetic_date.day < self.start_time.day:
synthetic_date = synthetic_date + relativedelta(months=1)
kinds = model.log.get_log_entry_kinds()
view = {
'kind': kinds[self.kind_id],
'count': self.count,
'datetime': format_date(synthetic_date),
}
return view
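
(Editor's sketch, not part of the diff: a worked example of the synthetic-date logic above, with assumed values and the timezone handling omitted.)
from datetime import datetime
from dateutil.relativedelta import relativedelta

start_time = datetime(2018, 6, 25)   # start of the aggregation window
day = '3'                            # aggregated day-of-month
synthetic = datetime(start_time.year, start_time.month, int(day))
if synthetic.day < start_time.day:   # the 3rd precedes the 25th...
    synthetic += relativedelta(months=1)
print(synthetic)                     # 2018-07-03 00:00:00 -- attributed to the next month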
@add_metaclass(ABCMeta)
@@ -314,135 +163,8 @@ class SuperuserDataInterface(object):
"""
Interface that represents all data store interactions required by a superuser api.
"""
@abstractmethod
def get_logs_query(self, start_time, end_time, page_token=None):
"""
Returns a LogEntryPage.
"""
@abstractmethod
def get_aggregated_logs(self, start_time, end_time):
"""
Returns a list of AggregatedLogEntry
"""
@abstractmethod
def get_organizations(self):
"""
Returns a list of Organization
"""
@abstractmethod
def get_active_users(self):
"""
Returns a list of User
"""
@abstractmethod
def create_install_user(self, username, password, email):
"""
Returns the created user and confirmation code for email confirmation
"""
@abstractmethod
def get_nonrobot_user(self, username):
"""
Returns a User
"""
@abstractmethod
def create_reset_password_email_code(self, email):
"""
Returns a recover password code
"""
@abstractmethod
def mark_user_for_deletion(self, username):
"""
Returns None
"""
@abstractmethod
def change_password(self, username, password):
"""
Returns None
"""
@abstractmethod
def update_email(self, username, email, auto_verify):
"""
Returns None
"""
@abstractmethod
def update_enabled(self, username, enabled):
"""
Returns None
"""
@abstractmethod
def take_ownership(self, namespace, authed_user):
"""
Returns id of entity and whether the entity was a user
"""
@abstractmethod
def mark_organization_for_deletion(self, name):
"""
Returns None
"""
@abstractmethod
def change_organization_name(self, old_org_name, new_org_name):
"""
Returns updated Organization
"""
@abstractmethod
def list_all_service_keys(self):
"""
Returns a list of service keys
"""
@abstractmethod
def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
"""
Returns a tuple of private key and public key id
"""
@abstractmethod
def approve_service_key(self, kid, approver, approval_type, notes=''):
"""
Returns the approved Key
"""
@abstractmethod
def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
"""
Returns ServiceKey
"""
@abstractmethod
def set_key_expiration(self, kid, expiration_date):
"""
Returns None
"""
@abstractmethod
def update_service_key(self, kid, name=None, metadata=None):
"""
Returns None
"""
@abstractmethod
def delete_service_key(self, kid):
"""
Returns deleted ServiceKey
"""
@abstractmethod
def get_repository_build(self, uuid):
"""
Returns RepositoryBuild
"""

View file

@@ -2,36 +2,6 @@ from data import model
from config_app.config_endpoints.api.superuser_models_interface import SuperuserDataInterface, User, ServiceKey, Approval
#
# def _create_log(log, log_kind):
# account_organization = None
# account_username = None
# account_email = None
# account_robot = None
# try:
# account_organization = log.account.organization
# account_username = log.account.username
# account_email = log.account.email
# account_robot = log.account.robot
# except AttributeError:
# pass
#
# performer_robot = None
# performer_username = None
# performer_email = None
#
# try:
# performer_robot = log.performer.robot
# performer_username = log.performer.username
# performer_email = log.performer.email
# except AttributeError:
# pass
#
# return LogEntry(log.metadata_json, log.ip, log.datetime, performer_email, performer_username,
# performer_robot, account_organization, account_username,
# account_email, account_robot, log_kind[log.kind_id])
def _create_user(user):
if user is None:
return None
@@ -46,18 +16,6 @@ def _create_key(key):
return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, key.expiration_date,
key.rotation_duration, approval)
#
#
# class ServiceKeyDoesNotExist(Exception):
# pass
#
#
# class ServiceKeyAlreadyApproved(Exception):
# pass
#
#
# class InvalidRepositoryBuildException(Exception):
# pass
class PreOCIModel(SuperuserDataInterface):
@@ -65,210 +23,9 @@ class PreOCIModel(SuperuserDataInterface):
PreOCIModel implements the data model for the SuperUser using a database schema
before it was changed to support the OCI specification.
"""
def get_logs_query(self, start_time, end_time, page_token=None):
pass
def get_aggregated_logs(self, start_time, end_time):
pass
def get_organizations(self):
pass
def get_active_users(self):
pass
def create_install_user(self, username, password, email):
pass
def get_nonrobot_user(self, username):
pass
def create_reset_password_email_code(self, email):
pass
def mark_user_for_deletion(self, username):
pass
def change_password(self, username, password):
pass
def update_email(self, username, email, auto_verify):
pass
def update_enabled(self, username, enabled):
pass
def take_ownership(self, namespace, authed_user):
pass
def mark_organization_for_deletion(self, name):
pass
def change_organization_name(self, old_org_name, new_org_name):
pass
def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
pass
def approve_service_key(self, kid, approver, approval_type, notes=''):
pass
def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
pass
def set_key_expiration(self, kid, expiration_date):
pass
def update_service_key(self, kid, name=None, metadata=None):
pass
def delete_service_key(self, kid):
pass
def get_repository_build(self, uuid):
pass
# def get_repository_build(self, uuid):
# try:
# build = model.build.get_repository_build(uuid)
# except model.InvalidRepositoryBuildException as e:
# raise InvalidRepositoryBuildException(e.message)
#
# repo_namespace = build.repository_namespace_user_username
# repo_name = build.repository_name
#
# can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
# can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
# can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
# job_config = get_job_config(build.job_config)
# phase, status, error = _get_build_status(build)
# url = userfiles.get_file_url(self.resource_key, request.remote_addr, requires_cors=True)
#
# return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace, repo_name, can_write, can_read,
# _create_user(build.pull_robot), build.resource_key,
# BuildTrigger(build.trigger.uuid, build.trigger.service.name,
# _create_user(build.trigger.pull_robot), can_read, can_admin, True),
# build.display_name, build.display_name, build.started, job_config, phase, status, error, url)
#
# def delete_service_key(self, kid):
# try:
# key = model.service_keys.delete_service_key(kid)
# except model.ServiceKeyDoesNotExist:
# raise ServiceKeyDoesNotExist
# return _create_key(key)
#
# def update_service_key(self, kid, name=None, metadata=None):
# model.service_keys.update_service_key(kid, name, metadata)
#
# def set_key_expiration(self, kid, expiration_date):
# model.service_keys.set_key_expiration(kid, expiration_date)
#
# def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
# try:
# key = model.service_keys.get_service_key(kid, approved_only=approved_only, alive_only=alive_only)
# return _create_key(key)
# except model.ServiceKeyDoesNotExist:
# raise ServiceKeyDoesNotExist
#
# def approve_service_key(self, kid, approver, approval_type, notes=''):
# try:
# key = model.service_keys.approve_service_key(kid, approver, approval_type, notes=notes)
# return _create_key(key)
# except model.ServiceKeyDoesNotExist:
# raise ServiceKeyDoesNotExist
# except model.ServiceKeyAlreadyApproved:
# raise ServiceKeyAlreadyApproved
#
# def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
# (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name)
#
# return private_key, key.kid
def list_all_service_keys(self):
keys = model.service_keys.list_all_keys()
return [_create_key(key) for key in keys]
# def change_organization_name(self, old_org_name, new_org_name):
# org = model.organization.get_organization(old_org_name)
# if new_org_name is not None:
# org = model.user.change_username(org.id, new_org_name)
#
# return Organization(org.username, org.email)
#
# def mark_organization_for_deletion(self, name):
# org = model.organization.get_organization(name)
# model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue, force=True)
#
# def take_ownership(self, namespace, authed_user):
# entity = model.user.get_user_or_org(namespace)
# if entity is None:
# return None, False
#
# was_user = not entity.organization
# if entity.organization:
# # Add the superuser as an admin to the owners team of the org.
# model.organization.add_user_as_admin(authed_user, entity)
# else:
# # If the entity is a user, convert it to an organization and add the current superuser
# # as the admin.
# model.organization.convert_user_to_organization(entity, authed_user)
# return entity.id, was_user
#
# def update_enabled(self, username, enabled):
# user = model.user.get_nonrobot_user(username)
# model.user.update_enabled(user, bool(enabled))
#
# def update_email(self, username, email, auto_verify):
# user = model.user.get_nonrobot_user(username)
# model.user.update_email(user, email, auto_verify)
#
# def change_password(self, username, password):
# user = model.user.get_nonrobot_user(username)
# model.user.change_password(user, password)
#
# def mark_user_for_deletion(self, username):
# user = model.user.get_nonrobot_user(username)
# model.user.mark_namespace_for_deletion(user, all_queues, namespace_gc_queue, force=True)
#
# def create_reset_password_email_code(self, email):
# code = model.user.create_reset_password_email_code(email)
# return code.code
#
# def get_nonrobot_user(self, username):
# user = model.user.get_nonrobot_user(username)
# if user is None:
# return None
# return _create_user(user)
#
# def create_install_user(self, username, password, email):
# prompts = model.user.get_default_user_prompts(features)
# user = model.user.create_user(username, password, email, auto_verify=not features.MAILING,
# email_required=features.MAILING, prompts=prompts)
#
# return_user = _create_user(user)
# # If mailing is turned on, send the user a verification email.
# if features.MAILING:
# confirmation = model.user.create_confirm_email_code(user)
# return return_user, confirmation.code
# return return_user, ''
#
# def get_active_users(self, disabled=True):
# users = model.user.get_active_users(disabled=disabled)
# return [_create_user(user) for user in users]
#
# def get_organizations(self):
# return [Organization(org.username, org.email) for org in model.organization.get_organizations()]
#
# def get_aggregated_logs(self, start_time, end_time):
# aggregated_logs = model.log.get_aggregated_logs(start_time, end_time)
# return [AggregatedLogEntry(log.count, log.kind_id, log.day, start_time) for log in aggregated_logs]
#
# def get_logs_query(self, start_time, end_time, page_token=None):
# logs_query = model.log.get_logs_query(start_time, end_time)
# logs, next_page_token = model.modelutil.paginate(logs_query, database.LogEntry, descending=True,
# page_token=page_token, limit=20)
# kinds = model.log.get_log_entry_kinds()
# return LogEntryPage([_create_log(log, kinds) for log in logs], next_page_token)
pre_oci_model = PreOCIModel()

View file

@@ -1,4 +1,6 @@
from auth.auth_context import get_authenticated_user
from config_app.config_endpoints.api import resource, ApiResource, nickname
from config_app.config_endpoints.api.superuser_models_interface import user_view
@resource('/v1/user/')
@@ -8,11 +10,10 @@ class User(ApiResource):
@nickname('getLoggedInUser')
def get(self):
""" Get user information for the authenticated user. """
# user = get_authenticated_user()
user = get_authenticated_user()
# TODO(config): figure out if we need user validation
# if user is None or user.organization or not UserReadPermission(user.username).can():
# raise InvalidToken("Requires authentication", payload={'session_required': False})
# return user_view(user)
return {
'anonymous': False,
# 'username': user.username,
}
return user_view(user)

View file

@@ -5,6 +5,8 @@ import re
from flask import make_response, render_template
from flask_restful import reqparse
from config_app._init_config import ROOT_DIR
def truthy_bool(param):
return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'}
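
(Editor's sketch, not part of the diff: the excluded set covers the usual query-string spellings of false; anything else, including None, comes back True.)
assert truthy_bool('true') is True
assert truthy_bool('False') is False
assert truthy_bool('0') is False
assert truthy_bool(None) is True   # None is not in the excluded set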
@@ -30,9 +32,9 @@ def _list_files(path, extension, contains=""):
def join_path(dp, f):
# Remove the static/ prefix. It is added in the template.
return os.path.join(dp, f)[len('static/'):]
return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):]
filepath = os.path.join('static/', path)
filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path)
return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)]
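
(Editor's sketch, not part of the diff: the effect of anchoring at ROOT_DIR, with an assumed ROOT_DIR of /quay. os.walk now starts from an absolute path, so the listing works from any working directory, while the slice still hands the template a path relative to the static directory.)
import os
ROOT_DIR = '/quay'                              # assumed for illustration
dp, f = '/quay/config_app/static/js', 'app.js'  # as yielded by os.walk
full = os.path.join(dp, f)
print(full[len(ROOT_DIR) + 1 + len('config_app/static/'):])  # 'js/app.js'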

View file

@@ -5,30 +5,7 @@ from werkzeug.exceptions import HTTPException
class ApiErrorType(Enum):
external_service_timeout = 'external_service_timeout'
invalid_request = 'invalid_request'
invalid_response = 'invalid_response'
invalid_token = 'invalid_token'
expired_token = 'expired_token'
insufficient_scope = 'insufficient_scope'
fresh_login_required = 'fresh_login_required'
exceeds_license = 'exceeds_license'
not_found = 'not_found'
downstream_issue = 'downstream_issue'
ERROR_DESCRIPTION = {
ApiErrorType.external_service_timeout.value: "An external service timed out. Retrying the request may resolve the issue.",
ApiErrorType.invalid_request.value: "The request was invalid. It may have contained invalid values or was improperly formatted.",
ApiErrorType.invalid_response.value: "The response was invalid.",
ApiErrorType.invalid_token.value: "The access token provided was invalid.",
ApiErrorType.expired_token.value: "The access token provided has expired.",
ApiErrorType.insufficient_scope.value: "The access token did not have sufficient scope to access the requested resource.",
ApiErrorType.fresh_login_required.value: "The action requires a fresh login to succeed.",
ApiErrorType.exceeds_license.value: "The action was refused because the current license does not allow it.",
ApiErrorType.not_found.value: "The resource was not found.",
ApiErrorType.downstream_issue.value: "An error occurred in a downstream service.",
}
class ApiException(HTTPException):
@@ -79,10 +56,6 @@ class ApiException(HTTPException):
return rv
class ExternalServiceError(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.external_service_timeout, 520, error_description, payload)
class InvalidRequest(ApiException):
def __init__(self, error_description, payload=None):
@@ -92,32 +65,3 @@ class InvalidRequest(ApiException):
class InvalidResponse(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload)
class InvalidToken(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.invalid_token, 401, error_description, payload)
class ExpiredToken(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.expired_token, 401, error_description, payload)
class FreshLoginRequired(ApiException):
def __init__(self, payload=None):
ApiException.__init__(self, ApiErrorType.fresh_login_required, 401, "Requires fresh login", payload)
class ExceedsLicenseException(ApiException):
def __init__(self, payload=None):
ApiException.__init__(self, ApiErrorType.exceeds_license, 402, 'Payment Required', payload)
class NotFound(ApiException):
def __init__(self, payload=None):
ApiException.__init__(self, ApiErrorType.not_found, 404, 'Not Found', payload)
class DownstreamIssue(ApiException):
def __init__(self, error_description, payload=None):
ApiException.__init__(self, ApiErrorType.downstream_issue, 520, error_description, payload)

View file

@@ -1,13 +1,15 @@
from flask import Blueprint
from cachetools import lru_cache
from config_app.config_endpoints.common import render_page_template
from config_app.config_endpoints.api.discovery import generate_route_data
# from config_util.cache import no_cache
from config_app.config_endpoints.api import no_cache
setup_web = Blueprint('setup_web', __name__, template_folder='templates')
# @lru_cache(maxsize=1)
@lru_cache(maxsize=1)
def _get_route_data():
return generate_route_data()
@@ -16,7 +18,7 @@ def render_page_template_with_routedata(name, *args, **kwargs):
return render_page_template(name, _get_route_data(), *args, **kwargs)
# @no_cache
@no_cache
@setup_web.route('/', methods=['GET'], defaults={'path': ''})
def index(path, **kwargs):
return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs)

View file

@@ -1,108 +0,0 @@
import os
from datetime import datetime, timedelta
from tempfile import NamedTemporaryFile
from config import DefaultConfig
class FakeTransaction(object):
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
pass
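
(Editor's sketch, not part of the diff: FakeTransaction is a no-op context manager, so test code written "inside a transaction" runs without any commit or rollback.)
with FakeTransaction():
    pass  # body executes; __exit__ does nothing on the database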
TEST_DB_FILE = NamedTemporaryFile(delete=True)
class TestConfig(DefaultConfig):
TESTING = True
SECRET_KEY = 'a36c9d7d-25a9-4d3f-a586-3d2f8dc40a83'
BILLING_TYPE = 'FakeStripe'
TEST_DB_FILE = TEST_DB_FILE
DB_URI = os.environ.get('TEST_DATABASE_URI', 'sqlite:///{0}'.format(TEST_DB_FILE.name))
DB_CONNECTION_ARGS = {
'threadlocals': True,
'autorollback': True,
}
@staticmethod
def create_transaction(db):
return FakeTransaction()
DB_TRANSACTION_FACTORY = create_transaction
DISTRIBUTED_STORAGE_CONFIG = {'local_us': ['FakeStorage', {}], 'local_eu': ['FakeStorage', {}]}
DISTRIBUTED_STORAGE_PREFERENCE = ['local_us']
BUILDLOGS_MODULE_AND_CLASS = ('test.testlogs', 'testlogs.TestBuildLogs')
BUILDLOGS_OPTIONS = ['devtable', 'building', 'deadbeef-dead-beef-dead-beefdeadbeef', False]
USERFILES_LOCATION = 'local_us'
FEATURE_SUPER_USERS = True
FEATURE_BILLING = True
FEATURE_MAILING = True
SUPER_USERS = ['devtable']
LICENSE_USER_LIMIT = 500
LICENSE_EXPIRATION = datetime.now() + timedelta(weeks=520)
LICENSE_EXPIRATION_WARNING = datetime.now() + timedelta(weeks=520)
FEATURE_GITHUB_BUILD = True
FEATURE_BITTORRENT = True
FEATURE_ACI_CONVERSION = True
CLOUDWATCH_NAMESPACE = None
FEATURE_SECURITY_SCANNER = True
FEATURE_SECURITY_NOTIFICATIONS = True
SECURITY_SCANNER_ENDPOINT = 'http://fakesecurityscanner/'
SECURITY_SCANNER_API_VERSION = 'v1'
SECURITY_SCANNER_ENGINE_VERSION_TARGET = 1
SECURITY_SCANNER_API_TIMEOUT_SECONDS = 1
FEATURE_SIGNING = True
SIGNING_ENGINE = 'gpg2'
GPG2_PRIVATE_KEY_NAME = 'EEB32221'
GPG2_PRIVATE_KEY_FILENAME = 'test/data/signing-private.gpg'
GPG2_PUBLIC_KEY_FILENAME = 'test/data/signing-public.gpg'
INSTANCE_SERVICE_KEY_KID_LOCATION = 'test/data/test.kid'
INSTANCE_SERVICE_KEY_LOCATION = 'test/data/test.pem'
PROMETHEUS_AGGREGATOR_URL = None
GITHUB_LOGIN_CONFIG = {}
GOOGLE_LOGIN_CONFIG = {}
FEATURE_GITHUB_LOGIN = True
FEATURE_GOOGLE_LOGIN = True
TESTOIDC_LOGIN_CONFIG = {
'CLIENT_ID': 'foo',
'CLIENT_SECRET': 'bar',
'OIDC_SERVER': 'http://fakeoidc',
'DEBUGGING': True,
'LOGIN_BINDING_FIELD': 'sub',
}
RECAPTCHA_SITE_KEY = 'somekey'
RECAPTCHA_SECRET_KEY = 'somesecretkey'
FEATURE_APP_REGISTRY = True
FEATURE_TEAM_SYNCING = True
FEATURE_CHANGE_TAG_EXPIRATION = True
TAG_EXPIRATION_OPTIONS = ['0s', '1s', '1d', '1w', '2w', '4w']
DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None
DATA_MODEL_CACHE_CONFIG = {
'engine': 'inmemory',
}

View file

@@ -2,7 +2,7 @@ import os
import logging
from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml,
CannotWriteConfigException)
CannotWriteConfigException)
logger = logging.getLogger(__name__)

View file

@@ -6,7 +6,7 @@ from six import add_metaclass
from jsonschema import validate, ValidationError
from config_app.config_util.config.schema import CONFIG_SCHEMA
from util.config.schema import CONFIG_SCHEMA
logger = logging.getLogger(__name__)

View file

@@ -1,914 +0,0 @@
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
# not be documented for external users. These will generally be used for internal test or only
# given to customers when they have been briefed on the side effects of using them.
INTERNAL_ONLY_PROPERTIES = {
'__module__',
'__doc__',
'create_transaction',
'TESTING',
'SEND_FILE_MAX_AGE_DEFAULT',
'REPLICATION_QUEUE_NAME',
'DOCKERFILE_BUILD_QUEUE_NAME',
'CHUNK_CLEANUP_QUEUE_NAME',
'SECSCAN_NOTIFICATION_QUEUE_NAME',
'SECURITY_SCANNER_ISSUER_NAME',
'NOTIFICATION_QUEUE_NAME',
'NAMESPACE_GC_QUEUE_NAME',
'FEATURE_BILLING',
'FEATURE_SUPPORT_CHAT',
'BILLING_TYPE',
'INSTANCE_SERVICE_KEY_LOCATION',
'INSTANCE_SERVICE_KEY_REFRESH',
'INSTANCE_SERVICE_KEY_SERVICE',
'INSTANCE_SERVICE_KEY_KID_LOCATION',
'INSTANCE_SERVICE_KEY_EXPIRATION',
'UNAPPROVED_SERVICE_KEY_TTL_SEC',
'EXPIRED_SERVICE_KEY_TTL_SEC',
'REGISTRY_JWT_AUTH_MAX_FRESH_S',
'BITTORRENT_FILENAME_PEPPER',
'BITTORRENT_WEBSEED_LIFETIME',
'SERVICE_LOG_ACCOUNT_ID',
'BUILDLOGS_OPTIONS',
'LIBRARY_NAMESPACE',
'STAGGER_WORKERS',
'QUEUE_WORKER_METRICS_REFRESH_SECONDS',
'PUSH_TEMP_TAG_EXPIRATION_SEC',
'GARBAGE_COLLECTION_FREQUENCY',
'PAGE_TOKEN_KEY',
'BUILD_MANAGER',
'JWTPROXY_AUDIENCE',
'SYSTEM_SERVICE_BLACKLIST',
'JWTPROXY_SIGNER',
'SECURITY_SCANNER_INDEXING_MIN_ID',
'STATIC_SITE_BUCKET',
'LABEL_KEY_RESERVED_PREFIXES',
'TEAM_SYNC_WORKER_FREQUENCY',
'DOCUMENTATION_METADATA',
'DOCUMENTATION_LOCATION',
'JSONIFY_PRETTYPRINT_REGULAR',
'SYSTEM_LOGS_FILE',
'SYSTEM_LOGS_PATH',
'SYSTEM_SERVICES_PATH',
'TUF_GUN_PREFIX',
'LOGGING_LEVEL',
'SIGNED_GRANT_EXPIRATION_SEC',
'PROMETHEUS_AGGREGATOR_URL',
'DB_TRANSACTION_FACTORY',
'NOTIFICATION_SEND_TIMEOUT',
'QUEUE_METRICS_TYPE',
'MAIL_FAIL_SILENTLY',
'LOCAL_OAUTH_HANDLER',
'USE_CDN',
'ANALYTICS_TYPE',
'LAST_ACCESSED_UPDATE_THRESHOLD_S',
'EXCEPTION_LOG_TYPE',
'SENTRY_DSN',
'SENTRY_PUBLIC_DSN',
'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT',
'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT',
'SECURITY_SCANNER_ENDPOINT_BATCH',
'SECURITY_SCANNER_API_TIMEOUT_SECONDS',
'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS',
'SECURITY_SCANNER_ENGINE_VERSION_TARGET',
'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS',
'SECURITY_SCANNER_API_VERSION',
'DATA_MODEL_CACHE_CONFIG',
# TODO: move this into the schema once we support signing in QE.
'FEATURE_SIGNING',
'TUF_SERVER',
}
CONFIG_SCHEMA = {
'type': 'object',
'description': 'Schema for Quay configuration',
'required': [
'PREFERRED_URL_SCHEME',
'SERVER_HOSTNAME',
'DB_URI',
'AUTHENTICATION_TYPE',
'DISTRIBUTED_STORAGE_CONFIG',
'BUILDLOGS_REDIS',
'USER_EVENTS_REDIS',
'DISTRIBUTED_STORAGE_PREFERENCE',
'DEFAULT_TAG_EXPIRATION',
'TAG_EXPIRATION_OPTIONS',
],
'properties': {
# Hosting.
'PREFERRED_URL_SCHEME': {
'type': 'string',
'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`',
'enum': ['http', 'https'],
'x-example': 'https',
},
'SERVER_HOSTNAME': {
'type': 'string',
'description': 'The URL at which Quay is accessible, without the scheme.',
'x-example': 'quay.io',
},
'EXTERNAL_TLS_TERMINATION': {
'type': 'boolean',
'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.',
'x-example': True,
},
# User-visible configuration.
'REGISTRY_TITLE': {
'type': 'string',
'description': 'If specified, the long-form title for the registry. Defaults to `Quay Enterprise`.',
'x-example': 'Corp Container Service',
},
'REGISTRY_TITLE_SHORT': {
'type': 'string',
'description': 'If specified, the short-form title for the registry. Defaults to `Quay Enterprise`.',
'x-example': 'CCS',
},
'CONTACT_INFO': {
'type': 'array',
'minItems': 1,
'uniqueItems': True,
'description': 'If specified, contact information to display on the contact page. ' +
'If only a single piece of contact information is specified, the contact footer will link directly.',
'items': [
{
'type': 'string',
'pattern': '^mailto:(.)+$',
'x-example': 'mailto:support@quay.io',
'description': 'Adds a link to send an e-mail',
},
{
'type': 'string',
'pattern': '^irc://(.)+$',
'x-example': 'irc://chat.freenode.net:6665/quay',
'description': 'Adds a link to visit an IRC chat room',
},
{
'type': 'string',
'pattern': '^tel:(.)+$',
'x-example': 'tel:+1-888-930-3475',
'description': 'Adds a link to call a phone number',
},
{
'type': 'string',
'pattern': '^http(s)?://(.)+$',
'x-example': 'https://twitter.com/quayio',
'description': 'Adds a link to a defined URL',
},
],
},
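# Illustrative CONTACT_INFO value built from the x-examples above (the
# entries themselves are placeholders):
# 'CONTACT_INFO': ['mailto:support@quay.io', 'irc://chat.freenode.net:6665/quay',
#                  'tel:+1-888-930-3475', 'https://twitter.com/quayio'],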
'SEARCH_RESULTS_PER_PAGE': {
'type': 'number',
'description': 'Number of results returned per page by the search page. Defaults to 10',
'x-example': 10,
},
'SEARCH_MAX_RESULT_PAGE_COUNT': {
'type': 'number',
'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10',
'x-example': 10,
},
# E-mail.
'FEATURE_MAILING': {
'type': 'boolean',
'description': 'Whether emails are enabled. Defaults to True',
'x-example': True,
},
'MAIL_SERVER': {
'type': 'string',
'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.',
'x-example': 'smtp.somedomain.com',
},
'MAIL_USE_TLS': {
'type': 'boolean',
'description': 'If specified, whether to use TLS for sending e-mails.',
'x-example': True,
},
'MAIL_PORT': {
'type': 'number',
'description': 'The SMTP port to use. If not specified, defaults to 587.',
'x-example': 588,
},
'MAIL_USERNAME': {
'type': ['string', 'null'],
'description': 'The SMTP username to use when sending e-mails.',
'x-example': 'myuser',
},
'MAIL_PASSWORD': {
'type': ['string', 'null'],
'description': 'The SMTP password to use when sending e-mails.',
'x-example': 'mypassword',
},
'MAIL_DEFAULT_SENDER': {
'type': ['string', 'null'],
'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `support@quay.io`.',
'x-example': 'support@myco.com',
},
# Database.
'DB_URI': {
'type': 'string',
'description': 'The URI at which to access the database, including any credentials.',
'x-example': 'mysql+pymysql://username:password@dns.of.database/quay',
'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495',
},
'DB_CONNECTION_ARGS': {
'type': 'object',
'description': 'If specified, connection arguments for the database such as timeouts and SSL.',
'properties': {
'threadlocals': {
'type': 'boolean',
'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`'
},
'autorollback': {
'type': 'boolean',
'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`'
},
'ssl': {
'type': 'object',
'description': 'SSL connection configuration',
'properties': {
'ca': {
'type': 'string',
'description': '*Absolute container path* to the CA certificate to use for SSL connections',
'x-example': 'conf/stack/ssl-ca-cert.pem',
},
},
'required': ['ca'],
},
},
'required': ['threadlocals', 'autorollback'],
},
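# Illustrative complete value for DB_CONNECTION_ARGS (the CA path is a
# placeholder):
# 'DB_CONNECTION_ARGS': {
#   'threadlocals': True,
#   'autorollback': True,
#   'ssl': {'ca': 'conf/stack/ssl-ca-cert.pem'},
# },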
'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': {
'type': 'boolean',
'description': 'If true, pulls in which the pull audit log entry cannot be written will ' +
'still succeed. Useful if the database can fall back into a read-only state ' +
'and it is desired for pulls to continue during that time. Defaults to False.',
'x-example': True,
},
# Storage.
'FEATURE_STORAGE_REPLICATION': {
'type': 'boolean',
'description': 'Whether to automatically replicate between storage engines. Defaults to False',
'x-example': False,
},
'FEATURE_PROXY_STORAGE': {
'type': 'boolean',
'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False',
'x-example': False,
},
'MAXIMUM_LAYER_SIZE': {
'type': 'string',
'description': 'Maximum allowed size of an image layer. Defaults to 20G',
'x-example': '100G',
'pattern': '^[0-9]+(G|M)$',
},
'DISTRIBUTED_STORAGE_CONFIG': {
'type': 'object',
'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' +
' for a storage engine, with the value being a tuple of the type and ' +
' configuration for that engine.',
'x-example': {
'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}],
},
'items': {
'type': 'array',
},
},
'DISTRIBUTED_STORAGE_PREFERENCE': {
'type': 'array',
'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' +
'use. A preferred engine means it is checked first for pulling, and images are ' +
'pushed to it.',
'items': {
'type': 'string',
'uniqueItems': True,
},
'x-example': ['s3_us_east', 's3_us_west'],
},
'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': {
'type': 'array',
'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' +
'images should be fully replicated, by default, to all other storage engines.',
'items': {
'type': 'string',
'uniqueItems': True,
},
'x-example': ['s3_us_east', 's3_us_west'],
},
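# Illustrative set of the three storage properties above working together,
# reusing the LocalStorage x-example (IDs and paths are placeholders):
# 'DISTRIBUTED_STORAGE_CONFIG': {'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}]},
# 'DISTRIBUTED_STORAGE_PREFERENCE': ['local_storage'],
# 'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': ['local_storage'],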
'USERFILES_LOCATION': {
'type': 'string',
'description': 'ID of the storage engine in which to place user-uploaded files',
'x-example': 's3_us_east',
},
'USERFILES_PATH': {
'type': 'string',
'description': 'Path under storage in which to place user-uploaded files',
'x-example': 'userfiles',
},
'ACTION_LOG_ARCHIVE_LOCATION': {
'type': 'string',
'description': 'If action log archiving is enabled, the storage engine in which to place the ' +
'archived data.',
'x-example': 's3_us_east',
},
'ACTION_LOG_ARCHIVE_PATH': {
'type': 'string',
'description': 'If action log archiving is enabled, the path in storage in which to place the ' +
'archived data.',
'x-example': 'archives/actionlogs',
},
'LOG_ARCHIVE_LOCATION': {
'type': 'string',
'description': 'If builds are enabled, the storage engine in which to place the ' +
'archived build logs.',
'x-example': 's3_us_east',
},
'LOG_ARCHIVE_PATH': {
'type': 'string',
'description': 'If builds are enabled, the path in storage in which to place the ' +
'archived build logs.',
'x-example': 'archives/buildlogs',
},
# Authentication.
'AUTHENTICATION_TYPE': {
'type': 'string',
'description': 'The authentication engine to use for credential authentication.',
'x-example': 'Database',
'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC'],
},
'SUPER_USERS': {
'type': 'array',
'description': 'Quay usernames of those users to be granted superuser privileges',
'uniqueItems': True,
'items': {
'type': 'string',
},
},
'DIRECT_OAUTH_CLIENTID_WHITELIST': {
'type': 'array',
'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' +
'to perform direct OAuth approval without user approval.',
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html',
'uniqueItems': True,
'items': {
'type': 'string',
},
},
# Redis.
'BUILDLOGS_REDIS': {
'type': 'object',
'description': 'Connection information for Redis for build logs caching',
'required': ['host'],
'properties': {
'host': {
'type': 'string',
'description': 'The hostname at which Redis is accessible',
'x-example': 'my.redis.cluster',
},
'port': {
'type': 'number',
'description': 'The port at which Redis is accessible',
'x-example': 1234,
},
'password': {
'type': 'string',
'description': 'The password to connect to the Redis instance',
'x-example': 'mypassword',
},
},
},
'USER_EVENTS_REDIS': {
'type': 'object',
'description': 'Connection information for Redis for user event handling',
'required': ['host'],
'properties': {
'host': {
'type': 'string',
'description': 'The hostname at which Redis is accessible',
'x-example': 'my.redis.cluster',
},
'port': {
'type': 'number',
'description': 'The port at which Redis is accessible',
'x-example': 1234,
},
'password': {
'type': 'string',
'description': 'The password to connect to the Redis instance',
'x-example': 'mypassword',
},
},
},
# OAuth configuration.
'GITHUB_LOGIN_CONFIG': {
'type': ['object', 'null'],
'description': 'Configuration for using GitHub (Enterprise) as an external login provider',
'required': ['CLIENT_ID', 'CLIENT_SECRET'],
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html',
'properties': {
'GITHUB_ENDPOINT': {
'type': 'string',
'description': 'The endpoint of the GitHub (Enterprise) being hit',
'x-example': 'https://github.com/',
},
'API_ENDPOINT': {
'type': 'string',
'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
'x-example': 'https://api.github.com/',
},
'CLIENT_ID': {
'type': 'string',
'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG',
'x-example': '0e8dbe15c4c7630b6780',
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
},
'CLIENT_SECRET': {
'type': 'string',
'description': 'The registered client secret for this Quay instance',
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
},
'ORG_RESTRICT': {
'type': 'boolean',
'description': 'If true, only users within the organization whitelist can log in using this provider',
'x-example': True,
},
'ALLOWED_ORGANIZATIONS': {
'type': 'array',
'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option',
'uniqueItems': True,
'items': {
'type': 'string',
},
},
},
},
'BITBUCKET_TRIGGER_CONFIG': {
'type': ['object', 'null'],
'description': 'Configuration for using BitBucket for build triggers',
'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'],
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html',
'properties': {
'CONSUMER_KEY': {
'type': 'string',
'description': 'The registered consumer key (client ID) for this Quay instance',
'x-example': '0e8dbe15c4c7630b6780',
},
'CONSUMER_SECRET': {
'type': 'string',
'description': 'The registered consumer secret (client secret) for this Quay instance',
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
},
},
},
'GITHUB_TRIGGER_CONFIG': {
'type': ['object', 'null'],
'description': 'Configuration for using GitHub (Enterprise) for build triggers',
'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html',
'properties': {
'GITHUB_ENDPOINT': {
'type': 'string',
'description': 'The endpoint of the GitHub (Enterprise) being hit',
'x-example': 'https://github.com/',
},
'API_ENDPOINT': {
'type': 'string',
'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com',
'x-example': 'https://api.github.com/',
},
'CLIENT_ID': {
'type': 'string',
'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG',
'x-example': '0e8dbe15c4c7630b6780',
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
},
'CLIENT_SECRET': {
'type': 'string',
'description': 'The registered client secret for this Quay instance',
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html',
},
},
},
'GOOGLE_LOGIN_CONFIG': {
'type': ['object', 'null'],
'description': 'Configuration for using Google for external authentication',
'required': ['CLIENT_ID', 'CLIENT_SECRET'],
'properties': {
'CLIENT_ID': {
'type': 'string',
'description': 'The registered client ID for this Quay instance',
'x-example': '0e8dbe15c4c7630b6780',
},
'CLIENT_SECRET': {
'type': 'string',
'description': 'The registered client secret for this Quay instance',
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
},
},
},
'GITLAB_TRIGGER_CONFIG': {
'type': ['object', 'null'],
'description': 'Configuration for using Gitlab (Enterprise) for build triggers',
'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'],
'properties': {
'GITLAB_ENDPOINT': {
'type': 'string',
'description': 'The endpoint at which Gitlab (Enterprise) is running',
'x-example': 'https://gitlab.com',
},
'CLIENT_ID': {
'type': 'string',
'description': 'The registered client ID for this Quay instance',
'x-example': '0e8dbe15c4c7630b6780',
},
'CLIENT_SECRET': {
'type': 'string',
'description': 'The registered client secret for this Quay instance',
'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846',
},
},
},
# Health.
'HEALTH_CHECKER': {
'description': 'The configured health check.',
'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'}),
},
# Metrics.
'PROMETHEUS_NAMESPACE': {
'type': 'string',
'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`',
'x-example': 'myregistry',
},
# Misc configuration.
'BLACKLIST_V2_SPEC': {
'type': 'string',
'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`',
'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec',
'x-example': '<1.8.0',
},
'USER_RECOVERY_TOKEN_LIFETIME': {
'type': 'string',
'description': 'The length of time a token for recovering a user account is valid. Defaults to 30m.',
'x-example': '10m',
'pattern': '^[0-9]+(w|m|d|h|s)$',
},
'SESSION_COOKIE_SECURE': {
'type': 'boolean',
'description': 'Whether the `secure` property should be set on session cookies. ' +
'Defaults to False. Recommended to be True for all installations using SSL.',
'x-example': True,
'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies',
},
'PUBLIC_NAMESPACES': {
'type': 'array',
'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' +
' users\' repository list pages, regardless of whether that user is a member of the namespace.' +
' Typically, this is used by an enterprise customer in configuring a set of "well-known"' +
' namespaces.',
'uniqueItems': True,
'items': {
'type': 'string',
},
},
'AVATAR_KIND': {
'type': 'string',
'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)',
'enum': ['local', 'gravatar'],
},
'V2_PAGINATION_SIZE': {
'type': 'number',
'description': 'The number of results returned per page in V2 registry APIs',
'x-example': 100,
},
'ENABLE_HEALTH_DEBUG_SECRET': {
'type': ['string', 'null'],
'description': 'If specified, a secret that can be given to health endpoints to see full debug info when ' +
'not authenticated as a superuser',
'x-example': 'somesecrethere',
},
'BROWSER_API_CALLS_XHR_ONLY': {
'type': 'boolean',
'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.',
'x-example': False,
},
# Time machine and tag expiration settings.
'FEATURE_CHANGE_TAG_EXPIRATION': {
'type': 'boolean',
'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.',
'x-example': False,
},
'DEFAULT_TAG_EXPIRATION': {
'type': 'string',
'description': 'The default, configurable tag expiration time for time machine. Defaults to `2w`.',
'pattern': '^[0-9]+(w|m|d|h|s)$',
},
'TAG_EXPIRATION_OPTIONS': {
'type': 'array',
'description': 'The options that users can select for expiration of tags in their namespace (if enabled)',
'items': {
'type': 'string',
'pattern': '^[0-9]+(w|m|d|h|s)$',
},
},
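# The duration pattern used throughout this schema is an integer count
# followed by a unit: w(eeks), d(ays), h(ours), m(inutes), or s(econds);
# e.g. the default '2w' above is two weeks. A parsing sketch follows the
# schema below.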
# Team syncing.
'FEATURE_TEAM_SYNCING': {
'type': 'boolean',
'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)',
'x-example': True,
},
'TEAM_RESYNC_STALE_TIME': {
'type': 'string',
'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)',
'x-example': '2h',
'pattern': '^[0-9]+(w|m|d|h|s)$',
},
'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {
'type': 'boolean',
'description': 'If enabled, non-superusers can set up syncing on teams to backing LDAP or Keystone. Defaults to False.',
'x-example': True,
},
# Security scanning.
'FEATURE_SECURITY_SCANNER': {
'type': 'boolean',
'description': 'Whether to turn on/off the security scanner. Defaults to False',
'x-example': False,
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html',
},
'FEATURE_SECURITY_NOTIFICATIONS': {
'type': 'boolean',
'description': 'If the security scanner is enabled, whether to turn on/off security notifications. Defaults to False',
'x-example': False,
},
'SECURITY_SCANNER_ENDPOINT' : {
'type': 'string',
'pattern': '^http(s)?://(.)+$',
'description': 'The endpoint for the security scanner',
'x-example': 'http://192.168.99.101:6060',
},
'SECURITY_SCANNER_INDEXING_INTERVAL': {
'type': 'number',
'description': 'The number of seconds between indexing intervals in the security scanner. Defaults to 30.',
'x-example': 30,
},
# Bittorrent support.
'FEATURE_BITTORRENT': {
'type': 'boolean',
'description': 'Whether to allow using Bittorrent-based pulls. Defaults to False',
'x-example': False,
'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bittorrent.html',
},
'BITTORRENT_PIECE_SIZE': {
'type': 'number',
'description': 'The bittorrent piece size to use. If not specified, defaults to 512 * 1024.',
'x-example': 512 * 1024,
},
'BITTORRENT_ANNOUNCE_URL': {
'type': 'string',
'pattern': '^http(s)?://(.)+$',
'description': 'The URL of the announce endpoint on the bittorrent tracker',
'x-example': 'https://localhost:6881/announce',
},
# Build
'FEATURE_GITHUB_BUILD': {
'type': 'boolean',
'description': 'Whether to support GitHub build triggers. Defaults to False',
'x-example': False,
},
'FEATURE_BITBUCKET_BUILD': {
'type': 'boolean',
'description': 'Whether to support Bitbucket build triggers. Defaults to False',
'x-example': False,
},
'FEATURE_GITLAB_BUILD': {
'type': 'boolean',
'description': 'Whether to support GitLab build triggers. Defaults to False',
'x-example': False,
},
'FEATURE_BUILD_SUPPORT': {
'type': 'boolean',
'description': 'Whether to support Dockerfile build. Defaults to True',
'x-example': True,
},
'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': {
'type': ['number', 'null'],
'description': 'If not None, the default maximum number of builds that can be queued in a namespace.',
'x-example': 20,
},
'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': {
'type': ['number', 'null'],
'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.',
'x-example': 10,
},
'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': {
'type': ['number', 'null'],
'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.',
'x-example': 50,
},
# Login
'FEATURE_GITHUB_LOGIN': {
'type': 'boolean',
'description': 'Whether GitHub login is supported. Defaults to False',
'x-example': False,
},
'FEATURE_GOOGLE_LOGIN': {
'type': 'boolean',
'description': 'Whether Google login is supported. Defaults to False',
'x-example': False,
},
# Recaptcha
'FEATURE_RECAPTCHA': {
'type': 'boolean',
'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False',
'x-example': False,
'x-reference': 'https://www.google.com/recaptcha/intro/',
},
'RECAPTCHA_SITE_KEY': {
'type': ['string', 'null'],
'description': 'If recaptcha is enabled, the site key for the Recaptcha service',
},
'RECAPTCHA_SECRET_KEY': {
'type': ['string', 'null'],
'description': 'If recaptcha is enabled, the secret key for the Recaptcha service',
},
# External application tokens.
'FEATURE_APP_SPECIFIC_TOKENS': {
'type': 'boolean',
'description': 'If enabled, users can create tokens for use by the Docker CLI. Defaults to True',
'x-example': False,
},
'APP_SPECIFIC_TOKEN_EXPIRATION': {
'type': ['string', 'null'],
'description': 'The expiration for external app tokens. Defaults to None.',
'pattern': '^[0-9]+(w|m|d|h|s)$',
},
'EXPIRED_APP_SPECIFIC_TOKEN_GC': {
'type': ['string', 'null'],
'description': 'The length of time that expired external app tokens remain before being garbage collected. Defaults to 1d.',
'pattern': '^[0-9]+(w|m|d|h|s)$',
},
# Feature Flag: Permanent Sessions.
'FEATURE_PERMANENT_SESSIONS': {
'type': 'boolean',
'description': 'Whether sessions are permanent. Defaults to True',
'x-example': True,
},
# Feature Flag: Super User Support.
'FEATURE_SUPER_USERS': {
'type': 'boolean',
'description': 'Whether super users are supported. Defaults to True',
'x-example': True,
},
# Feature Flag: Anonymous Users.
'FEATURE_ANONYMOUS_ACCESS': {
'type': 'boolean',
'description': 'Whether to allow anonymous users to browse and pull public repositories. Defaults to True',
'x-example': True,
},
# Feature Flag: User Creation.
'FEATURE_USER_CREATION': {
'type': 'boolean',
'description': 'Whether users can be created (by non-super users). Defaults to True',
'x-example': True,
},
# Feature Flag: Invite Only User Creation.
'FEATURE_INVITE_ONLY_USER_CREATION': {
'type': 'boolean',
'description': 'Whether users being created must be invited by another user. Defaults to False',
'x-example': False,
},
# Feature Flag: Encrypted Basic Auth.
'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': {
'type': 'boolean',
'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False',
'x-example': False,
},
# Feature Flag: Direct Login.
'FEATURE_DIRECT_LOGIN': {
'type': 'boolean',
'description': 'Whether users can log in directly to the UI. Defaults to True',
'x-example': True,
},
# Feature Flag: Advertising V2.
'FEATURE_ADVERTISE_V2': {
'type': 'boolean',
'description': 'Whether the v2/ endpoint is visible. Defaults to True',
'x-example': True,
},
# Feature Flag: Log Rotation.
'FEATURE_ACTION_LOG_ROTATION': {
'type': 'boolean',
'description': 'Whether or not to rotate old action logs to storage. Defaults to False',
'x-example': False,
},
# Feature Flag: ACI Conversion.
'FEATURE_ACI_CONVERSION': {
'type': 'boolean',
'description': 'Whether to enable conversion to ACIs. Defaults to False',
'x-example': False,
},
# Feature Flag: Library Support.
'FEATURE_LIBRARY_SUPPORT': {
'type': 'boolean',
'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
'x-example': True,
},
# Feature Flag: Require Team Invite.
'FEATURE_REQUIRE_TEAM_INVITE': {
'type': 'boolean',
'description': 'Whether to require invitations when adding a user to a team. Defaults to True',
'x-example': True,
},
# Feature Flag: Collecting and Supporting Metadata.
'FEATURE_USER_METADATA': {
'type': 'boolean',
'description': 'Whether to collect and support user metadata. Defaults to False',
'x-example': False,
},
# Feature Flag: Support App Registry.
'FEATURE_APP_REGISTRY': {
'type': 'boolean',
'description': 'Whether to enable support for App repositories. Defaults to False',
'x-example': False,
},
# Feature Flag: Public Repositories in _catalog Endpoint.
'FEATURE_PUBLIC_CATALOG': {
'type': 'boolean',
'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False',
'x-example': False,
},
# Feature Flag: Reader Build Logs.
'FEATURE_READER_BUILD_LOGS': {
'type': 'boolean',
'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False',
'x-example': False,
},
# Feature Flag: Usernames Autocomplete.
'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {
'type': 'boolean',
'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True',
'x-example': True,
},
# Feature Flag: User log access.
'FEATURE_USER_LOG_ACCESS': {
'type': 'boolean',
'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False',
'x-example': True,
},
# Feature Flag: User renaming.
'FEATURE_USER_RENAME': {
'type': 'boolean',
'description': 'If set to true, users can rename their own namespace. Defaults to False',
'x-example': True,
},
},
}
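With the config app now importing this schema from `util.config.schema` (per the hunk above), here is a minimal sketch of validating a loaded config dict and decoding its duration strings; `check_config`, `parse_duration`, and `user_config` are illustrative names, not part of this commit:

import re

from jsonschema import validate

from util.config.schema import CONFIG_SCHEMA

DURATION_RE = re.compile(r'^([0-9]+)(w|m|d|h|s)$')
UNIT_SECONDS = {'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1}

def parse_duration(value):
  """ Converts a duration string such as '2w' or '30m' into seconds. """
  match = DURATION_RE.match(value)
  if match is None:
    raise ValueError('Invalid duration: %s' % value)
  return int(match.group(1)) * UNIT_SECONDS[match.group(2)]

def check_config(user_config):
  """ Raises jsonschema.ValidationError if user_config violates the schema. """
  validate(user_config, CONFIG_SCHEMA)
  return parse_duration(user_config.get('DEFAULT_TAG_EXPIRATION', '2w'))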

View file

@ -1,32 +0,0 @@
import os
import psutil
def get_worker_count(worker_kind_name, multiplier, minimum=None, maximum=None):
""" Returns the number of gunicorn workers to run for the given worker kind,
based on a combination of environment variable, multiplier, minimum (if any),
and number of accessible CPU cores.
"""
minimum = minimum or multiplier
maximum = maximum or (multiplier * multiplier)
# Check for an override via an environment variable.
override_value = os.environ.get('WORKER_COUNT_' + worker_kind_name.upper())
if override_value is not None:
return max(int(override_value), minimum)
override_value = os.environ.get('WORKER_COUNT')
if override_value is not None:
return max(int(override_value), minimum)
# Load the number of CPU cores via affinity, and use that to calculate the
# number of workers to run.
p = psutil.Process(os.getpid())
try:
cpu_count = len(p.cpu_affinity())
except AttributeError:
# cpu_affinity isn't supported on this platform. Assume 2.
cpu_count = 2
return min(max(cpu_count * multiplier, minimum), maximum)
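Before its removal here, a gunicorn config file would consume this helper along the lines of the following sketch (the kind name and bounds are illustrative):

from config_app.config_util.workers import get_worker_count

# Two workers per CPU core, clamped to the [2, 8] range.
workers = get_worker_count('web', 2, minimum=2, maximum=8)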

View file

@ -6,6 +6,6 @@ QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYDIR/config_app/conf/gunicorn_local.py config_application:application
PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYDIR/config_app/conf/gunicorn_web.py config_application:application
echo 'Gunicorn exited'

View file

@ -3,13 +3,12 @@
* callbacks. Any method defined on the server is exposed here as an equivalent method. Also
* defines some helper functions for working with API responses.
*/
// console.log(angular.module('quay-config').requires);
angular.module('quay-config').factory('ApiService', ['Restangular', '$q', 'UtilService', function(Restangular, $q, UtilService) {
var apiService = {};
// if (!window.__endpoints) {
// return apiService;
// }
if (!window.__endpoints) {
return apiService;
}
var getResource = function(getMethod, operation, opt_parameters, opt_background) {
var resource = {};

View file

@ -1,3 +1,4 @@
# TODO: extract the discovery logic into a top-level util and use it both here and in config_app's discovery.py
""" API discovery information. """
import re

View file

@ -7,7 +7,7 @@ cat << "EOF"
\ \ \ \ / / | |__| | | |__| | / ____ \ | | | |____ | |__| | | . ` | | __| _| |_ | |__| |
\ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_| \_____| \____/ |_| \_| |_| |_____| \_____|
\__/ \__/ \ \__
\___\ by CoreOS
\___\ by Red Hat
Build, Store, and Distribute your Containers

View file

@ -15,7 +15,8 @@
"clean": "rm -f static/build/*",
"clean-config-app": "rm -f config_app/static/build/*",
"watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch"
"watch-config-app": "npm run clean-config-app && cd config_app && webpack --watch",
"build-config-app": "npm run clean-config-app && cd config_app && NODE_ENV=production webpack --progress"
},
"repository": {
"type": "git",