Start moving configapp into separate dir

This commit is contained in:
Sam Chow 2018-05-14 11:27:56 -04:00
parent 81af2d9fcc
commit 92db413da6
12 changed files with 327 additions and 6 deletions

config_app/Procfile Normal file

@@ -0,0 +1,3 @@
app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py application:application
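# PYTHONPATH="../" points the config app at the repo root so that the shared modules
# app.py imports (data, util, features, etc.) resolve correctly.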
# webpack: npm run watch

config_app/__init__.py Normal file

config_app/app.py Normal file

@@ -0,0 +1,257 @@
import hashlib
import json
import logging
import os
from functools import partial
from Crypto.PublicKey import RSA
from flask import Flask, request, Request
from flask_login import LoginManager
from flask_mail import Mail
from flask_principal import Principal
from jwkest.jwk import RSAKey
import features
from _init import CONF_DIR
from auth.auth_context import get_authenticated_user
from avatars.avatars import Avatar
from buildman.manager.buildcanceller import BuildCanceller
from data import database
from data import model
from data.archivedlogs import LogArchive
from data.billing import Billing
from data.buildlogs import BuildLogs
from data.cache import get_model_cache
from data.model.user import LoginWrappedDBUser
from data.queue import WorkQueue, BuildMetricQueueReporter
from data.userevent import UserEventsBuilderModule
from data.userfiles import Userfiles
from data.users import UserAuthentication
from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter
from oauth.services.github import GithubOAuthService
from oauth.services.gitlab import GitLabOAuthService
from oauth.loginmanager import OAuthLoginManager
from storage import Storage
from util.log import filter_logs
from util import get_app_url
from util.ipresolver import IPResolver
from util.saas.analytics import Analytics
from util.saas.useranalytics import UserAnalytics
from util.saas.exceptionlog import Sentry
from util.names import urn_generator
from util.config.configutil import generate_secret_key
from util.config.provider import get_config_provider
from util.config.superusermanager import SuperUserManager
from util.label_validator import LabelValidator
from util.metrics.metricqueue import MetricQueue
from util.metrics.prometheus import PrometheusPlugin
from util.saas.cloudwatch import start_cloudwatch_sender
from util.secscan.api import SecurityScannerAPI
from util.tufmetadata.api import TUFMetadataAPI
from util.security.instancekeys import InstanceKeys
from util.security.signing import Signer
OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')
OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml')
OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py')
OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem'
app = Flask(__name__)
logger = logging.getLogger(__name__)
# Instantiate the configuration.
is_testing = 'TEST' in os.environ
is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                      testing=is_testing, kubernetes=is_kubernetes)

if is_testing:
  from test.testconfig import TestConfig
  logger.debug('Loading test config.')
  app.config.from_object(TestConfig())
else:
  from config import DefaultConfig
  logger.debug('Loading default config.')
  app.config.from_object(DefaultConfig())
  app.teardown_request(database.close_db_filter)
# Load the override config via the provider.
config_provider.update_app_config(app.config)
# Update any configuration found in the override environment variable.
environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}'))
app.config.update(environ_config)
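# For example (hypothetical value), launching with
#   QUAY_OVERRIDE_CONFIG='{"SUPER_USERS": ["admin"]}'
# would merge that JSON object over the configuration loaded above.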
# Allow user to define a custom storage preference for the local instance.
_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split()
if _distributed_storage_preference:
app.config['DISTRIBUTED_STORAGE_PREFERENCE'] = _distributed_storage_preference
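# For example (hypothetical value), QUAY_DISTRIBUTED_STORAGE_PREFERENCE="local_us local_eu"
# yields a preference list of ['local_us', 'local_eu'].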
# Generate a secret key if none was specified.
if app.config['SECRET_KEY'] is None:
logger.debug('Generating in-memory secret key')
app.config['SECRET_KEY'] = generate_secret_key()
# If the "preferred" scheme is https, then http is not allowed. Therefore, ensure we have a secure
# session cookie.
if (app.config['PREFERRED_URL_SCHEME'] == 'https' and
    not app.config.get('FORCE_NONSECURE_SESSION_COOKIE', False)):
  app.config['SESSION_COOKIE_SECURE'] = True
# Load features from config.
features.import_features(app.config)
CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8]
logger.debug("Loaded config", extra={"config": app.config})
class RequestWithId(Request):
  request_gen = staticmethod(urn_generator(['request']))

  def __init__(self, *args, **kwargs):
    super(RequestWithId, self).__init__(*args, **kwargs)
    self.request_id = self.request_gen()
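# Assuming urn_generator produces ids of the form 'urn:request:<uuid>', every request
# carries a unique identifier that the logging hooks below attach to its log lines.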
@app.before_request
def _request_start():
  logger.debug('Starting request: %s (%s)', request.request_id, request.path,
               extra={"request_id": request.request_id})
DEFAULT_FILTER = lambda x: '[FILTERED]'
FILTERED_VALUES = [
  {'key': ['password'], 'fn': DEFAULT_FILTER},
  {'key': ['user', 'password'], 'fn': DEFAULT_FILTER},
  {'key': ['blob'], 'fn': lambda x: x[0:8]},
]
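# Each entry pairs a key path in the request parameters with a function applied to its
# value before logging: password fields are replaced with '[FILTERED]' and 'blob'
# values are truncated to their first eight characters.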
@app.after_request
def _request_end(resp):
  jsonbody = request.get_json(force=True, silent=True)
  values = request.values.to_dict()

  if jsonbody and not isinstance(jsonbody, dict):
    jsonbody = {'_parsererror': jsonbody}

  if isinstance(values, dict):
    filter_logs(values, FILTERED_VALUES)

  extra = {
    "endpoint": request.endpoint,
    "request_id": request.request_id,
    "remote_addr": request.remote_addr,
    "http_method": request.method,
    "original_url": request.url,
    "path": request.path,
    "parameters": values,
    "json_body": jsonbody,
    "confsha": CONFIG_DIGEST,
  }

  if request.user_agent is not None:
    extra["user-agent"] = request.user_agent.string

  logger.debug('Ending request: %s (%s)', request.request_id, request.path, extra=extra)
  return resp
root_logger = logging.getLogger()
app.request_class = RequestWithId
# Register custom converters.
app.url_map.converters['regex'] = RegexConverter
app.url_map.converters['repopath'] = RepositoryPathConverter
app.url_map.converters['apirepopath'] = APIRepositoryPathConverter
Principal(app, use_sessions=False)
tf = app.config['DB_TRANSACTION_FACTORY']
model_cache = get_model_cache(app.config)
avatar = Avatar(app)
login_manager = LoginManager(app)
mail = Mail(app)
prometheus = PrometheusPlugin(app)
metric_queue = MetricQueue(prometheus)
chunk_cleanup_queue = WorkQueue(app.config['CHUNK_CLEANUP_QUEUE_NAME'], tf, metric_queue=metric_queue)
instance_keys = InstanceKeys(app)
ip_resolver = IPResolver(app)
storage = Storage(app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver)
userfiles = Userfiles(app, storage)
log_archive = LogArchive(app, storage)
analytics = Analytics(app)
user_analytics = UserAnalytics(app)
billing = Billing(app)
sentry = Sentry(app)
build_logs = BuildLogs(app)
authentication = UserAuthentication(app, config_provider, OVERRIDE_CONFIG_DIRECTORY)
userevents = UserEventsBuilderModule(app)
superusers = SuperUserManager(app)
signer = Signer(app, config_provider)
label_validator = LabelValidator(app)
build_canceller = BuildCanceller(app)
start_cloudwatch_sender(metric_queue, app)
github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG')
gitlab_trigger = GitLabOAuthService(app.config, 'GITLAB_TRIGGER_CONFIG')
oauth_login = OAuthLoginManager(app.config)
oauth_apps = [github_trigger, gitlab_trigger]
image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf,
                                    has_namespace=False, metric_queue=metric_queue)
dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf,
                                   metric_queue=metric_queue,
                                   reporter=BuildMetricQueueReporter(metric_queue),
                                   has_namespace=True)
notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_namespace=True,
                               metric_queue=metric_queue)
secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf,
                                       has_namespace=False,
                                       metric_queue=metric_queue)

# Note: We set `has_namespace` to `False` here, as we explicitly want this queue to not be emptied
# when a namespace is marked for deletion.
namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False,
                               metric_queue=metric_queue)

all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue,
              secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue]
secscan_api = SecurityScannerAPI(app, app.config, storage)
tuf_metadata_api = TUFMetadataAPI(app, app.config)
# Check for a key in config. If none found, generate a new signing key for Docker V2 manifests.
_v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME)
if os.path.exists(_v2_key_path):
  docker_v2_signing_key = RSAKey().load(_v2_key_path)
else:
  docker_v2_signing_key = RSAKey(key=RSA.generate(2048))
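# Note that a freshly generated key lives only in memory; this code does not write it
# back out to the config directory.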
database.configure(app.config)
model.config.app_config = app.config
model.config.store = storage
model.config.register_image_cleanup_callback(secscan_api.cleanup_layers)
model.config.register_repo_cleanup_callback(tuf_metadata_api.delete_metadata)
@login_manager.user_loader
def load_user(user_uuid):
  logger.debug('User loader loading deferred user with uuid: %s', user_uuid)
  return LoginWrappedDBUser(user_uuid)
get_app_url = partial(get_app_url, app.config)

config_app/application.py Normal file

@@ -0,0 +1,15 @@
import os
import logging
import logging.config
from util.log import logfile_path
from app import app as application
# Bind all of the blueprints
import web
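# Importing the web module registers the setup blueprint on the application object
# as an import-time side effect.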
if __name__ == '__main__':
  logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)
  application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')

config_app/conf/gunicorn_local.py Normal file

@@ -0,0 +1,27 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
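# Put the repo root on sys.path so the shared util modules below can be imported.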
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
logconfig = logfile_path(debug=True)
bind = '0.0.0.0:5000'
workers = get_worker_count('local', 2, minimum=2, maximum=8)
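# Assumption: get_worker_count scales the worker count with the CPU count using the
# given multiplier, clamped here to between 2 and 8 workers for the 'local' profile.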
worker_class = 'gevent'
daemon = False
pythonpath = '.'
preload_app = True
def post_fork(server, worker):
  # Reset the Random library to ensure it won't raise the "PID check failed." error after
  # gunicorn forks.
  Random.atfork()


def when_ready(server):
  logger = logging.getLogger(__name__)
  logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class)

config_app/config_endpoints/setup_web.py

@@ -29,7 +29,7 @@ def render_page_template_with_routedata(name, *args, **kwargs):
 logger = logging.getLogger(__name__)
 logging.captureWarnings(True)
 
-setup_web = Blueprint('setup_web', __name__)
+setup_web = Blueprint('setup_web', __name__, template_folder='templates')
 
 STATUS_TAGS = app.config['STATUS_TAGS']

config_app/web.py Normal file

@@ -0,0 +1,6 @@
from app import app as application
from config_endpoints.setup_web import setup_web
application.register_blueprint(setup_web)
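# Only the setup blueprint is registered here, so the config app serves just the
# setup endpoints.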

local-config-app.sh Executable file

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
cat << "EOF"
__ __
/ \ / \ ______ _ _ __ __ __
/ /\ / /\ \ / __ \ | | | | / \ \ \ / /
/ / / / \ \ | | | | | | | | / /\ \ \ /
\ \ \ \ / / | |__| | | |__| | / ____ \ | |
\ \/ \ \/ / \_ ___/ \____/ /_/ \_\ |_|
\__/ \__/ \ \__
\___\ by CoreOS
Build, Store, and Distribute your Containers
EOF
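# goreman reads config_app/Procfile (via -basedir) and starts the processes it defines.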
goreman -basedir "config_app" start

web.py

@@ -9,7 +9,6 @@ from endpoints.realtime import realtime
 from endpoints.web import web
 from endpoints.webhooks import webhooks
 from endpoints.wellknown import wellknown
-from endpoints.setup_web import setup_web
 
 import os
@@ -18,10 +17,7 @@
-print('\n\n\nAre we in config mode?')
-print(is_config_mode)
-
-if is_config_mode:
-  application.register_blueprint(setup_web)
-else:
-  application.register_blueprint(web)
+application.register_blueprint(web)
 
 application.register_blueprint(githubtrigger, url_prefix='/oauth2')