initial import for Open Source 🎉

Jimmy Zelinskie 2019-11-12 11:09:47 -05:00
parent 1898c361f3
commit 9c0dd3b722
2048 changed files with 218743 additions and 0 deletions

27
conf/gunicorn_local.py Normal file
@@ -0,0 +1,27 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
logconfig = logfile_path(debug=True)
bind = '0.0.0.0:5000'
workers = get_worker_count('local', 2, minimum=2, maximum=8)
worker_class = 'gevent'
daemon = False
pythonpath = '.'
preload_app = True
def post_fork(server, worker):
# Reset the Random library to ensure it won't raise the "PID check failed." error after
# gunicorn forks.
Random.atfork()
def when_ready(server):
logger = logging.getLogger(__name__)
logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class)
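util.workers.get_worker_count is not part of this excerpt; a minimal sketch of the behavior its call sites imply (an environment override, then a CPU-scaled count clamped to [minimum, maximum] — the variable name and scaling rule here are assumptions, not the actual Quay implementation):

import multiprocessing
import os

def get_worker_count(worker_kind, multiplier, minimum=2, maximum=8):
    # Hypothetical explicit override, e.g. WORKER_COUNT_LOCAL=4.
    override = os.environ.get('WORKER_COUNT_' + worker_kind.upper())
    if override is not None:
        return int(override)
    # Otherwise scale with available CPUs and clamp to the allowed range.
    return max(minimum, min(maximum, multiprocessing.cpu_count() * multiplier))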

28
conf/gunicorn_registry.py Normal file
@@ -0,0 +1,28 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
logconfig = logfile_path(debug=False)
bind = 'unix:/tmp/gunicorn_registry.sock'
workers = get_worker_count('registry', 4, minimum=8, maximum=64)
worker_class = 'gevent'
pythonpath = '.'
preload_app = True
def post_fork(server, worker):
# Reset the Random library to ensure it won't raise the "PID check failed." error after
# gunicorn forks.
Random.atfork()
def when_ready(server):
logger = logging.getLogger(__name__)
logger.debug('Starting registry gunicorn with %s workers and %s worker class', workers,
worker_class)

28
conf/gunicorn_secscan.py Normal file
@@ -0,0 +1,28 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
logconfig = logfile_path(debug=False)
bind = 'unix:/tmp/gunicorn_secscan.sock'
workers = get_worker_count('secscan', 2, minimum=2, maximum=4)
worker_class = 'gevent'
pythonpath = '.'
preload_app = True
def post_fork(server, worker):
# Reset the Random library to ensure it won't raise the "PID check failed." error after
# gunicorn forks.
Random.atfork()
def when_ready(server):
logger = logging.getLogger(__name__)
logger.debug('Starting secscan gunicorn with %s workers and %s worker class', workers,
worker_class)

27
conf/gunicorn_verbs.py Normal file
@@ -0,0 +1,27 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
logconfig = logfile_path(debug=False)
bind = 'unix:/tmp/gunicorn_verbs.sock'
workers = get_worker_count('verbs', 2, minimum=2, maximum=32)
pythonpath = '.'
preload_app = True
timeout = 2000 # Because sync workers
def post_fork(server, worker):
# Reset the Random library to ensure it won't raise the "PID check failed." error after
# gunicorn forks.
Random.atfork()
def when_ready(server):
logger = logging.getLogger(__name__)
logger.debug('Starting verbs gunicorn with %s workers and sync worker class', workers)

28
conf/gunicorn_web.py Normal file
@@ -0,0 +1,28 @@
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import logging
from Crypto import Random
from util.log import logfile_path
from util.workers import get_worker_count
logconfig = logfile_path(debug=False)
bind = 'unix:/tmp/gunicorn_web.sock'
workers = get_worker_count('web', 2, minimum=2, maximum=32)
worker_class = 'gevent'
pythonpath = '.'
preload_app = True
def post_fork(server, worker):
# Reset the Random library to ensure it won't raise the "PID check failed." error after
# gunicorn forks.
Random.atfork()
def when_ready(server):
logger = logging.getLogger(__name__)
logger.debug('Starting web gunicorn with %s workers and %s worker class', workers,
worker_class)

@@ -0,0 +1,71 @@
import json
import os
import base64
from requests import Request, Session
QUAYPATH = os.environ.get('QUAYPATH', '.')
KUBE_EXTRA_CA_CERTDIR = os.environ.get('KUBE_EXTRA_CA_CERTDIR', '%s/conf/kube_extra_certs' % QUAYPATH)
KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '')
port = os.environ.get('KUBERNETES_SERVICE_PORT')
if port:
KUBERNETES_API_HOST += ':' + port
SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise')
QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret')
EXTRA_CA_DIRECTORY_PREFIX = 'extra_ca_certs_'
def _lookup_secret(service_token):
secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET)
response = _execute_k8s_api(service_token, 'GET', secret_url)
if response.status_code != 200:
raise Exception('Cannot get the config secret')
return json.loads(response.text)
def _execute_k8s_api(service_account_token, method, relative_url, data=None, api_prefix='api/v1', content_type='application/json'):
headers = {
'Authorization': 'Bearer ' + service_account_token
}
if data:
headers['Content-Type'] = content_type
data = json.dumps(data) if data else None
session = Session()
url = 'https://%s/%s/%s' % (KUBERNETES_API_HOST, api_prefix, relative_url)
request = Request(method, url, data=data, headers=headers)
return session.send(request.prepare(), verify=False, timeout=2)
def is_extra_cert(key):
return key.startswith(EXTRA_CA_DIRECTORY_PREFIX)
def main():
# Load the service account token from the local store.
if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH):
raise Exception('Cannot load Kubernetes service account token')
with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f:
service_token = f.read()
secret_data = _lookup_secret(service_token).get('data', {})
cert_keys = filter(is_extra_cert, secret_data.keys())
for cert_key in cert_keys:
if not os.path.exists(KUBE_EXTRA_CA_CERTDIR):
os.mkdir(KUBE_EXTRA_CA_CERTDIR)
cert_value = base64.b64decode(secret_data[cert_key])
cert_filename = cert_key.replace(EXTRA_CA_DIRECTORY_PREFIX, '')
print "Found an extra cert %s in config-secret, copying to kube ca dir"
with open(os.path.join(KUBE_EXTRA_CA_CERTDIR, cert_filename), 'w') as f:
f.write(cert_value)
if __name__ == '__main__':
main()
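For reference, the secret returned by _lookup_secret is expected to look roughly like this (key name and values are illustrative, not taken from a real deployment; the Kubernetes API base64-encodes everything under data):

example_secret = {
    'data': {
        # Only keys carrying the extra_ca_certs_ prefix are extracted.
        'extra_ca_certs_internal-ca.crt': 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t',
        # Other entries, such as the main config, are ignored by is_extra_cert.
        'config.yaml': 'RkVBVFVSRV9BTk9OWU1PVVNfQUNDRVNTOiB0cnVl',
    }
}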

12
conf/init/02_get_kube_certs.sh Executable file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash
QUAYDIR=${QUAYDIR:-"/"}
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd $QUAYDIR
if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
echo "Running on kubernetes, attempting to retrieve extra certs from secret"
python $QUAYCONF/init/02_get_kube_certs.py
fi

0
conf/init/__init__.py Normal file

15
conf/init/certs_create.sh Executable file
@@ -0,0 +1,15 @@
#! /bin/bash
set -e
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"}
# Create certs for jwtproxy to mitm outgoing TLS connections
# echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm
mkdir -p /certificates; cd /certificates
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \
-subj "/C=US/ST=NY/L=NYC/O=Dis/CN=self-signed" \
-keyout mitm-key.pem -out mitm.pem
cp /certificates/mitm-key.pem $QUAYCONF/mitm.key
cp /certificates/mitm.pem $QUAYCONF/mitm.cert
cp /certificates/mitm.pem $SYSTEM_CERTDIR/mitm.crt

52
conf/init/certs_install.sh Executable file
@@ -0,0 +1,52 @@
#! /bin/bash
set -e
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
QUAYCONFIG=${QUAYCONFIG:-"$QUAYCONF/stack"}
CERTDIR=${CERTDIR:-"$QUAYCONFIG/extra_ca_certs"}
SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"}
PYTHON_ROOT=${PYTHON_ROOT:-"/opt/rh/python27/root/usr/lib/python2.7"}
# If we're running under kube, the previous script (02_get_kube_certs.sh) will put the certs in a different location
if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
CERTDIR=${KUBE_EXTRA_CA_CERTDIR:-"$QUAYPATH/conf/kube_extra_certs"}
fi
cd ${QUAYDIR:-"/quay-registry"}
# Add the custom LDAP certificate
if [ -e $QUAYCONFIG/ldap.crt ]
then
cp $QUAYCONFIG/ldap.crt ${SYSTEM_CERTDIR}/ldap.crt
fi
# Add extra trusted certificates (as a directory)
if [ -d $CERTDIR ]; then
if test "$(ls -A "$CERTDIR")"; then
echo "Installing extra certificates found in $CERTDIR directory"
cp $CERTDIR/* ${SYSTEM_CERTDIR}
cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/requests/cacert.pem
cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
fi
fi
# Add extra trusted certificates (as a file)
if [ -f $CERTDIR ]; then
echo "Installing extra certificates found in $CERTDIR file"
csplit -z -f ${SYSTEM_CERTDIR}/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}'
cat $CERTDIR >> $PYTHON_ROOT/site-packages/requests/cacert.pem
cat $CERTDIR >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
fi
# Add extra trusted certificates (prefixed)
for f in $(find $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*")
do
echo "Installing extra cert $f"
cp "$f" ${SYSTEM_CERTDIR}
cat "$f" >> $PYTHON_ROOT/site-packages/requests/cacert.pem
cat "$f" >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
done
# Update all CA certificates.
update-ca-trust extract

16
conf/init/copy_config_files.sh Executable file
@@ -0,0 +1,16 @@
#! /bin/sh
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
if [ -e $QUAYCONF/stack/robots.txt ]
then
cp $QUAYCONF/stack/robots.txt $QUAYPATH/templates/robots.txt
fi
if [ -e $QUAYCONF/stack/favicon.ico ]
then
cp $QUAYCONF/stack/favicon.ico $QUAYPATH/static/favicon.ico
fi

42
conf/init/logrotate.conf Normal file
@@ -0,0 +1,42 @@
#
# This file exists because of a bug in phusion/baseimage:0.9.19 where the su
# directive below is configured to use the nonexistent syslog user.
#
# see "man logrotate" for details
# rotate log files weekly
weekly
# use the syslog group by default, since this is the owning group
# of /var/log/syslog.
su root root
# keep 4 weeks worth of backlogs
rotate 4
# create new (empty) log files after rotating old ones
create
# uncomment this if you want your log files compressed
#compress
# packages drop log rotation information into this directory
include /etc/logrotate.d
# no packages own wtmp, or btmp -- we'll rotate them here
/var/log/wtmp {
missingok
monthly
create 0664 root utmp
rotate 1
}
/var/log/btmp {
missingok
monthly
create 0660 root utmp
rotate 1
}
# system-specific logs may be configured here

@@ -0,0 +1,126 @@
import os
import os.path
import yaml
import jinja2
QUAYPATH = os.getenv("QUAYPATH", ".")
QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))
STATIC_DIR = os.path.join(QUAYDIR, 'static')
SSL_PROTOCOL_DEFAULTS = ['TLSv1', 'TLSv1.1', 'TLSv1.2']
SSL_CIPHER_DEFAULTS = [
'ECDHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'DHE-RSA-AES128-GCM-SHA256',
'DHE-DSS-AES128-GCM-SHA256',
'kEDH+AESGCM',
'ECDHE-RSA-AES128-SHA256',
'ECDHE-ECDSA-AES128-SHA256',
'ECDHE-RSA-AES128-SHA',
'ECDHE-ECDSA-AES128-SHA',
'ECDHE-RSA-AES256-SHA384',
'ECDHE-ECDSA-AES256-SHA384',
'ECDHE-RSA-AES256-SHA',
'ECDHE-ECDSA-AES256-SHA',
'DHE-RSA-AES128-SHA256',
'DHE-RSA-AES128-SHA',
'DHE-DSS-AES128-SHA256',
'DHE-RSA-AES256-SHA256',
'DHE-DSS-AES256-SHA',
'DHE-RSA-AES256-SHA',
'AES128-GCM-SHA256',
'AES256-GCM-SHA384',
'AES128-SHA256',
'AES256-SHA256',
'AES128-SHA',
'AES256-SHA',
'AES',
'CAMELLIA',
'!3DES',
'!aNULL',
'!eNULL',
'!EXPORT',
'!DES',
'!RC4',
'!MD5',
'!PSK',
'!aECDH',
'!EDH-DSS-DES-CBC3-SHA',
'!EDH-RSA-DES-CBC3-SHA',
'!KRB5-DES-CBC3-SHA',
]
def write_config(filename, **kwargs):
with open(filename + ".jnj") as f:
template = jinja2.Template(f.read())
rendered = template.render(kwargs)
with open(filename, 'w') as f:
f.write(rendered)
def generate_nginx_config(config):
"""
Generates nginx config from the app config
"""
config = config or {}
use_https = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.key'))
use_old_certs = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.old.key'))
v1_only_domain = config.get('V1_ONLY_DOMAIN', None)
enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False)
ssl_protocols = config.get('SSL_PROTOCOLS', SSL_PROTOCOL_DEFAULTS)
ssl_ciphers = config.get('SSL_CIPHERS', SSL_CIPHER_DEFAULTS)
write_config(os.path.join(QUAYCONF_DIR, 'nginx/nginx.conf'), use_https=use_https,
use_old_certs=use_old_certs,
enable_rate_limits=enable_rate_limits,
v1_only_domain=v1_only_domain,
ssl_protocols=ssl_protocols,
ssl_ciphers=':'.join(ssl_ciphers))
def generate_server_config(config):
"""
Generates server config from the app config
"""
config = config or {}
tuf_server = config.get('TUF_SERVER', None)
tuf_host = config.get('TUF_HOST', None)
signing_enabled = config.get('FEATURE_SIGNING', False)
maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G')
enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False)
write_config(
os.path.join(QUAYCONF_DIR, 'nginx/server-base.conf'), tuf_server=tuf_server, tuf_host=tuf_host,
signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size,
enable_rate_limits=enable_rate_limits,
static_dir=STATIC_DIR)
def generate_rate_limiting_config(config):
"""
Generates rate limiting config from the app config
"""
config = config or {}
non_rate_limited_namespaces = config.get('NON_RATE_LIMITED_NAMESPACES') or set()
enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False)
write_config(
os.path.join(QUAYCONF_DIR, 'nginx/rate-limiting.conf'),
non_rate_limited_namespaces=non_rate_limited_namespaces,
enable_rate_limits=enable_rate_limits,
static_dir=STATIC_DIR)
if __name__ == "__main__":
if os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/config.yaml')):
with open(os.path.join(QUAYCONF_DIR, 'stack/config.yaml'), 'r') as f:
config = yaml.safe_load(f)
else:
config = None
generate_rate_limiting_config(config)
generate_server_config(config)
generate_nginx_config(config)
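write_config follows a simple convention: the Jinja2 template lives next to its target as <filename>.jnj and is rendered in place. A self-contained illustration of that rendering step (the template string is made up for the example, not a line from the real nginx.conf.jnj):

import jinja2

template = jinja2.Template("ssl_ciphers '{{ ssl_ciphers }}';")
print(template.render(ssl_ciphers=':'.join(['ECDHE-RSA-AES128-GCM-SHA256', 'AES256-SHA'])))
# ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:AES256-SHA';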

8
conf/init/nginx_conf_create.sh Executable file
@@ -0,0 +1,8 @@
#!/bin/bash
QUAYDIR=${QUAYDIR:-"/"}
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd $QUAYDIR
python $QUAYCONF/init/nginx_conf_create.py

10
conf/init/runmigration.sh Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
set -e
cd ${QUAYDIR:-"/"}
# Run the database migration
PYTHONPATH=${QUAYPATH:-"."} python $QUAYCONF/init/v3_migration.py > revision_head
PYTHONPATH=${QUAYPATH:-"."} alembic upgrade `cat revision_head`

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t blobuploadcleanupworker

@@ -0,0 +1,10 @@
#! /bin/bash
echo 'Starting Blob upload cleanup worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.blobuploadcleanupworker.blobuploadcleanupworker 2>&1
echo 'Blob upload cleanup exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t buildlogsarchiver

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting build logs archiver worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.buildlogsarchiver.buildlogsarchiver 2>&1
echo 'Build logs archiver exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t buildmanager

@@ -0,0 +1,11 @@
#! /bin/bash
echo 'Starting internal build manager'
# Run the build manager.
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
export PYTHONPATH=$QUAYPATH
exec venv/bin/python -m buildman.builder 2>&1
echo 'Internal build manager exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t chunkcleanupworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting chunk cleanup worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.chunkcleanupworker 2>&1
echo 'Chunk cleanup worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t expiredappspecifictokenworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting Expired app specific token GC worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.expiredappspecifictokenworker 2>&1
echo 'Expired app specific token GC exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t exportactionlogsworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting Export Actions Log worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.exportactionlogsworker 2>&1
echo 'Export Actions Log worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t gcworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting GC worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.gc.gcworker 2>&1
echo 'Repository GC exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t globalpromstats

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting global prometheus stats worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.globalpromstats.globalpromstats
echo 'Global prometheus stats exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t labelbackfillworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting label backfill worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.labelbackfillworker 2>&1
echo 'Repository label backfill exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t logrotateworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting log rotation worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.logrotateworker
echo 'Log rotation worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t namespacegcworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting Namespace GC worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.namespacegcworker 2>&1
echo 'Namespace GC exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t notificationworker

@@ -0,0 +1,10 @@
#! /bin/bash
echo 'Starting notification worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.notificationworker.notificationworker
echo 'Notification worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t queuecleanupworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting Queue cleanup worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.queuecleanupworker 2>&1
echo 'Repository Queue cleanup exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t repositoryactioncounter

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting repository action count worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.repositoryactioncounter 2>&1
echo 'Repository action worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t security_notification_worker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting security scanner notification worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.security_notification_worker 2>&1
echo 'Security scanner notification worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t securityworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting security scanner worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.securityworker.securityworker 2>&1
echo 'Security scanner worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t storagereplication

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting storage replication worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.storagereplication 2>&1
echo 'Repository storage replication exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t tagbackfillworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting tag backfill worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.tagbackfillworker 2>&1
echo 'Repository tag backfill exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t teamsyncworker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting team synchronization worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.teamsyncworker.teamsyncworker 2>&1
echo 'Team synchronization worker exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t dnsmasq

@@ -0,0 +1,7 @@
#! /bin/bash
echo 'Starting dnsmasq'
/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1
echo 'dnsmasq exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t gunicorn_registry

@@ -0,0 +1,12 @@
#! /bin/bash
echo 'Starting gunicorn'
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
DB_CONNECTION_POOLING=${DB_CONNECTION_POOLING:-"true"}
cd ${QUAYDIR:-"/"}
DB_CONNECTION_POOLING=$DB_CONNECTION_POOLING PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_registry.py registry:application
echo 'Gunicorn exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t gunicorn_secscan

@@ -0,0 +1,11 @@
#! /bin/bash
echo 'Starting gunicorn'
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_secscan.py secscan:application
echo 'Gunicorn exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t gunicorn_verbs

@@ -0,0 +1,11 @@
#! /bin/bash
echo 'Starting gunicorn'
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_verbs.py verbs:application
echo 'Gunicorn exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t gunicorn_web

@@ -0,0 +1,11 @@
#! /bin/bash
echo 'Starting gunicorn'
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_web.py web:application
echo 'Gunicorn exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t jwtproxy

@@ -0,0 +1,16 @@
#! /bin/bash
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
if [ -f $QUAYCONF/jwtproxy_conf.yaml ];
then
echo 'Starting jwtproxy'
/usr/local/bin/jwtproxy --config $QUAYCONF/jwtproxy_conf.yaml
rm /tmp/jwtproxy_secscan.sock
echo 'Jwtproxy exited'
else
sleep 1
fi

@@ -0,0 +1,7 @@
#!/bin/sh
# Ensure dependencies start before the logger
sv check syslog-ng > /dev/null || exit 1
# Start the logger
exec logger -i -t memcached

@@ -0,0 +1,12 @@
#! /bin/bash
echo 'Starting memcached'
if [ "$DEBUGLOG" == "true" ]
then
memcached -u memcached -m 64 -vv -l 127.0.0.1 -p 18080
else
memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
fi
echo 'memcached exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t nginx

@@ -0,0 +1,12 @@
#! /bin/bash
echo 'Starting nginx'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
/usr/sbin/nginx -c $QUAYCONF/nginx/nginx.conf
echo 'Nginx exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t prometheus-aggregator

@@ -0,0 +1,7 @@
#! /bin/bash
echo 'Starting prometheus aggregator'
/usr/local/bin/prometheus-aggregator
echo 'Prometheus aggregator exited'

@@ -0,0 +1,4 @@
#!/bin/sh
# Start the logger
exec logger -i -t service_key_worker

@@ -0,0 +1,9 @@
#! /bin/bash
echo 'Starting service key worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.servicekeyworker.servicekeyworker 2>&1
echo 'Service key worker exited'

@@ -0,0 +1,147 @@
import os
import os.path
import jinja2
QUAYPATH = os.getenv("QUAYPATH", ".")
QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))
QUAY_SERVICES = os.getenv("QUAY_SERVICES", [])
QUAY_OVERRIDE_SERVICES = os.getenv("QUAY_OVERRIDE_SERVICES", [])
def default_services():
return {
"blobuploadcleanupworker": {
"autostart": "true"
},
"buildlogsarchiver": {
"autostart": "true"
},
"builder": {
"autostart": "true"
},
"chunkcleanupworker": {
"autostart": "true"
},
"expiredappspecifictokenworker": {
"autostart": "true"
},
"exportactionlogsworker": {
"autostart": "true"
},
"gcworker": {
"autostart": "true"
},
"globalpromstats": {
"autostart": "true"
},
"labelbackfillworker": {
"autostart": "true"
},
"logrotateworker": {
"autostart": "true"
},
"namespacegcworker": {
"autostart": "true"
},
"notificationworker": {
"autostart": "true"
},
"queuecleanupworker": {
"autostart": "true"
},
"repositoryactioncounter": {
"autostart": "true"
},
"security_notification_worker": {
"autostart": "true"
},
"securityworker": {
"autostart": "true"
},
"storagereplication": {
"autostart": "true"
},
"tagbackfillworker": {
"autostart": "true"
},
"teamsyncworker": {
"autostart": "true"
},
"dnsmasq": {
"autostart": "true"
},
"gunicorn-registry": {
"autostart": "true"
},
"gunicorn-secscan": {
"autostart": "true"
},
"gunicorn-verbs": {
"autostart": "true"
},
"gunicorn-web": {
"autostart": "true"
},
"ip-resolver-update-worker": {
"autostart": "true"
},
"jwtproxy": {
"autostart": "true"
},
"memcache": {
"autostart": "true"
},
"nginx": {
"autostart": "true"
},
"prometheus-aggregator": {
"autostart": "true"
},
"servicekey": {
"autostart": "true"
},
"repomirrorworker": {
"autostart": "false"
}
}
def generate_supervisord_config(filename, config):
with open(filename + ".jnj") as f:
template = jinja2.Template(f.read())
rendered = template.render(config=config)
with open(filename, 'w') as f:
f.write(rendered)
def limit_services(config, enabled_services):
if enabled_services == []:
return
for service in config.keys():
if service in enabled_services:
config[service]["autostart"] = "true"
else:
config[service]["autostart"] = "false"
def override_services(config, override_services):
if override_services == []:
return
for service in config.keys():
if service + "=true" in override_services:
config[service]["autostart"] = "true"
elif service + "=false" in override_services:
config[service]["autostart"] = "false"
if __name__ == "__main__":
config = default_services()
limit_services(config, QUAY_SERVICES)
override_services(config, QUAY_OVERRIDE_SERVICES)
generate_supervisord_config(os.path.join(QUAYCONF_DIR, 'supervisord.conf'), config)
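A note on how the two knobs compose: os.getenv returns a comma-separated string when QUAY_SERVICES is set (the list default only applies when it is unset), so the service in enabled_services test in limit_services degrades to substring matching. A small usage sketch:

config = default_services()
# Only autostart these two services...
limit_services(config, 'gunicorn-web,nginx')
# ...then force memcache back on regardless.
override_services(config, ['memcache=true'])
assert config['nginx']['autostart'] == 'true'
assert config['gcworker']['autostart'] == 'false'
assert config['memcache']['autostart'] == 'true'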

@@ -0,0 +1,8 @@
#!/bin/bash
QUAYDIR=${QUAYDIR:-"/"}
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd $QUAYDIR
python $QUAYCONF/init/supervisord_conf_create.py

@@ -0,0 +1,778 @@
import os
import pytest
import json
import yaml
import jinja2
from ..supervisord_conf_create import QUAYCONF_DIR, default_services, limit_services
def render_supervisord_conf(config):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../supervisord.conf.jnj")) as f:
template = jinja2.Template(f.read())
return template.render(config=config)
def test_supervisord_conf_create_defaults():
config = default_services()
limit_services(config, [])
rendered = render_supervisord_conf(config)
expected = """[supervisord]
nodaemon=true
[unix_http_server]
file=%(ENV_QUAYCONF)s/supervisord.sock
user=root
[supervisorctl]
serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[eventlistener:stdout]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command = supervisor_stdout
buffer_size = 1024
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler
;;; Run batch scripts
[program:blobuploadcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:buildlogsarchiver]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.buildlogsarchiver.buildlogsarchiver
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:builder]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m buildman.builder
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:chunkcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.chunkcleanupworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:expiredappspecifictokenworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.expiredappspecifictokenworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:exportactionlogsworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.exportactionlogsworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.gc.gcworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:globalpromstats]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.globalpromstats.globalpromstats
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:labelbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.labelbackfillworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:logrotateworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.logrotateworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:namespacegcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.namespacegcworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:notificationworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.notificationworker.notificationworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:queuecleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.queuecleanupworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repositoryactioncounter]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repositoryactioncounter
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:security_notification_worker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.security_notification_worker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:securityworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.securityworker.securityworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:storagereplication]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.storagereplication
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:tagbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.tagbackfillworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:teamsyncworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.teamsyncworker.teamsyncworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
;;; Run interactive scripts
[program:dnsmasq]
command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-registry]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s,
DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-secscan]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-verbs]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-web]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:jwtproxy]
command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:memcache]
command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:nginx]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:prometheus-aggregator]
command=/usr/local/bin/prometheus-aggregator
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:servicekey]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.servicekeyworker.servicekeyworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repomirrorworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repomirrorworker.repomirrorworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
# EOF NO NEWLINE"""
assert rendered == expected
def test_supervisord_conf_create_all_overrides():
config = default_services()
limit_services(config, "servicekey,prometheus-aggregator")
rendered = render_supervisord_conf(config)
expected = """[supervisord]
nodaemon=true
[unix_http_server]
file=%(ENV_QUAYCONF)s/supervisord.sock
user=root
[supervisorctl]
serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[eventlistener:stdout]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command = supervisor_stdout
buffer_size = 1024
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler
;;; Run batch scripts
[program:blobuploadcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:buildlogsarchiver]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.buildlogsarchiver.buildlogsarchiver
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:builder]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m buildman.builder
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:chunkcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.chunkcleanupworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:expiredappspecifictokenworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.expiredappspecifictokenworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:exportactionlogsworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.exportactionlogsworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.gc.gcworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:globalpromstats]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.globalpromstats.globalpromstats
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:labelbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.labelbackfillworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:logrotateworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.logrotateworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:namespacegcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.namespacegcworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:notificationworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.notificationworker.notificationworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:queuecleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.queuecleanupworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repositoryactioncounter]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repositoryactioncounter
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:security_notification_worker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.security_notification_worker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:securityworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.securityworker.securityworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:storagereplication]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.storagereplication
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:tagbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.tagbackfillworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:teamsyncworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.teamsyncworker.teamsyncworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
;;; Run interactive scripts
[program:dnsmasq]
command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-registry]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s,
DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-secscan]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-verbs]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-web]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:jwtproxy]
command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:memcache]
command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:nginx]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:prometheus-aggregator]
command=/usr/local/bin/prometheus-aggregator
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:servicekey]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.servicekeyworker.servicekeyworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repomirrorworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repomirrorworker.repomirrorworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
# EOF NO NEWLINE"""
assert rendered == expected

15
conf/init/v3_migration.py Normal file
@@ -0,0 +1,15 @@
from app import app
from active_migration import ActiveDataMigration
if not app.config.get('SETUP_COMPLETE', False):
print 'head'
else:
v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE')
if v3_upgrade_mode == 'background':
raise Exception('V3_UPGRADE_MODE must be "complete". This requires a full upgrade to Quay:v3.0. See https://access.qa.redhat.com/documentation/en-us/red_hat_quay/3/html/upgrade_quay/index')
elif v3_upgrade_mode == 'production-transition':
print '481623ba00ba'
elif v3_upgrade_mode == 'post-oci-rollout' or v3_upgrade_mode == 'post-oci-roll-back-compat' or v3_upgrade_mode == 'complete':
print ActiveDataMigration.alembic_migration_revision
else:
raise Exception('Unknown V3_UPGRADE_MODE: %s' % v3_upgrade_mode)
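The revision id printed here is captured by conf/init/runmigration.sh above and passed to alembic upgrade. The same handoff can be done in-process; a sketch, assuming a standard alembic.ini at the repository root:

from alembic import command
from alembic.config import Config

revision = '481623ba00ba'  # whatever this script printed for the configured mode
command.upgrade(Config('alembic.ini'), revision)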

4
conf/init/zz_boot.sh Executable file
@@ -0,0 +1,4 @@
#!/bin/bash
cd ${QUAYDIR:-"/"}
python ${QUAYPATH:-"."}/boot.py

@@ -0,0 +1,28 @@
jwtproxy:
signer_proxy:
enabled: true
listen_addr: :8081
ca_key_file: {{ conf_dir }}/mitm.key
ca_crt_file: {{ conf_dir }}/mitm.cert
signer:
issuer: quay
expiration_time: 5m
max_skew: 1m
private_key:
type: preshared
options:
key_id: {{ key_id }}
private_key_path: {{ service_key_location }}
verifier_proxies:
- enabled: true
listen_addr: unix:/tmp/jwtproxy_secscan.sock
socket_permission: 0777
verifier:
upstream: unix:/tmp/gunicorn_secscan.sock
audience: {{ audience }}
key_server:
type: keyregistry
options:
issuer: {{ security_issuer }}
registry: {{ registry }}

36
conf/logging.conf Normal file
@@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access
[handlers]
keys=console
[formatters]
keys=generic,json
[logger_root]
level=INFO
handlers=console
[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )
[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
[formatter_json]
class=loghandler.JsonFormatter
[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error
[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
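These are standard logging fileConfig files; the gunicorn_*.py configs above hand them to gunicorn via logconfig = logfile_path(debug=...). Loading one directly looks like the following (note that fileConfig instantiates every listed formatter, so Quay's loghandler module must be importable for formatter_json to resolve):

import logging.config

logging.config.fileConfig('conf/logging.conf', disable_existing_loggers=False)
logging.getLogger(__name__).info('logging configured')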

41
conf/logging_debug.conf Normal file
@@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access
[handlers]
keys=console
[formatters]
keys=generic,json
[logger_root]
level=DEBUG
handlers=console
[logger_boto]
level=INFO
handlers=console
qualname=boto
[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
[handler_console]
class=StreamHandler
formatter=generic
args=(sys.stdout, )
[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error
[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
[formatter_json]
class=loghandler.JsonFormatter

@@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access
[handlers]
keys=console
[formatters]
keys=generic,json
[logger_root]
level=DEBUG
handlers=console
[logger_boto]
level=INFO
handlers=console
qualname=boto
[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )
[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error
[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
[formatter_json]
class=loghandler.JsonFormatter

36
conf/logging_json.conf Normal file
@@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access
[handlers]
keys=console
[formatters]
keys=json,generic
[logger_root]
level=INFO
handlers=console
[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )
[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter
[formatter_json]
class=loghandler.JsonFormatter
[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error
[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG

8
conf/nginx/dhparams.pem Normal file
@@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEAk7fEh4MFr446aU61ZGxCl8VHvcJhDGcdd+3zaNxdWF7Wvr5QE8zX
QswoM5K2szlK7klcJOXer2IToHHQQn00nuWO3m6quZGV6EPbRmRKfRGa8pzSwH+R
Ph0OUpEQPh7zvegeVwEbrblD7i53ookbHlYGtxsPb28Y06OP5/xpks9C815Zy4gy
tx2yHi4FkFo52yErBF9jD/glsZYVHCo42LFrVGa5/7V0g++fG8yXCrBnqmz2d8FF
uU6/KJcmDCUn1m3mDfcf5HgeXSIsukW/XMZ3l9w1fdluJRwdEE9W2ePgqMiG3eC0
2T1sPfXCdXPQ7/5Gzf1eMtRZ/McipxVbgwIBAg==
-----END DH PARAMETERS-----

@@ -0,0 +1,7 @@
# vim: ft=nginx
server {
listen 8080 default_server;
server_name _;
rewrite ^ https://$host$request_uri? permanent;
}

73
conf/nginx/http-base.conf Normal file
@@ -0,0 +1,73 @@
# vim: ft=nginx
set_real_ip_from 0.0.0.0/0;
real_ip_recursive on;
log_format lb_logs '$remote_addr ($proxy_protocol_addr) '
'- $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'($request_time $request_length $upstream_response_time)';
types_hash_max_size 2048;
include /etc/opt/rh/rh-nginx112/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
client_body_temp_path /tmp/nginx 1 2;
proxy_temp_path /tmp/nginx-proxy;
fastcgi_temp_path /tmp/nginx-fastcgi;
uwsgi_temp_path /tmp/nginx-uwsgi;
scgi_temp_path /tmp/nginx-scgi;
sendfile on;
gzip on;
gzip_http_version 1.0;
gzip_proxied any;
gzip_min_length 500;
gzip_disable "MSIE [1-6]\.";
gzip_types text/plain text/xml text/css
text/javascript application/x-javascript
application/javascript image/svg+xml
application/octet-stream;
map $proxy_protocol_addr $proper_forwarded_for {
"" $proxy_add_x_forwarded_for;
default $proxy_protocol_addr;
}
map $http_x_forwarded_proto $proper_scheme {
default $scheme;
https https;
}
upstream web_app_server {
server unix:/tmp/gunicorn_web.sock fail_timeout=0;
}
upstream jwtproxy_secscan {
server unix:/tmp/jwtproxy_secscan.sock fail_timeout=0;
}
upstream verbs_app_server {
server unix:/tmp/gunicorn_verbs.sock fail_timeout=0;
}
upstream registry_app_server {
server unix:/tmp/gunicorn_registry.sock fail_timeout=0;
}
# NOTE: Exposed for the _internal_ping *only*. All other secscan routes *MUST* go through
# the jwtproxy.
upstream secscan_app_server {
server unix:/tmp/gunicorn_secscan.sock fail_timeout=0;
}
upstream build_manager_controller_server {
server localhost:8686;
}
upstream build_manager_websocket_server {
server localhost:8787;
}

129
conf/nginx/nginx.conf.jnj Normal file
@@ -0,0 +1,129 @@
# vim: ft=nginx
include root-base.conf;
{% if use_https %}
http {
include http-base.conf;
include hosted-http-base.conf;
include rate-limiting.conf;
server_names_hash_bucket_size 64;
resolver 127.0.0.1:8053 valid=10s;
ssl_ciphers '{{ ssl_ciphers }}';
ssl_protocols {% for ssl_protocol in ssl_protocols %}{{ ssl_protocol }} {% endfor %};
ssl_session_cache shared:SSL:60m;
ssl_session_timeout 2h;
ssl_session_tickets on;
ssl_prefer_server_ciphers on;
ssl_dhparam dhparams.pem;
server {
server_name _;
ssl_certificate ../stack/ssl.cert;
ssl_certificate_key ../stack/ssl.key;
include server-base.conf;
listen 8443 ssl http2 default;
ssl on;
# This header must be set only for HTTPS
add_header Strict-Transport-Security "max-age=63072000; preload";
access_log /var/log/nginx/access.log lb_logs;
}
server {
server_name _;
ssl_certificate ../stack/ssl.cert;
ssl_certificate_key ../stack/ssl.key;
include server-base.conf;
listen 7443 ssl http2 default proxy_protocol;
ssl on;
# This header must be set only for HTTPS
add_header Strict-Transport-Security "max-age=63072000; preload";
real_ip_header proxy_protocol;
access_log /var/log/nginx/access.log lb_logs;
}
{% if v1_only_domain %}
server {
include server-base.conf;
server_name {{ v1_only_domain }};
{% if use_old_certs %}
ssl_certificate ../stack/ssl.old.cert;
ssl_certificate_key ../stack/ssl.old.key;
{% else %}
ssl_certificate ../stack/ssl.cert;
ssl_certificate_key ../stack/ssl.key;
{% endif %}
listen 8443 ssl;
ssl on;
# This header must be set only for HTTPS
add_header Strict-Transport-Security "max-age=63072000; preload";
access_log /var/log/nginx/access.log lb_logs;
}
server {
server_name {{ v1_only_domain }};
{% if use_old_certs %}
ssl_certificate ../stack/ssl.old.cert;
ssl_certificate_key ../stack/ssl.old.key;
{% else %}
ssl_certificate ../stack/ssl.cert;
ssl_certificate_key ../stack/ssl.key;
{% endif %}
include server-base.conf;
listen 7443 ssl proxy_protocol;
ssl on;
# This header must be set only for HTTPS
add_header Strict-Transport-Security "max-age=63072000; preload";
real_ip_header proxy_protocol;
access_log /var/log/nginx/access.log lb_logs;
}
{% endif %}
}
{% else %}
http {
include http-base.conf;
include rate-limiting.conf;
resolver 127.0.0.1:8053 valid=10s;
server {
include server-base.conf;
listen 8080 default;
access_log /var/log/nginx/access.log lb_logs;
}
}
{% endif %}

@@ -0,0 +1,66 @@
# vim: ft=nginx
# Define two buckets: Once for http1 connections (which we force to shard across our fleet) and
# one for http2 connections (which will all hit the same node).
map $http2 $http1_bucket {
"" $proxy_protocol_addr; # HTTP1 case: use the IP address, since shared across nodes.
default $request_id; # HTTP2 case: use request ID to "disable" check.
}
map $http2 $http2_bucket {
"" $request_id; # HTTP1 case: use the request ID to "disable" check.
default $connection; # HTTP2 case: use the connection serial number to limit.
}
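# In practice: nginx leaves $http2 empty on HTTP/1 requests, so $http1_bucket keys on the
# client address (real limiting) while $http2_bucket keys on $request_id, which is unique
# per request and so never accumulates against a zone. On HTTP/2 the roles flip, with
# $connection providing a stable per-connection key.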
# Define two additional buckets that fall back to $request_id (thus no effective rate
# limiting) when a specific set of namespaces is matched. This allows us to selectively
# disable rate limiting for special internal namespaces.
map $namespace $namespaced_http1_bucket {
{% for namespace in non_rate_limited_namespaces %}
"{{ namespace }}" $request_id;
{% endfor %}
{% if enable_rate_limits %}
default $http1_bucket;
{% else %}
default $request_id;
{% endif %}
}
map $namespace $namespaced_http2_bucket {
{% for namespace in non_rate_limited_namespaces %}
"{{ namespace }}" $request_id;
{% endfor %}
{% if enable_rate_limits %}
default $http2_bucket;
{% else %}
default $request_id;
{% endif %}
}
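# Rendered sketch of the map above (assuming a single hypothetical entry,
# non_rate_limited_namespaces = ['quay-internal'], with rate limits enabled):
#   map $namespace $namespaced_http2_bucket {
#     "quay-internal" $request_id;
#     default $http2_bucket;
#   }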
{% if enable_rate_limits %}
limit_req_zone $http_authorization zone=staticauth:10m rate=30r/s;
{% else %}
limit_req_zone $request_id zone=staticauth:10m rate=300r/s;
{% endif %}
limit_req_zone $http1_bucket zone=dynamicauth_very_light_http1:10m rate=30r/s;
limit_req_zone $http2_bucket zone=dynamicauth_very_light_http2:10m rate=600r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_very_light_http1:10m rate=30r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_very_light_http2:10m rate=600r/s;
limit_req_zone $http1_bucket zone=dynamicauth_light_http1:10m rate=20r/s;
limit_req_zone $http2_bucket zone=dynamicauth_light_http2:10m rate=400r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_light_http1:10m rate=20r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_light_http2:10m rate=400r/s;
# These zones should always be used with burst=<number> (nodelay|delay): the base rate is
# very low on purpose, and the burst allows the spike of traffic required for a registry
# operation. The burst number should also vary per endpoint.
limit_req_zone $http1_bucket zone=dynamicauth_heavy_http1:10m rate=1r/s;
limit_req_zone $http2_bucket zone=dynamicauth_heavy_http2:10m rate=20r/s;
limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_heavy_http1:10m rate=1r/s;
limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_heavy_http2:10m rate=20r/s;
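# Usage sketch: the /v2/_catalog block in server-base.conf pairs the heavy zones with small
# bursts, e.g.:
#   limit_req zone=dynamicauth_heavy_http1 burst=1 nodelay;
#   limit_req zone=dynamicauth_heavy_http2 burst=5 nodelay;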
limit_req_status 429;
limit_req_log_level warn;

1
conf/nginx/resolver.conf Normal file
View file

@ -0,0 +1 @@
resolver 127.0.0.1:8053 valid=10s;

15
conf/nginx/root-base.conf Normal file
View file

@ -0,0 +1,15 @@
# vim: ft=nginx
pid /tmp/nginx.pid;
error_log /var/log/nginx/error.log;
worker_processes auto;
worker_priority -10;
worker_rlimit_nofile 10240;
daemon off;
events {
worker_connections 10240;
accept_mutex off;
}

337
conf/nginx/server-base.conf Normal file
View file

@ -0,0 +1,337 @@
# vim: ft=nginx
keepalive_timeout 5;
if ($host = "www.quay.io") {
return 301 $proper_scheme://quay.io$request_uri;
}
# Disable the ability to be embedded into iframes
add_header X-Frame-Options DENY;
# Proxy Headers
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_redirect off;
proxy_set_header Transfer-Encoding $http_transfer_encoding;
location / {
proxy_pass http://web_app_server;
}
location /push {
proxy_pass http://web_app_server;
client_max_body_size 5M;
}
location /realtime {
proxy_pass http://web_app_server;
proxy_buffering off;
proxy_request_buffering off;
}
location ~ ^/_storage_proxy/([^/]+)/([^/]+)/([^/]+)/(.+) {
include resolver.conf;
auth_request /_storage_proxy_auth;
proxy_pass $2://$3/$4$is_args$args;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $3;
add_header Host $3;
proxy_buffering off;
proxy_request_buffering off;
proxy_read_timeout 60s;
}
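# Illustrative request (hypothetical host and path): a GET for
# /_storage_proxy/<token>/https/cdn.example.com/some/blob captures $1=<token>, $2=https,
# $3=cdn.example.com and $4=some/blob; once the auth_request subrequest to
# /_storage_proxy_auth returns 2xx, the request is proxied to https://cdn.example.com/some/blob.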
location = /_storage_proxy_auth {
proxy_pass http://web_app_server;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Original-URI $request_uri;
proxy_read_timeout 10;
}
location ~ ^/v2/_catalog(.*)$ {
proxy_pass http://registry_app_server;
proxy_read_timeout 10;
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
{% if enable_rate_limits %}
limit_req zone=dynamicauth_heavy_http1 burst=1 nodelay;
limit_req zone=dynamicauth_heavy_http2 burst=5 nodelay;
{% endif %}
}
location /secscan/ {
proxy_pass http://jwtproxy_secscan;
}
location /secscan/_internal_ping {
proxy_pass http://secscan_app_server;
}
{% if signing_enabled %}
location ~ ^/v2/(.+)/_trust/tuf/(.*)$ {
set $upstream_tuf {{ tuf_server }};
proxy_pass $upstream_tuf$uri;
proxy_set_header Host "{{ tuf_host }}";
}
{% endif %}
location /cnr {
proxy_buffering off;
proxy_request_buffering off;
proxy_pass http://registry_app_server;
proxy_read_timeout 120;
proxy_temp_path /tmp 1 2;
{% if enable_rate_limits %}
limit_req zone=staticauth burst=5 nodelay;
{% endif %}
}
location /api/ {
proxy_pass http://web_app_server;
{% if enable_rate_limits %}
limit_req zone=dynamicauth_heavy_http1 burst=25 nodelay;
limit_req zone=dynamicauth_heavy_http2 burst=100 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
location /api/suconfig {
proxy_pass http://web_app_server;
# For suconfig, set our read timeout very high to cover both DB migrations
# and waiting for secrets to be updated.
proxy_read_timeout 2000;
}
# This block handles blob requests and will receive a high volume of traffic, so we set the
# burst much higher.
location ~ /v2/([^/]+)\/[^/]+/blobs/ {
# If we're being accessed via v1.quay.io, pretend we don't support v2.
if ($host = "v1.quay.io") {
return 404;
}
# NOTE: We disable gzip for HEAD requests because Docker issues them to determine the
# Content-Length of a blob. Unfortunately nginx, seeing an empty body, overwrites the header
# with a length of 0, which breaks this functionality.
if ($request_method = HEAD) {
gzip off;
}
proxy_buffering off;
proxy_request_buffering off;
proxy_read_timeout 2000;
proxy_temp_path /tmp 1 2;
client_max_body_size {{ maximum_layer_size }};
# Setting ANY header clears all inherited proxy_set_header directives
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
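# (nginx only inherits proxy_set_header directives from the enclosing level when a block
# defines none of its own, so each location that sets any header restates the full set.)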
proxy_http_version 1.1;
proxy_pass http://registry_app_server;
set $namespace $1;
{% if enable_rate_limits %}
limit_req zone=namespaced_dynamicauth_light_http1 burst=50 nodelay;
limit_req zone=namespaced_dynamicauth_light_http2 burst=100 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
# This block handles tags endpoint requests, for which we want to restrict traffic due to
# how heavy an operation listing tags can be.
location ~ /v2/([^/]+)\/[^/]+/tags/ {
# If we're being accessed via v1.quay.io, pretend we don't support v2.
if ($host = "v1.quay.io") {
return 404;
}
# Setting ANY header clears all inherited proxy_set_header directives
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_pass http://registry_app_server;
set $namespace $1;
{% if enable_rate_limits %}
limit_req zone=namespaced_dynamicauth_heavy_http1 burst=2 nodelay;
limit_req zone=namespaced_dynamicauth_heavy_http2 burst=2 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
# This block handles manifests endpoint requests, which we rate limit more heavily than the
# generic V2 operations, as this endpoint handles pushes and pulls.
location ~ /v2/([^/]+)\/[^/]+/manifests/ {
# If we're being accessed via v1.quay.io, pretend we don't support v2.
if ($host = "v1.quay.io") {
return 404;
}
# Setting ANY header clears all inherited proxy_set_header directives
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_pass http://registry_app_server;
set $namespace $1;
{% if enable_rate_limits %}
limit_req zone=namespaced_dynamicauth_light_http1 burst=10 nodelay;
limit_req zone=namespaced_dynamicauth_light_http2 burst=50 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
# This block applies to the beginning of a push or pull
location = /v2/auth {
# If we're being accessed via v1.quay.io, pretend we don't support v2.
if ($host = "v1.quay.io") {
return 404;
}
# Setting ANY header clears all inherited proxy_set_header directives
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_pass http://registry_app_server;
{% if enable_rate_limits %}
limit_req zone=staticauth burst=2 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
# This block handles all other V2 requests, for which we can use a higher rate limit.
location ~ ^/v2 {
# If we're being accessed via v1.quay.io, pretend we don't support v2.
if ($host = "v1.quay.io") {
return 404;
}
# NOTE: We disable gzip for HEAD requests because Docker issues them to determine the
# Content-Length of a blob. Unfortunately nginx, seeing an empty body, overwrites the header
# with a length of 0, which breaks this functionality. Included here for completeness.
if ($request_method = HEAD) {
gzip off;
}
# Setting ANY header clears all inherited proxy_set_header directives
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_pass http://registry_app_server;
{% if enable_rate_limits %}
limit_req zone=dynamicauth_very_light_http1 burst=20 nodelay;
limit_req zone=dynamicauth_very_light_http2 burst=80 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
location /v1/ {
# Setting ANY header clears all inherited proxy_set_header directives
proxy_set_header X-Forwarded-For $proper_forwarded_for;
proxy_set_header X-Forwarded-Proto $proper_scheme;
proxy_set_header Host $host;
proxy_buffering off;
proxy_request_buffering off;
proxy_http_version 1.1;
proxy_pass http://registry_app_server;
proxy_temp_path /tmp 1 2;
client_max_body_size {{ maximum_layer_size }};
{% if enable_rate_limits %}
limit_req zone=dynamicauth_heavy_http1 burst=5 nodelay;
limit_req zone=dynamicauth_heavy_http2 burst=25 nodelay;
{% endif %}
keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin.
}
location = /v1/_ping {
add_header Content-Type text/plain;
add_header X-Docker-Registry-Version 0.6.0;
add_header X-Docker-Registry-Standalone 0;
return 200 'true';
}
location /c1/ {
proxy_buffering off;
proxy_request_buffering off;
proxy_pass http://verbs_app_server;
proxy_temp_path /tmp 1 2;
{% if enable_rate_limits %}
limit_req zone=staticauth burst=5 nodelay;
{% endif %}
}
location /static/ {
# Serve static files directly; on a miss, fall back to the app's 404 page via error_page.
alias {{static_dir}}/;
error_page 404 /404;
}
error_page 502 {{static_dir}}/502.html;
location ~ ^/b1/controller(/?)(.*) {
proxy_pass http://build_manager_controller_server/$2;
}
location ~ ^/b1/socket(/?)(.*) {
proxy_pass http://build_manager_websocket_server/$2;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 300;
}
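# proxy_http_version 1.1 plus the explicit Upgrade/Connection headers are required for the
# WebSocket handshake to pass through nginx; hop-by-hop headers are not forwarded by default.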

376
conf/supervisord.conf.jnj Normal file
View file

@ -0,0 +1,376 @@
[supervisord]
nodaemon=true
[unix_http_server]
file=%(ENV_QUAYCONF)s/supervisord.sock
user=root
[supervisorctl]
serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[eventlistener:stdout]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command = supervisor_stdout
buffer_size = 1024
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler
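; The supervisor_stdout listener forwards PROCESS_LOG events to the container's stdout;
; this is why every program below enables stdout/stderr events and points its logfiles at
; /dev/stdout with rotation disabled (maxbytes=0).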
;;; Run batch scripts
[program:blobuploadcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker
autostart = {{ config['blobuploadcleanupworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:buildlogsarchiver]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.buildlogsarchiver.buildlogsarchiver
autostart = {{ config['buildlogsarchiver']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:builder]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m buildman.builder
autostart = {{ config['builder']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:chunkcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.chunkcleanupworker
autostart = {{ config['chunkcleanupworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:expiredappspecifictokenworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.expiredappspecifictokenworker
autostart = {{ config['expiredappspecifictokenworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:exportactionlogsworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.exportactionlogsworker
autostart = {{ config['exportactionlogsworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.gc.gcworker
autostart = {{ config['gcworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:globalpromstats]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.globalpromstats.globalpromstats
autostart = {{ config['globalpromstats']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:labelbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.labelbackfillworker
autostart = {{ config['labelbackfillworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:logrotateworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.logrotateworker
autostart = {{ config['logrotateworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:namespacegcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.namespacegcworker
autostart = {{ config['namespacegcworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:notificationworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.notificationworker.notificationworker
autostart = {{ config['notificationworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:queuecleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.queuecleanupworker
autostart = {{ config['queuecleanupworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repositoryactioncounter]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repositoryactioncounter
autostart = {{ config['repositoryactioncounter']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:security_notification_worker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.security_notification_worker
autostart = {{ config['security_notification_worker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:securityworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.securityworker.securityworker
autostart = {{ config['securityworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:storagereplication]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.storagereplication
autostart = {{ config['storagereplication']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:tagbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.tagbackfillworker
autostart = {{ config['tagbackfillworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:teamsyncworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.teamsyncworker.teamsyncworker
autostart = {{ config['teamsyncworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
;;; Run interactive scripts
[program:dnsmasq]
command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053
autostart = {{ config['dnsmasq']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-registry]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s,
DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application
autostart = {{ config['gunicorn-registry']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
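; Note: gunicorn-registry (above) and gunicorn-verbs (below) both run under "nice -n 10",
; deprioritizing bulk push/pull and image-conversion traffic relative to the web and
; secscan gunicorns.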
[program:gunicorn-secscan]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application
autostart = {{ config['gunicorn-secscan']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-verbs]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application
autostart = {{ config['gunicorn-verbs']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-web]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application
autostart = {{ config['gunicorn-web']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:jwtproxy]
command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml
autostart = {{ config['jwtproxy']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:memcache]
command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
autostart = {{ config['memcache']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:nginx]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf
autostart = {{ config['nginx']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:prometheus-aggregator]
command=/usr/local/bin/prometheus-aggregator
autostart = {{ config['prometheus-aggregator']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:servicekey]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.servicekeyworker.servicekeyworker
autostart = {{ config['servicekey']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repomirrorworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repomirrorworker.repomirrorworker
autostart = {{ config['repomirrorworker']['autostart'] }}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
\ No newline at end of file