Switch over to the Phusion baseimage. Prevent everything from daemonizing and run each process as a runit service under Phusion's my_init. Make the workers trap and handle SIGINT and SIGTERM. Extend the reservation to one hour for the dockerfilebuild worker. Update nginx to remove the dependency on libgd. Merge the requirements and requirements.enterprise files.
parent 14fba3ae7c
commit 576fbe4f0d
17 changed files with 87 additions and 63 deletions
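Background on the worker change described in the commit message: rather than double-forking via the python-daemon library (the removed `import daemon` / `daemon.DaemonContext` code), each worker now runs in the foreground under a runit service and shuts down when runit delivers SIGTERM (or SIGINT when run interactively). The following is a minimal, self-contained sketch of that pattern only; the real workers/worker.py additionally drives an APScheduler instance and queue polling, which this sketch omits.

```python
import logging
import signal
from threading import Event

logger = logging.getLogger('worker')

class Worker(object):
  def __init__(self):
    self._stop = Event()

  def start(self):
    # Stay in the foreground (no daemonizing) so runit/my_init can supervise us.
    signal.signal(signal.SIGTERM, self.join)
    signal.signal(signal.SIGINT, self.join)

    # Block on the stop event instead of detaching from the terminal.
    while not self._stop.wait(1):
      pass

    logger.debug('Finished.')

  def join(self, signal_num=None, stack_frame=None):
    # Default arguments let this serve both as a signal handler and as a
    # normal method call.
    logger.debug('Shutting down worker gracefully.')
    self._stop.set()

if __name__ == '__main__':
  logging.basicConfig(level=logging.DEBUG)
  Worker().start()
```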
Dockerfile (19 changes)

@@ -1,13 +1,17 @@
-FROM ubuntu:13.10
+FROM phusion/baseimage:0.9.9
 
 ENV DEBIAN_FRONTEND noninteractive
+
+ENV HOME /root
 
 RUN apt-get update
-RUN apt-get install -y git python-virtualenv python-dev phantomjs libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev libevent-dev gdebi-core
+RUN apt-get install -y git python-virtualenv python-dev phantomjs libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev libevent-dev gdebi-core g++
 
 ADD binary_dependencies binary_dependencies
 RUN gdebi --n binary_dependencies/*.deb
 
-ADD requirements.enterprise requirements.txt
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+ADD requirements.txt requirements.txt
 RUN virtualenv --distribute venv
 RUN venv/bin/pip install -r requirements.txt

@@ -28,10 +32,15 @@ ADD workers workers
 ADD app.py app.py
 ADD application.py application.py
 ADD config.py config.py
-ADD run.sh run.sh
+
+ADD conf/init/mklogsdir.sh /etc/my_init.d/
+ADD conf/init/gunicorn.sh /etc/service/gunicorn/run
+ADD conf/init/nginx.sh /etc/service/nginx/run
+ADD conf/init/diffsworker.sh /etc/service/diffsworker/run
+ADD conf/init/webhookworker.sh /etc/service/webhookworker/run
 
 VOLUME ["/conf/stack", "/mnt/logs"]
 
 EXPOSE 443 80
 
-ENTRYPOINT ["./run.sh"]
+CMD ["/sbin/my_init"]

README.md (19 changes)

@@ -15,30 +15,13 @@ sudo mkdir -p /mnt/logs/
 sudo docker run -d -p 80:80 -p 443:443 -v /mnt/logs:/mnt/logs -v `pwd`/quayconfig/production:/conf/stack quay.io/quay/quay
 ```
 
-start the log shipper:
+start the log shipper (DEPRECATED):
 
 ```
 sudo docker pull quay.io/quay/logstash
 sudo docker run -d -e REDIS_PORT_6379_TCP_ADDR=logs.quay.io -v /mnt/logs:/mnt/logs quay.io/quay/logstash quay.conf
 ```
 
-start the workers (FIXME):
-
-```
-STACK=prod python -m workers.diffsworker -D
-STACK=prod python -m workers.webhookworker -D
-```
-
-bouncing the servers (FIXME):
-
-```
-sudo kill -HUP `cat /mnt/logs/nginx.pid`
-kill -HUP `cat /mnt/logs/gunicorn.pid`
-
-kill <pids of worker daemons>
-restart daemons
-```
-
 running the tests:
 
 ```

binary_dependencies/nginx_1.4.2-nobuffer-2_amd64.deb (new binary file)
Binary file not shown.

@@ -2,7 +2,6 @@ bind = 'unix:/tmp/gunicorn.sock'
 workers = 8
 worker_class = 'gevent'
 timeout = 2000
-daemon = True
 pidfile = '/mnt/logs/gunicorn.pid'
 errorlog = '/mnt/logs/application.log'
 loglevel = 'debug'

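The unnamed hunk above is the gunicorn configuration; conf/init/gunicorn.sh (added below) points gunicorn at conf/gunicorn_config.py, which is presumably this file. Dropping `daemon = True` keeps the master process in the foreground so the runit service can supervise it. A minimal sketch of the file after this change, assuming it contains only the settings visible in the hunk:

```python
# conf/gunicorn_config.py (sketch) -- daemon = True is gone: runit, launched by
# phusion's /sbin/my_init, supervises gunicorn, so it must not fork into the
# background.
bind = 'unix:/tmp/gunicorn.sock'
workers = 8
worker_class = 'gevent'
timeout = 2000
pidfile = '/mnt/logs/gunicorn.pid'
errorlog = '/mnt/logs/application.log'
loglevel = 'debug'
```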
conf/init/diffsworker.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
+#! /bin/bash
+
+echo 'Starting diffs worker'
+
+cd /
+venv/bin/python -m workers.diffsworker --log=/mnt/logs/diffsworker.log
+
+echo 'Diffs worker exited'

conf/init/gunicorn.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
+#! /bin/bash
+
+echo 'Starting gunicon'
+
+cd /
+venv/bin/gunicorn -c conf/gunicorn_config.py application:application
+
+echo 'Gunicorn exited'

conf/init/mklogsdir.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
+#! /bin/sh
+
+echo 'Creating logs directory'
+mkdir -p /mnt/logs

conf/init/nginx.sh (new executable file, 14 lines)

@@ -0,0 +1,14 @@
+#! /bin/bash
+
+echo 'Starting nginx'
+
+if [ -f /conf/stack/ssl.key ]
+then
+  echo "Using HTTPS"
+  /usr/local/nginx/sbin/nginx -c /conf/nginx-enterprise.conf
+else
+  echo "No SSL key provided, using HTTP"
+  /usr/local/nginx/sbin/nginx -c /conf/nginx-enterprise-nossl.conf
+fi
+
+echo 'Nginx exited'

conf/init/webhookworker.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
+#! /bin/bash
+
+echo 'Starting webhook worker'
+
+cd /
+venv/bin/python -m workers.webhookworker --log=/mnt/logs/webhookworker.log
+
+echo 'Webhook worker exited'

@@ -4,6 +4,8 @@ worker_processes 2;
 
 user root nogroup;
 
+daemon off;
+
 http {
     include http-base.conf;

(the same hunk is applied to a second nginx configuration file)

@@ -18,7 +18,6 @@ argparse==1.2.1
 beautifulsoup4==4.3.2
 blinker==1.3
 boto==2.27.0
-distribute==0.6.34
 git+https://github.com/DevTable/docker-py.git
 ecdsa==0.11
 gevent==1.0

Binary file not shown.
@@ -1,5 +1,4 @@
 import logging
-import daemon
 import argparse
 
 from data.queue import image_diff_queue

@@ -34,26 +33,16 @@ class DiffsWorker(Worker):
     return True
 
 
 parser = argparse.ArgumentParser(description='Worker daemon to compute diffs')
-parser.add_argument('-D', action='store_true', default=False,
-                    help='Run the worker in daemon mode.')
-parser.add_argument('--log', default='diffsworker.log',
-                    help='Specify the log file for the worker as a daemon.')
+parser.add_argument('--log', help='Specify the log file for the worker as a daemon.')
 args = parser.parse_args()
 
-worker = DiffsWorker(image_diff_queue)
-
-if args.D:
-  handler = logging.FileHandler(args.log)
-  handler.setFormatter(formatter)
-  root_logger.addHandler(handler)
-  with daemon.DaemonContext(files_preserve=[handler.stream]):
-    worker.start()
-
+if args.log is not None:
+  handler = logging.FileHandler(args.log)
 else:
   handler = logging.StreamHandler()
-  handler.setFormatter(formatter)
-  root_logger.addHandler(handler)
-  worker.start()
+handler.setFormatter(formatter)
+root_logger.addHandler(handler)
+
+worker = DiffsWorker(image_diff_queue)
+worker.start()

@@ -461,7 +461,7 @@ parser.add_argument('--log', default='dockerfilebuild.log',
 args = parser.parse_args()
 
 
-worker = DockerfileBuildWorker(dockerfile_build_queue)
+worker = DockerfileBuildWorker(dockerfile_build_queue, reservation_seconds=60*60) # 1 hour
 
 if args.D:
   handler = logging.FileHandler(args.log)

@@ -1,5 +1,4 @@
 import logging
-import daemon
 import argparse
 import requests
 import json

@@ -37,25 +36,16 @@ class WebhookWorker(Worker):
 
 
 parser = argparse.ArgumentParser(description='Worker daemon to send webhooks')
-parser.add_argument('-D', action='store_true', default=False,
-                    help='Run the worker in daemon mode.')
-parser.add_argument('--log', default='webhooks.log',
-                    help='Specify the log file for the worker as a daemon.')
+parser.add_argument('--log', help='Specify the log file for the worker as a daemon.')
 args = parser.parse_args()
 
+if args.log is not None:
+  handler = logging.FileHandler(args.log)
+else:
+  handler = logging.StreamHandler()
+handler.setFormatter(formatter)
+root_logger.addHandler(handler)
+
 worker = WebhookWorker(webhook_queue, poll_period_seconds=15,
                        reservation_seconds=3600)
-
-if args.D:
-  handler = logging.FileHandler(args.log)
-  handler.setFormatter(formatter)
-  root_logger.addHandler(handler)
-  with daemon.DaemonContext(files_preserve=[handler.stream]):
-    worker.start()
-
-else:
-  handler = logging.StreamHandler()
-  handler.setFormatter(formatter)
-  root_logger.addHandler(handler)
-  worker.start()
+worker.start()

@@ -1,5 +1,6 @@
 import logging
 import json
+import signal
 
 from threading import Event
 from apscheduler.scheduler import Scheduler

@@ -52,8 +53,16 @@ class Worker(object):
     self._sched.add_interval_job(self.poll_queue, seconds=self._poll_period_seconds)
     self._sched.add_interval_job(self.watchdog, seconds=self._watchdog_period_seconds)
 
+    signal.signal(signal.SIGTERM, self.join)
+    signal.signal(signal.SIGINT, self.join)
+
     while not self._stop.wait(1):
       pass
 
-  def join(self):
+    logger.debug('Waiting for running tasks to complete.')
+    self._sched.shutdown()
+    logger.debug('Finished.')
+
+  def join(self, signal_num=None, stack_frame=None):
+    logger.debug('Shutting down worker gracefully.')
     self._stop.set()
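One detail in the hunk above: `join` gains default arguments so the same method satisfies both call sites — Python's signal module invokes handlers as `handler(signal_num, stack_frame)`, while existing code can still call `join()` with no arguments. A hypothetical standalone snippet (not part of this commit) illustrating that contract:

```python
import os
import signal
import time
from threading import Event

stop = Event()

def join(signal_num=None, stack_frame=None):
    # Callable both directly and as a signal handler, which receives
    # (signal_num, stack_frame) from the interpreter.
    print('Shutting down gracefully (signal=%s)' % signal_num)
    stop.set()

signal.signal(signal.SIGTERM, join)
os.kill(os.getpid(), signal.SIGTERM)  # roughly what runit / docker stop delivers
time.sleep(0.1)                       # give the handler a chance to run
assert stop.is_set()
join()                                # still callable with no arguments
```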